/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
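
/*
 * A sketch of the resulting two-level lookup, assuming 4KB GPU pages
 * and a block size of 9 bits (the real value comes from the
 * amdgpu_vm_block_size module parameter): each page table then holds
 * 1 << 9 = 512 entries and covers 512 * 4KB = 2MB of address space,
 * and a page frame number is split as
 *
 *   pt_idx  = pfn >> amdgpu_vm_block_size;     // selects the page table
 *   pte_idx = pfn & (AMDGPU_VM_PTE_COUNT - 1); // entry within that table
 */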

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
        return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
        return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
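
/*
 * Worked example (illustrative numbers, not driver defaults): with
 * max_pfn = 1 << 20 (a 4GB address space in 4KB pages) and a block
 * size of 9 bits, amdgpu_vm_num_pdes() returns 1 << 11 = 2048 page
 * directory entries, so amdgpu_vm_directory_size() is
 * 2048 * 8 bytes = 16KB, rounded up to GPU page alignment.
 */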

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
{
        entry->robj = vm->page_directory;
        entry->priority = 0;
        entry->tv.bo = &vm->page_directory->tbo;
        entry->tv.shared = true;
        list_add(&entry->tv.head, validated);
}
Alex Deucherd38ceaf2015-04-20 16:55:21 -040097
Christian König56467eb2015-12-11 15:16:32 +010098/**
Christian Königee1782c2015-12-11 21:01:23 +010099 * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
Christian König56467eb2015-12-11 15:16:32 +0100100 *
101 * @vm: vm providing the BOs
Christian König3c0eea62015-12-11 14:39:05 +0100102 * @duplicates: head of duplicates list
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400103 *
Christian Königee1782c2015-12-11 21:01:23 +0100104 * Add the page directory to the BO duplicates list
105 * for command submission.
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400106 */
Christian Königee1782c2015-12-11 21:01:23 +0100107void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400108{
Christian Königee1782c2015-12-11 21:01:23 +0100109 unsigned i;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400110
111 /* add the vm page table to the list */
Christian Königee1782c2015-12-11 21:01:23 +0100112 for (i = 0; i <= vm->max_pde_used; ++i) {
113 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400114
Christian Königee1782c2015-12-11 21:01:23 +0100115 if (!entry->robj)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400116 continue;
117
Christian Königee1782c2015-12-11 21:01:23 +0100118 list_add(&entry->tv.head, duplicates);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400119 }
Christian Königeceb8a12016-01-11 15:35:21 +0100120
121}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm)
{
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        unsigned i;

        spin_lock(&glob->lru_lock);
        for (i = 0; i <= vm->max_pde_used; ++i) {
                struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

                if (!entry->robj)
                        continue;

                ttm_bo_move_to_lru_tail(&entry->robj->tbo);
        }
        spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence)
{
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vm_manager_id *id;
        int r;

        mutex_lock(&adev->vm_manager.lock);

        /* check if the id is still valid */
        if (vm_id->id) {
                long owner;

                id = &adev->vm_manager.ids[vm_id->id];
                owner = atomic_long_read(&id->owner);
                if (owner == (long)vm) {
                        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
                        trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);

                        fence_put(id->active);
                        id->active = fence_get(fence);

                        mutex_unlock(&adev->vm_manager.lock);
                        return 0;
                }
        }

        /* we definitely need to flush */
        vm_id->pd_gpu_addr = ~0ll;

        id = list_first_entry(&adev->vm_manager.ids_lru,
                              struct amdgpu_vm_manager_id,
                              list);
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
        atomic_long_set(&id->owner, (long)vm);

        vm_id->id = id - adev->vm_manager.ids;
        trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);

        r = amdgpu_sync_fence(ring->adev, sync, id->active);

        if (!r) {
                fence_put(id->active);
                id->active = fence_get(fence);
        }

        mutex_unlock(&adev->vm_manager.lock);
        return r;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm.
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
                     struct fence *updates)
{
        uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
        struct fence *flushed_updates = vm_id->flushed_updates;
        bool is_later;

        if (!flushed_updates)
                is_later = true;
        else if (!updates)
                is_later = false;
        else
                is_later = fence_is_later(updates, flushed_updates);

        if (pd_addr != vm_id->pd_gpu_addr || is_later) {
                trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
                if (is_later) {
                        vm_id->flushed_updates = fence_get(updates);
                        fence_put(flushed_updates);
                }
                vm_id->pd_gpu_addr = pd_addr;
                amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
        }
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->vm == vm) {
                        return bo_va;
                }
        }
        return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw access flags
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
                                   struct amdgpu_gart *gtt,
                                   uint32_t gtt_flags,
                                   struct amdgpu_ib *ib,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint32_t flags)
{
        trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

        if ((gtt == &adev->gart) && (flags == gtt_flags)) {
                uint64_t src = gtt->table_addr + (addr >> 12) * 8;
                amdgpu_vm_copy_pte(adev, ib, pe, src, count);

        } else if (gtt) {
                dma_addr_t *pages_addr = gtt->pages_addr;
                amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
                                    count, incr, flags);

        } else if (count < 3) {
                amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
                                    count, incr, flags);

        } else {
                amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
                                      count, incr, flags);
        }
}
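
/*
 * Dispatch summary of the helper above: GART-backed updates whose
 * flags match the GTT flags are copied straight out of the GART
 * table; other system-memory updates go through amdgpu_vm_write_pte();
 * runs of fewer than three VRAM entries are also written directly,
 * while longer contiguous VRAM runs use the more compact
 * amdgpu_vm_set_pte_pde() command.
 */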

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM the BO belongs to
 * @bo: bo to clear
 *
 * The BO has to be reserved before calling this.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo)
{
        struct amdgpu_ring *ring;
        struct fence *fence = NULL;
        struct amdgpu_job *job;
        unsigned entries;
        uint64_t addr;
        int r;

        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto error;

        addr = amdgpu_bo_gpu_offset(bo);
        entries = amdgpu_bo_size(bo) / 8;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto error;

        amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
                               0, 0);
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);

        WARN_ON(job->ibs[0].length_dw > 64);
        r = amdgpu_job_submit(job, ring, &vm->entity,
                              AMDGPU_FENCE_OWNER_VM, &fence);
        if (r)
                goto error_free;

        amdgpu_bo_fence(bo, fence, true);
        fence_put(fence);
        return 0;

error_free:
        amdgpu_job_free(job);

error:
        return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        if (pages_addr) {
                /* page table offset */
                result = pages_addr[addr >> PAGE_SHIFT];

                /* in case cpu page size != gpu page size*/
                result |= addr & (~PAGE_MASK);

        } else {
                /* No mapping required */
                result = addr;
        }

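        /* mask out the low 12 bits so the result is GPU page (4KB) aligned */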
        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}

/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        struct amdgpu_ring *ring;
        struct amdgpu_bo *pd = vm->page_directory;
        uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
        uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct fence *fence = NULL;
        int r;

        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

        /* padding, etc. */
        ndw = 64;

        /* assume the worst case */
        ndw += vm->max_pde_used * 6;

        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        /* walk over the address space and update the page directory */
        for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
                struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
                uint64_t pde, pt;

                if (bo == NULL)
                        continue;

                pt = amdgpu_bo_gpu_offset(bo);
                if (vm->page_tables[pt_idx].addr == pt)
                        continue;
                vm->page_tables[pt_idx].addr = pt;

                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt)) {

                        if (count) {
                                amdgpu_vm_update_pages(adev, NULL, 0, ib,
                                                       last_pde, last_pt,
                                                       count, incr,
                                                       AMDGPU_PTE_VALID);
                        }

                        count = 1;
                        last_pde = pde;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }

        if (count)
                amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
                                       count, incr, AMDGPU_PTE_VALID);

        if (ib->length_dw != 0) {
                amdgpu_ring_pad_ib(ring, ib);
                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM);
                WARN_ON(ib->length_dw > ndw);
                r = amdgpu_job_submit(job, ring, &vm->entity,
                                      AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
                        goto error_free;

                amdgpu_bo_fence(pd, fence, true);
                fence_put(vm->page_directory_fence);
                vm->page_directory_fence = fence_get(fence);
                fence_put(fence);

        } else {
                amdgpu_job_free(job);
        }

        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}
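
/*
 * Coalescing example (illustrative): if the BOs backing page tables
 * 4-7 happen to sit back to back in VRAM, the walk above emits a
 * single amdgpu_vm_update_pages() call covering all four PDEs instead
 * of four separate ones.
 */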

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
                                struct amdgpu_gart *gtt,
                                uint32_t gtt_flags,
                                struct amdgpu_ib *ib,
                                uint64_t pe_start, uint64_t pe_end,
                                uint64_t addr, uint32_t flags)
{
        /**
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
         * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
         * flags are considered valid for all PTEs within the fragment range
         * and corresponding mappings are assumed to be physically contiguous.
         *
         * The L1 TLB can store a single PTE for the whole fragment,
         * significantly increasing the space available for translation
         * caching. This leads to large improvements in throughput when the
         * TLB is under pressure.
         *
         * The L2 TLB distributes small and large fragments into two
         * asymmetric partitions. The large fragment cache is significantly
         * larger. Thus, we try to use large fragments wherever possible.
         * Userspace can support this by aligning virtual base address and
         * allocation size to the fragment size.
         */

        /* SI and newer are optimized for 64KB */
        uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
        uint64_t frag_align = 0x80;
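
        /*
         * Illustrative arithmetic: frag_align is in bytes of PTEs, so
         * 0x80 bytes / 8 bytes per PTE = 16 PTEs, i.e. 16 * 4KB = 64KB of
         * address space, matching the 64KB fragment size (1 << (12 + 4))
         * selected by AMDGPU_PTE_FRAG_64KB above.
         */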

        uint64_t frag_start = ALIGN(pe_start, frag_align);
        uint64_t frag_end = pe_end & ~(frag_align - 1);

        unsigned count;

        /* Abort early if there isn't anything to do */
        if (pe_start == pe_end)
                return;

        /* system pages are not contiguous */
        if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {

                count = (pe_end - pe_start) / 8;
                amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
                                       addr, count, AMDGPU_GPU_PAGE_SIZE,
                                       flags);
                return;
        }

        /* handle the 4K area at the beginning */
        if (pe_start != frag_start) {
                count = (frag_start - pe_start) / 8;
                amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
                                       count, AMDGPU_GPU_PAGE_SIZE, flags);
                addr += AMDGPU_GPU_PAGE_SIZE * count;
        }

        /* handle the area in the middle */
        count = (frag_end - frag_start) / 8;
        amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
                               AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

        /* handle the 4K area at the end */
        if (frag_end != pe_end) {
                addr += AMDGPU_GPU_PAGE_SIZE * count;
                count = (pe_end - frag_end) / 8;
                amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
                                       count, AMDGPU_GPU_PAGE_SIZE, flags);
        }
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
                                  struct amdgpu_gart *gtt,
                                  uint32_t gtt_flags,
                                  struct amdgpu_vm *vm,
                                  struct amdgpu_ib *ib,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
{
        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

        uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
        uint64_t addr;

        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; ) {
                uint64_t pt_idx = addr >> amdgpu_vm_block_size;
                struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
                unsigned nptes;
                uint64_t pe_start;

                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

                pe_start = amdgpu_bo_gpu_offset(pt);
                pe_start += (addr & mask) * 8;

                if (last_pe_end != pe_start) {

                        amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
                                            last_pe_start, last_pe_end,
                                            last_dst, flags);

                        last_pe_start = pe_start;
                        last_pe_end = pe_start + 8 * nptes;
                        last_dst = dst;
                } else {
                        last_pe_end += 8 * nptes;
                }

                addr += nptes;
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }

        amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
                            last_pe_start, last_pe_end,
                            last_dst, flags);
}
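
/*
 * Boundary example (illustrative, 9-bit block size): updating pfns
 * 500..519 touches entries 500..511 of page table 0 and entries 0..7
 * of page table 1; unless the two tables happen to be adjacent in
 * VRAM, the walk above flushes the first run through
 * amdgpu_vm_frag_ptes() before starting the second.
 */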

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct amdgpu_gart *gtt,
                                       uint32_t gtt_flags,
                                       struct amdgpu_vm *vm,
                                       uint64_t start, uint64_t last,
                                       uint32_t flags, uint64_t addr,
                                       struct fence **fence)
{
        struct amdgpu_ring *ring;
        void *owner = AMDGPU_FENCE_OWNER_VM;
        unsigned nptes, ncmds, ndw;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct fence *f = NULL;
        int r;

        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

        /* sync to everything on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
                owner = AMDGPU_FENCE_OWNER_UNDEFINED;

        nptes = last - start + 1;

        /*
         * reserve space for one command every (1 << BLOCK_SIZE)
         * entries or 2k dwords (whatever is smaller)
         */
        ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

        /* padding, etc. */
        ndw = 64;

        if ((gtt == &adev->gart) && (flags == gtt_flags)) {
                /* only copy commands needed */
                ndw += ncmds * 7;

        } else if (gtt) {
                /* header for write data commands */
                ndw += ncmds * 4;

                /* body of write data command */
                ndw += nptes * 2;

        } else {
                /* set page commands needed */
                ndw += ncmds * 10;

                /* two extra commands for begin/end of fragment */
                ndw += 2 * 10;
        }

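        /*
         * Illustrative budget, assuming a 9-bit block size: a 1MB GTT
         * mapping (256 PTEs) with flags different from gtt_flags takes
         * ncmds = (256 >> 9) + 1 = 1, so ndw = 64 + 1 * 4 + 256 * 2 = 580
         * dwords, i.e. an IB allocation of 580 * 4 = 2320 bytes below.
         */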
        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
                             owner);
        if (r)
                goto error_free;

        r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
        if (r)
                goto error_free;

        amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
                              addr, flags);

        amdgpu_ring_pad_ib(ring, ib);
        WARN_ON(ib->length_dw > ndw);
        r = amdgpu_job_submit(job, ring, &vm->entity,
                              AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;

        amdgpu_bo_fence(vm->page_directory, f, true);
        if (fence) {
                fence_put(*fence);
                *fence = fence_get(f);
        }
        fence_put(f);
        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct amdgpu_gart *gtt,
                                      uint32_t gtt_flags,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                                      uint64_t addr, struct fence **fence)
{
        const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
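
        /*
         * Worked example: with 4KB GPU pages max_size is 16384 pages
         * (64MB of address space), so e.g. a 200MB mapping is split into
         * four amdgpu_vm_bo_update_mapping() calls below
         * (64 + 64 + 64 + 8 MB).
         */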

        uint64_t start = mapping->it.start;
        uint32_t flags = gtt_flags;
        int r;

        /*
         * normally bo_va->flags only contains the READABLE and WRITEABLE
         * bits, but we filter the flags here just in case
         */
        if (!(mapping->flags & AMDGPU_PTE_READABLE))
                flags &= ~AMDGPU_PTE_READABLE;
        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                flags &= ~AMDGPU_PTE_WRITEABLE;

        trace_amdgpu_vm_bo_update(mapping);

        addr += mapping->offset;

        if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
                return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
                                                   start, mapping->it.last,
                                                   flags, addr, fence);

        while (start != mapping->it.last + 1) {
                uint64_t last;

                last = min((uint64_t)mapping->it.last, start + max_size);
                r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
                        return r;

                start = last + 1;
                addr += max_size;
        }

        return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and the mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
{
        struct amdgpu_vm *vm = bo_va->vm;
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_gart *gtt = NULL;
        uint32_t flags;
        uint64_t addr;
        int r;

        if (mem) {
                addr = (u64)mem->start << PAGE_SHIFT;
                switch (mem->mem_type) {
                case TTM_PL_TT:
                        gtt = &bo_va->bo->adev->gart;
                        break;

                case TTM_PL_VRAM:
                        addr += adev->vm_manager.vram_base_offset;
                        break;

                default:
                        break;
                }
        } else {
                addr = 0;
        }

        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

        spin_lock(&vm->status_lock);
        if (!list_empty(&bo_va->vm_status))
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        spin_unlock(&vm->status_lock);

        list_for_each_entry(mapping, &bo_va->invalids, list) {
                r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
                                               &bo_va->last_pt_update);
                if (r)
                        return r;
        }

        if (trace_amdgpu_vm_bo_mapping_enabled()) {
                list_for_each_entry(mapping, &bo_va->valids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);

                list_for_each_entry(mapping, &bo_va->invalids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);
        }

        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
        if (!mem)
                list_add(&bo_va->vm_status, &vm->cleared);
        spin_unlock(&vm->status_lock);

        return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *mapping;
        int r;

        spin_lock(&vm->freed_lock);
        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
                spin_unlock(&vm->freed_lock);
                r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
                                               0, NULL);
                kfree(mapping);
                if (r)
                        return r;

                spin_lock(&vm->freed_lock);
        }
        spin_unlock(&vm->freed_lock);

        return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
                             struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
        struct amdgpu_bo_va *bo_va = NULL;
        int r = 0;

        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->invalidated)) {
                bo_va = list_first_entry(&vm->invalidated,
                        struct amdgpu_bo_va, vm_status);
                spin_unlock(&vm->status_lock);
                mutex_lock(&bo_va->mutex);
                r = amdgpu_vm_bo_update(adev, bo_va, NULL);
                mutex_unlock(&bo_va->mutex);
                if (r)
                        return r;

                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);

        if (bo_va)
                r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

        return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
        if (bo_va == NULL) {
                return NULL;
        }
        bo_va->vm = vm;
        bo_va->bo = bo;
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
        INIT_LIST_HEAD(&bo_va->vm_status);
        mutex_init(&bo_va->mutex);
        list_add_tail(&bo_va->bo_list, &bo->va);

        return bo_va;
}
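
/*
 * Typical usage sketch (the exact call sites are in the GEM VA ioctl
 * handling and command submission paths): a BO is first tracked with
 * amdgpu_vm_bo_add(), an address range is assigned with
 * amdgpu_vm_bo_map(), and the page tables are actually filled in
 * later through amdgpu_vm_bo_update().
 */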

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t saddr, uint64_t offset,
                     uint64_t size, uint32_t flags)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_vm *vm = bo_va->vm;
        struct interval_tree_node *it;
        unsigned last_pfn, pt_idx;
        uint64_t eaddr;
        int r;

        /* validate the parameters */
        if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
            size == 0 || size & AMDGPU_GPU_PAGE_MASK)
                return -EINVAL;

        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
                return -EINVAL;

        last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
        if (last_pfn >= adev->vm_manager.max_pfn) {
                dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
                        last_pfn, adev->vm_manager.max_pfn);
                return -EINVAL;
        }

        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;

        spin_lock(&vm->it_lock);
        it = interval_tree_iter_first(&vm->va, saddr, eaddr);
        spin_unlock(&vm->it_lock);
        if (it) {
                struct amdgpu_bo_va_mapping *tmp;
                tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
                /* bo and tmp overlap, invalid addr */
                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
                        "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
                        tmp->it.start, tmp->it.last + 1);
                r = -EINVAL;
                goto error;
        }

        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping) {
                r = -ENOMEM;
                goto error;
        }

        INIT_LIST_HEAD(&mapping->list);
        mapping->it.start = saddr;
        mapping->it.last = eaddr;
        mapping->offset = offset;
        mapping->flags = flags;

        mutex_lock(&bo_va->mutex);
        list_add(&mapping->list, &bo_va->invalids);
        mutex_unlock(&bo_va->mutex);
        spin_lock(&vm->it_lock);
        interval_tree_insert(&mapping->it, &vm->va);
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_map(bo_va, mapping);

        /* Make sure the page tables are allocated */
        saddr >>= amdgpu_vm_block_size;
        eaddr >>= amdgpu_vm_block_size;

        BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

        if (eaddr > vm->max_pde_used)
                vm->max_pde_used = eaddr;

        /* walk over the address space and allocate the page tables */
        for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
                struct reservation_object *resv = vm->page_directory->tbo.resv;
                struct amdgpu_bo_list_entry *entry;
                struct amdgpu_bo *pt;

                entry = &vm->page_tables[pt_idx].entry;
                if (entry->robj)
                        continue;

                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                                     NULL, resv, &pt);
                if (r)
                        goto error_free;

                /* Keep a reference to the page table to avoid freeing
                 * them up in the wrong order.
                 */
                pt->parent = amdgpu_bo_ref(vm->page_directory);

                r = amdgpu_vm_clear_bo(adev, vm, pt);
                if (r) {
                        amdgpu_bo_unref(&pt);
                        goto error_free;
                }

                entry->robj = pt;
                entry->priority = 0;
                entry->tv.bo = &entry->robj->tbo;
                entry->tv.shared = true;
                vm->page_tables[pt_idx].addr = 0;
        }

        return 0;

error_free:
        list_del(&mapping->list);
        spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
        kfree(mapping);

error:
        return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t saddr)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_vm *vm = bo_va->vm;
        bool valid = true;

        saddr /= AMDGPU_GPU_PAGE_SIZE;
        mutex_lock(&bo_va->mutex);
        list_for_each_entry(mapping, &bo_va->valids, list) {
                if (mapping->it.start == saddr)
                        break;
        }

        if (&mapping->list == &bo_va->valids) {
                valid = false;

                list_for_each_entry(mapping, &bo_va->invalids, list) {
                        if (mapping->it.start == saddr)
                                break;
                }

                if (&mapping->list == &bo_va->invalids) {
                        mutex_unlock(&bo_va->mutex);
                        return -ENOENT;
                }
        }
        mutex_unlock(&bo_va->mutex);
        list_del(&mapping->list);
        spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);

        if (valid) {
                spin_lock(&vm->freed_lock);
                list_add(&mapping->list, &vm->freed);
                spin_unlock(&vm->freed_lock);
        } else {
                kfree(mapping);
        }

        return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va)
{
        struct amdgpu_bo_va_mapping *mapping, *next;
        struct amdgpu_vm *vm = bo_va->vm;

        list_del(&bo_va->bo_list);

        spin_lock(&vm->status_lock);
        list_del(&bo_va->vm_status);
        spin_unlock(&vm->status_lock);

        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                list_del(&mapping->list);
                spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
                spin_unlock(&vm->it_lock);
                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                spin_lock(&vm->freed_lock);
                list_add(&mapping->list, &vm->freed);
                spin_unlock(&vm->freed_lock);
        }
        list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
                list_del(&mapping->list);
                spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
                spin_unlock(&vm->it_lock);
                kfree(mapping);
        }
        fence_put(bo_va->last_pt_update);
        mutex_destroy(&bo_va->mutex);
        kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                spin_lock(&bo_va->vm->status_lock);
                if (list_empty(&bo_va->vm_status))
                        list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
                spin_unlock(&bo_va->vm->status_lock);
        }
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
        unsigned pd_size, pd_entries;
        unsigned ring_instance;
        struct amdgpu_ring *ring;
        struct amd_sched_rq *rq;
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                vm->ids[i].id = 0;
                vm->ids[i].flushed_updates = NULL;
        }
        vm->va = RB_ROOT;
        spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->cleared);
        INIT_LIST_HEAD(&vm->freed);
        spin_lock_init(&vm->it_lock);
        spin_lock_init(&vm->freed_lock);
        pd_size = amdgpu_vm_directory_size(adev);
        pd_entries = amdgpu_vm_num_pdes(adev);

        /* allocate page table array */
        vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
        }

        /* create scheduler entity for page table updates */
        ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
        ring_instance %= adev->vm_manager.vm_pte_num_rings;
        ring = adev->vm_manager.vm_pte_rings[ring_instance];
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
        r = amd_sched_entity_init(&ring->sched, &vm->entity,
                                  rq, amdgpu_sched_jobs);
        if (r)
                return r;

        vm->page_directory_fence = NULL;

        r = amdgpu_bo_create(adev, pd_size, align, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                             NULL, NULL, &vm->page_directory);
        if (r)
                goto error_free_sched_entity;

        r = amdgpu_bo_reserve(vm->page_directory, false);
        if (r)
                goto error_free_page_directory;

        r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
        amdgpu_bo_unreserve(vm->page_directory);
        if (r)
                goto error_free_page_directory;

        return 0;

error_free_page_directory:
        amdgpu_bo_unref(&vm->page_directory);
        vm->page_directory = NULL;

error_free_sched_entity:
        amd_sched_entity_fini(&ring->sched, &vm->entity);

        return r;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        int i;

        amd_sched_entity_fini(vm->entity.sched, &vm->entity);

        if (!RB_EMPTY_ROOT(&vm->va)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }
        rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
                list_del(&mapping->list);
                interval_tree_remove(&mapping->it, &vm->va);
                kfree(mapping);
        }
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                list_del(&mapping->list);
                kfree(mapping);
        }

        for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
                amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
        drm_free_large(vm->page_tables);

        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                unsigned id = vm->ids[i].id;

                atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
                                    (long)vm, 0);
                fence_put(vm->ids[i].flushed_updates);
        }
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
        unsigned i;

        INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < adev->vm_manager.num_ids; ++i)
                list_add_tail(&adev->vm_manager.ids[i].list,
                              &adev->vm_manager.ids_lru);

        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
        unsigned i;

        for (i = 0; i < AMDGPU_NUM_VM; ++i)
                fence_put(adev->vm_manager.ids[i].active);
}