/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

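/*
 * Translation here is single level: a GPU page number selects its page
 * directory entry with (pfn >> amdgpu_vm_block_size) and the slot inside
 * that page table with (pfn & (AMDGPU_VM_PTE_COUNT - 1)); see
 * amdgpu_vm_alloc_pts() and amdgpu_vm_update_ptes() below.
 */
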
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* indicate update pt or its shadow */
	bool shadow;
};
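
/* Note: func is picked in amdgpu_vm_bo_update_mapping() below:
 * amdgpu_vm_do_copy_ptes when the entries are staged in GART/IB memory,
 * amdgpu_vm_do_set_ptes when they can be written directly.
 */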

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	uint64_t num_evictions;
	unsigned i;
	int r;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return 0;

	/* add the vm page table to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo *bo = vm->page_tables[i].bo;

		if (!bo)
			continue;

		r = validate(param, bo);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo *bo = vm->page_tables[i].bo;

		if (!bo)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo);
	}
	spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				     AMDGPU_GEM_CREATE_SHADOW |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
				     AMDGPU_GEM_CREATE_VRAM_CLEARED,
				     NULL, resv, &pt);
		if (r)
			return r;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		vm->page_tables[pt_idx].bo = pt;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;
}

static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
				   struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter) ? true : false;
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that will use the allocated VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct dma_fence **fences;
	unsigned i;
	int r = 0;

	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
			       GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	mutex_lock(&adev->vm_manager.lock);

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &adev->vm_manager.ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		dma_fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	}
	kfree(fences);

	job->vm_needs_flush = true;
	/* Check if we can use a VMID already assigned to this VM */
	i = ring->idx;
	do {
		struct dma_fence *flushed;

		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;
		if (amdgpu_vm_is_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush)
			continue;

		if (id->last_flush->context != fence_context &&
		    !dma_fence_is_signaled(id->last_flush))
			continue;

		flushed  = id->flushed_updates;
		if (updates &&
		    (!flushed || dma_fence_is_later(updates, flushed)))
			continue;

		/* Good we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;

		job->vm_id = id - adev->vm_manager.ids;
		job->vm_needs_flush = false;
		trace_amdgpu_vm_grab_id(vm, ring->idx, job);

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	} while (i != ring->idx);

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	dma_fence_put(id->first);
	id->first = dma_fence_get(fence);

	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);

	id->pd_gpu_addr = job->vm_pd_addr;
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic64_set(&id->owner, vm->client_id);
	vm->ids[ring->idx] = id;

	job->vm_id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);

error:
	mutex_unlock(&adev->vm_manager.lock);
	return r;
}

static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block *ip_block;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->version->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->version->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job carrying the VM id, page directory address and GDS information
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    job->vm_needs_flush || gds_switch_needed ||
	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
	    amdgpu_vm_is_gpu_reset(adev, id))) {
		struct dma_fence *fence;

		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}

	if (gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
					    job->gds_base, job->gds_size,
					    job->gws_base, job->gws_size,
					    job->oa_base, job->oa_size);
	}

	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);

	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);

	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

/*
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_bo *shadow;
	struct amdgpu_ring *ring;
	uint64_t pd_addr, shadow_addr;
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *fence = NULL;

	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
	shadow = vm->page_directory->shadow;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	if (shadow) {
		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
		if (r)
			return r;
		shadow_addr = amdgpu_bo_gpu_offset(shadow);
		ndw *= 2;
	} else {
		shadow_addr = 0;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		if (bo->shadow) {
			struct amdgpu_bo *pt_shadow = bo->shadow;

			r = amdgpu_ttm_bind(&pt_shadow->tbo,
					    &pt_shadow->tbo.mem);
			if (r)
				return r;
		}

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;

		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt) ||
		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

			if (count) {
				if (shadow)
					amdgpu_vm_do_set_ptes(&params,
							      last_shadow,
							      last_pt, count,
							      incr,
							      AMDGPU_PTE_VALID);

				amdgpu_vm_do_set_ptes(&params, last_pde,
						      last_pt, count, incr,
						      AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_shadow = shadow_addr + pt_idx * 8;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		if (vm->page_directory->shadow)
			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
					      count, incr, AMDGPU_PTE_VALID);

		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
				      count, incr, AMDGPU_PTE_VALID);
	}

	if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
		return 0;
	}

	amdgpu_ring_pad_ib(ring, params.ib);
	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			 AMDGPU_FENCE_OWNER_VM);
	if (shadow)
		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);

	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, fence, true);
	dma_fence_put(vm->page_directory_fence);
	vm->page_directory_fence = dma_fence_get(fence);
	dma_fence_put(fence);

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				  struct amdgpu_vm *vm,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint64_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

	uint64_t cur_pe_start, cur_nptes, cur_dst;
	uint64_t addr; /* next GPU address to be updated */
	uint64_t pt_idx;
	struct amdgpu_bo *pt;
	unsigned nptes; /* next number of ptes to be updated */
	uint64_t next_pe_start;

	/* initialize the variables */
	addr = start;
	pt_idx = addr >> amdgpu_vm_block_size;
	pt = vm->page_tables[pt_idx].bo;
	if (params->shadow) {
		if (!pt->shadow)
			return;
		pt = pt->shadow;
	}
	if ((addr & ~mask) == (end & ~mask))
		nptes = end - addr;
	else
		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

	cur_pe_start = amdgpu_bo_gpu_offset(pt);
	cur_pe_start += (addr & mask) * 8;
	cur_nptes = nptes;
	cur_dst = dst;

	/* for next ptb */
	addr += nptes;
	dst += nptes * AMDGPU_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	while (addr < end) {
		pt_idx = addr >> amdgpu_vm_block_size;
		pt = vm->page_tables[pt_idx].bo;
		if (params->shadow) {
			if (!pt->shadow)
				return;
			pt = pt->shadow;
		}

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		next_pe_start = amdgpu_bo_gpu_offset(pt);
		next_pe_start += (addr & mask) * 8;

		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
			/* The next ptb is consecutive to current ptb.
			 * Don't call the update function now.
			 * Will update two ptbs together in future.
			 */
			cur_nptes += nptes;
		} else {
			params->func(params, cur_pe_start, cur_dst, cur_nptes,
				     AMDGPU_GPU_PAGE_SIZE, flags);

			cur_pe_start = next_pe_start;
			cur_nptes = nptes;
			cur_dst = dst;
		}

		/* for next ptb */
		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	params->func(params, cur_pe_start, cur_dst, cur_nptes,
		     AMDGPU_GPU_PAGE_SIZE, flags);
}

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
				struct amdgpu_vm *vm,
				uint64_t start, uint64_t end,
				uint64_t dst, uint64_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

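	/* Worked example: a fragment value of 4 yields 1 << (12 + 4) = 64KB
	 * pages, i.e. 16 contiguous 4KB GPU pages covered by one fragment;
	 * this is the 64KB optimization noted below.
	 */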
	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;

	uint64_t frag_start = ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	/* system pages are non contiguous */
	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (start != frag_start) {
		amdgpu_vm_update_ptes(params, vm, start, frag_start,
				      dst, flags);
		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
	}

	/* handle the area in the middle */
	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
			      flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != end) {
		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
	}
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct dma_fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       struct dma_fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.src = src;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
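	/* i.e. one command per at most min(1 << amdgpu_vm_block_size, 2048)
	 * PTEs, plus one command for any remainder.
	 */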
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

		params.func = amdgpu_vm_do_copy_ptes;

	} else if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * 7;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;

		params.func = amdgpu_vm_do_set_ptes;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (!src && pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	params.shadow = true;
	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
	params.shadow = false;
	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	dma_fence_put(*fence);
	*fence = f;
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      uint64_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	uint64_t pfn, src = 0, start = mapping->it.start;
	int r;

	/* Normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here in the first place.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			if (flags == gtt_flags)
				src = adev->gart.table_addr +
					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
			else
				max_entries = min(max_entries, 16ull * 1024ull);
			addr = 0;
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
		}
		addr += pfn << PAGE_SHIFT;

		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
		start = last + 1;

	} while (unlikely(start != mapping->it.last + 1));

	return 0;
}

1174/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001175 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1176 *
1177 * @adev: amdgpu_device pointer
1178 * @bo_va: requested BO and VM object
Christian König99e124f2016-08-16 14:43:17 +02001179 * @clear: if true clear the entries
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001180 *
1181 * Fill in the page table entries for @bo_va.
1182 * Returns 0 for success, -EINVAL for failure.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001183 */
1184int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1185 struct amdgpu_bo_va *bo_va,
Christian König99e124f2016-08-16 14:43:17 +02001186 bool clear)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001187{
1188 struct amdgpu_vm *vm = bo_va->vm;
1189 struct amdgpu_bo_va_mapping *mapping;
Christian König8358dce2016-03-30 10:50:25 +02001190 dma_addr_t *pages_addr = NULL;
Chunming Zhou6b777602016-09-21 16:19:19 +08001191 uint64_t gtt_flags, flags;
Christian König99e124f2016-08-16 14:43:17 +02001192 struct ttm_mem_reg *mem;
Christian König63e0ba42016-08-16 17:38:37 +02001193 struct drm_mm_node *nodes;
Chris Wilsonf54d1862016-10-25 13:00:45 +01001194 struct dma_fence *exclusive;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001195 int r;
1196
Christian Königa5f6b5b2017-01-30 11:01:38 +01001197 if (clear || !bo_va->bo) {
Christian König99e124f2016-08-16 14:43:17 +02001198 mem = NULL;
Christian König63e0ba42016-08-16 17:38:37 +02001199 nodes = NULL;
Christian König99e124f2016-08-16 14:43:17 +02001200 exclusive = NULL;
1201 } else {
Christian König8358dce2016-03-30 10:50:25 +02001202 struct ttm_dma_tt *ttm;
1203
Christian König99e124f2016-08-16 14:43:17 +02001204 mem = &bo_va->bo->tbo.mem;
Christian König63e0ba42016-08-16 17:38:37 +02001205 nodes = mem->mm_node;
1206 if (mem->mem_type == TTM_PL_TT) {
Christian König8358dce2016-03-30 10:50:25 +02001207 ttm = container_of(bo_va->bo->tbo.ttm, struct
1208 ttm_dma_tt, ttm);
1209 pages_addr = ttm->dma_address;
Christian König9ab21462015-11-30 14:19:26 +01001210 }
Christian König3cabaa52016-06-06 10:17:58 +02001211 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001212 }
1213
Christian Königa5f6b5b2017-01-30 11:01:38 +01001214 if (bo_va->bo) {
1215 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1216 gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1217 adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
1218 flags : 0;
1219 } else {
1220 flags = 0x0;
1221 gtt_flags = ~0x0;
1222 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001223
Christian König7fc11952015-07-30 11:53:42 +02001224 spin_lock(&vm->status_lock);
1225 if (!list_empty(&bo_va->vm_status))
1226 list_splice_init(&bo_va->valids, &bo_va->invalids);
1227 spin_unlock(&vm->status_lock);
1228
1229 list_for_each_entry(mapping, &bo_va->invalids, list) {
Christian König3cabaa52016-06-06 10:17:58 +02001230 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1231 gtt_flags, pages_addr, vm,
Christian König63e0ba42016-08-16 17:38:37 +02001232 mapping, flags, nodes,
Christian König8358dce2016-03-30 10:50:25 +02001233 &bo_va->last_pt_update);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001234 if (r)
1235 return r;
1236 }
1237
Christian Königd6c10f62015-09-28 12:00:23 +02001238 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1239 list_for_each_entry(mapping, &bo_va->valids, list)
1240 trace_amdgpu_vm_bo_mapping(mapping);
1241
1242 list_for_each_entry(mapping, &bo_va->invalids, list)
1243 trace_amdgpu_vm_bo_mapping(mapping);
1244 }
1245
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001246 spin_lock(&vm->status_lock);
monk.liu6d1d0ef2015-08-14 13:36:41 +08001247 list_splice_init(&bo_va->invalids, &bo_va->valids);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001248 list_del_init(&bo_va->vm_status);
Christian König99e124f2016-08-16 14:43:17 +02001249 if (clear)
Christian König7fc11952015-07-30 11:53:42 +02001250 list_add(&bo_va->vm_status, &vm->cleared);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001251 spin_unlock(&vm->status_lock);
1252
1253 return 0;
1254}
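
/*
 * Illustrative usage sketch, not driver code: a caller that has reserved
 * the BO and the page tables could refresh a single bo_va like this
 * ("sync" is a hypothetical amdgpu_sync owned by the caller):
 *
 *	r = amdgpu_vm_bo_update(adev, bo_va, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_sync_fence(adev, &sync, bo_va->last_pt_update);
 *
 * Passing clear == true writes cleared entries instead, which is what
 * amdgpu_vm_clear_invalids() below does for BOs on the invalidated list.
 */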
1255
1256/**
Christian König284710f2017-01-30 11:09:31 +01001257 * amdgpu_vm_update_prt_state - update the global PRT state
1258 */
1259static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1260{
1261 unsigned long flags;
1262 bool enable;
1263
1264 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
Christian König451bc8e2017-02-14 16:02:52 +01001265 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
Christian König284710f2017-01-30 11:09:31 +01001266 adev->gart.gart_funcs->set_prt(adev, enable);
1267 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1268}
1269
1270/**
Christian König4388fc22017-03-13 10:13:36 +01001271 * amdgpu_vm_prt_get - add a PRT user
Christian König451bc8e2017-02-14 16:02:52 +01001272 */
1273static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1274{
Christian König4388fc22017-03-13 10:13:36 +01001275 if (!adev->gart.gart_funcs->set_prt)
1276 return;
1277
Christian König451bc8e2017-02-14 16:02:52 +01001278 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1279 amdgpu_vm_update_prt_state(adev);
1280}
1281
1282/**
Christian König0b15f2f2017-02-14 15:47:03 +01001283 * amdgpu_vm_prt_put - drop a PRT user
1284 */
1285static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1286{
Christian König451bc8e2017-02-14 16:02:52 +01001287 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
Christian König0b15f2f2017-02-14 15:47:03 +01001288 amdgpu_vm_update_prt_state(adev);
1289}
1290
1291/**
Christian König451bc8e2017-02-14 16:02:52 +01001292 * amdgpu_vm_prt_cb - callback for updating the PRT status
Christian König284710f2017-01-30 11:09:31 +01001293 */
1294static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1295{
1296 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1297
Christian König0b15f2f2017-02-14 15:47:03 +01001298 amdgpu_vm_prt_put(cb->adev);
Christian König284710f2017-01-30 11:09:31 +01001299 kfree(cb);
1300}
1301
1302/**
Christian König451bc8e2017-02-14 16:02:52 +01001303 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1304 */
1305static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1306 struct dma_fence *fence)
1307{
Christian König4388fc22017-03-13 10:13:36 +01001308 struct amdgpu_prt_cb *cb;
Christian König451bc8e2017-02-14 16:02:52 +01001309
Christian König4388fc22017-03-13 10:13:36 +01001310 if (!adev->gart.gart_funcs->set_prt)
1311 return;
1312
1313 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
Christian König451bc8e2017-02-14 16:02:52 +01001314 if (!cb) {
1315 /* Last resort when we are OOM */
1316 if (fence)
1317 dma_fence_wait(fence, false);
1318
1319 amdgpu_vm_prt_put(adev);
1320 } else {
1321 cb->adev = adev;
1322 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1323 amdgpu_vm_prt_cb))
1324 amdgpu_vm_prt_cb(fence, &cb->cb);
1325 }
1326}
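
/*
 * Rough sketch of the PRT bookkeeping above (illustrative only): a PRT
 * mapping takes a reference when it is created and only drops it once the
 * GPU has finished clearing the corresponding PTEs:
 *
 *	amdgpu_vm_bo_map(..., flags | AMDGPU_PTE_PRT)
 *		-> amdgpu_vm_prt_get()		enables PRT on first user
 *	amdgpu_vm_bo_unmap(...)			mapping moves to vm->freed
 *	amdgpu_vm_clear_freed(...)		PTEs cleared, fence f
 *		-> amdgpu_vm_add_prt_cb(adev, f)
 *		-> amdgpu_vm_prt_cb()		amdgpu_vm_prt_put() once f signals
 */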
1327
1328/**
Christian König284710f2017-01-30 11:09:31 +01001329 * amdgpu_vm_free_mapping - free a mapping
1330 *
1331 * @adev: amdgpu_device pointer
1332 * @vm: requested vm
1333 * @mapping: mapping to be freed
1334 * @fence: fence of the unmap operation
1335 *
1336 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1337 */
1338static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1339 struct amdgpu_vm *vm,
1340 struct amdgpu_bo_va_mapping *mapping,
1341 struct dma_fence *fence)
1342{
Christian König451bc8e2017-02-14 16:02:52 +01001343 if (mapping->flags & AMDGPU_PTE_PRT)
1344 amdgpu_vm_add_prt_cb(adev, fence);
Christian König284710f2017-01-30 11:09:31 +01001345 kfree(mapping);
1346}
1347
1348/**
Christian König451bc8e2017-02-14 16:02:52 +01001349 * amdgpu_vm_prt_fini - finish all prt mappings
1350 *
1351 * @adev: amdgpu_device pointer
1352 * @vm: requested vm
1353 *
1354 * Register a cleanup callback to disable PRT support after the VM dies.
1355 */
1356static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1357{
1358 struct reservation_object *resv = vm->page_directory->tbo.resv;
1359 struct dma_fence *excl, **shared;
1360 unsigned i, shared_count;
1361 int r;
1362
1363 r = reservation_object_get_fences_rcu(resv, &excl,
1364 &shared_count, &shared);
1365 if (r) {
1366 /* Not enough memory to grab the fence list; as a last resort,
1367 * block until all the fences complete.
1368 */
1369 reservation_object_wait_timeout_rcu(resv, true, false,
1370 MAX_SCHEDULE_TIMEOUT);
1371 return;
1372 }
1373
1374 /* Add a callback for each fence in the reservation object */
1375 amdgpu_vm_prt_get(adev);
1376 amdgpu_vm_add_prt_cb(adev, excl);
1377
1378 for (i = 0; i < shared_count; ++i) {
1379 amdgpu_vm_prt_get(adev);
1380 amdgpu_vm_add_prt_cb(adev, shared[i]);
1381 }
1382
1383 kfree(shared);
1384}
1385
1386/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001387 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1388 *
1389 * @adev: amdgpu_device pointer
1390 * @vm: requested vm
Nicolai Hähnlef3467812017-03-23 19:36:31 +01001391 * @fence: optional resulting fence (unchanged if no work needed to be done
1392 * or if an error occurred)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001393 *
1394 * Make sure all freed BOs are cleared in the PT.
1395 * Returns 0 for success.
1396 *
1397 * PTs have to be reserved and mutex must be locked!
1398 */
1399int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
Nicolai Hähnlef3467812017-03-23 19:36:31 +01001400 struct amdgpu_vm *vm,
1401 struct dma_fence **fence)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001402{
1403 struct amdgpu_bo_va_mapping *mapping;
Nicolai Hähnlef3467812017-03-23 19:36:31 +01001404 struct dma_fence *f = NULL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001405 int r;
1406
1407 while (!list_empty(&vm->freed)) {
1408 mapping = list_first_entry(&vm->freed,
1409 struct amdgpu_bo_va_mapping, list);
1410 list_del(&mapping->list);
Christian Könige17841b2016-03-08 17:52:01 +01001411
Christian König3cabaa52016-06-06 10:17:58 +02001412 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
Nicolai Hähnlef3467812017-03-23 19:36:31 +01001413 0, 0, &f);
1414 amdgpu_vm_free_mapping(adev, vm, mapping, f);
Christian König284710f2017-01-30 11:09:31 +01001415 if (r) {
Nicolai Hähnlef3467812017-03-23 19:36:31 +01001416 dma_fence_put(f);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001417 return r;
Christian König284710f2017-01-30 11:09:31 +01001418 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001419 }
Nicolai Hähnlef3467812017-03-23 19:36:31 +01001420
1421 if (fence && f) {
1422 dma_fence_put(*fence);
1423 *fence = f;
1424 } else {
1425 dma_fence_put(f);
1426 }
1427
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001428 return 0;
1429
1430}
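
/*
 * Illustrative usage sketch, not driver code: with the page tables
 * reserved, a caller could flush the freed mappings and chain the
 * resulting fence into a hypothetical amdgpu_sync it owns:
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *	if (!r && fence)
 *		r = amdgpu_sync_fence(adev, &sync, fence);
 *	dma_fence_put(fence);
 */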
1431
1432/**
1433 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1434 *
1435 * @adev: amdgpu_device pointer
1436 * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
1437 *
1438 * Make sure all invalidated BOs are cleared in the PT.
1439 * Returns 0 for success.
1440 *
1441 * PTs have to be reserved and mutex must be locked!
1442 */
1443int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
monk.liucfe2c972015-05-26 15:01:54 +08001444 struct amdgpu_vm *vm, struct amdgpu_sync *sync)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001445{
monk.liucfe2c972015-05-26 15:01:54 +08001446 struct amdgpu_bo_va *bo_va = NULL;
Christian König91e1a522015-07-06 22:06:40 +02001447 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001448
1449 spin_lock(&vm->status_lock);
1450 while (!list_empty(&vm->invalidated)) {
1451 bo_va = list_first_entry(&vm->invalidated,
1452 struct amdgpu_bo_va, vm_status);
1453 spin_unlock(&vm->status_lock);
Christian König32b41ac2016-03-08 18:03:27 +01001454
Christian König99e124f2016-08-16 14:43:17 +02001455 r = amdgpu_vm_bo_update(adev, bo_va, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001456 if (r)
1457 return r;
1458
1459 spin_lock(&vm->status_lock);
1460 }
1461 spin_unlock(&vm->status_lock);
1462
monk.liucfe2c972015-05-26 15:01:54 +08001463 if (bo_va)
Chunming Zhoubb1e38a42015-08-03 18:19:38 +08001464 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
Christian König91e1a522015-07-06 22:06:40 +02001465
1466 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001467}
1468
1469/**
1470 * amdgpu_vm_bo_add - add a bo to a specific vm
1471 *
1472 * @adev: amdgpu_device pointer
1473 * @vm: requested vm
1474 * @bo: amdgpu buffer object
1475 *
Christian König8843dbb2016-01-26 12:17:11 +01001476 * Add @bo into the requested vm.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001477 * Add @bo to the list of bos associated with the vm
1478 * Returns newly added bo_va or NULL for failure
1479 *
1480 * Object has to be reserved!
1481 */
1482struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1483 struct amdgpu_vm *vm,
1484 struct amdgpu_bo *bo)
1485{
1486 struct amdgpu_bo_va *bo_va;
1487
1488 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1489 if (bo_va == NULL) {
1490 return NULL;
1491 }
1492 bo_va->vm = vm;
1493 bo_va->bo = bo;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001494 bo_va->ref_count = 1;
1495 INIT_LIST_HEAD(&bo_va->bo_list);
Christian König7fc11952015-07-30 11:53:42 +02001496 INIT_LIST_HEAD(&bo_va->valids);
1497 INIT_LIST_HEAD(&bo_va->invalids);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001498 INIT_LIST_HEAD(&bo_va->vm_status);
Christian König32b41ac2016-03-08 18:03:27 +01001499
Christian Königa5f6b5b2017-01-30 11:01:38 +01001500 if (bo)
1501 list_add_tail(&bo_va->bo_list, &bo->va);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001502
1503 return bo_va;
1504}
1505
1506/**
1507 * amdgpu_vm_bo_map - map bo inside a vm
1508 *
1509 * @adev: amdgpu_device pointer
1510 * @bo_va: bo_va to store the address
1511 * @saddr: where to map the BO
1512 * @offset: requested offset in the BO
 * @size: BO size in bytes
1513 * @flags: attributes of pages (read/write/valid/etc.)
1514 *
1515 * Add a mapping of the BO at the specified addr into the VM.
1516 * Returns 0 for success, error for failure.
1517 *
Chunming Zhou49b02b12015-11-13 14:18:38 +08001518 * Object has to be reserved and unreserved outside!
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001519 */
1520int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1521 struct amdgpu_bo_va *bo_va,
1522 uint64_t saddr, uint64_t offset,
Christian König268c3002017-01-18 14:49:43 +01001523 uint64_t size, uint64_t flags)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001524{
1525 struct amdgpu_bo_va_mapping *mapping;
1526 struct amdgpu_vm *vm = bo_va->vm;
1527 struct interval_tree_node *it;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001528 uint64_t eaddr;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001529
Christian König0be52de2015-05-18 14:37:27 +02001530 /* validate the parameters */
1531 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
Chunming Zhou49b02b12015-11-13 14:18:38 +08001532 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
Christian König0be52de2015-05-18 14:37:27 +02001533 return -EINVAL;
Christian König0be52de2015-05-18 14:37:27 +02001534
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001535 /* make sure object fit at this offset */
Felix Kuehling005ae952015-11-23 17:43:48 -05001536 eaddr = saddr + size - 1;
Christian Königa5f6b5b2017-01-30 11:01:38 +01001537 if (saddr >= eaddr ||
1538 (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001539 return -EINVAL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001540
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001541 saddr /= AMDGPU_GPU_PAGE_SIZE;
1542 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1543
Felix Kuehling005ae952015-11-23 17:43:48 -05001544 it = interval_tree_iter_first(&vm->va, saddr, eaddr);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001545 if (it) {
1546 struct amdgpu_bo_va_mapping *tmp;
1547 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1548 /* bo and tmp overlap, invalid addr */
1549 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1550 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1551 tmp->it.start, tmp->it.last + 1);
Christian König663e4572017-03-13 10:13:37 +01001552 return -EINVAL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001553 }
1554
1555 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
Christian König663e4572017-03-13 10:13:37 +01001556 if (!mapping)
1557 return -ENOMEM;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001558
1559 INIT_LIST_HEAD(&mapping->list);
1560 mapping->it.start = saddr;
Felix Kuehling005ae952015-11-23 17:43:48 -05001561 mapping->it.last = eaddr;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001562 mapping->offset = offset;
1563 mapping->flags = flags;
1564
Christian König7fc11952015-07-30 11:53:42 +02001565 list_add(&mapping->list, &bo_va->invalids);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001566 interval_tree_insert(&mapping->it, &vm->va);
1567
Christian König4388fc22017-03-13 10:13:36 +01001568 if (flags & AMDGPU_PTE_PRT)
1569 amdgpu_vm_prt_get(adev);
1570
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001571 return 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001572}
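
/*
 * Illustrative usage sketch, not driver code: mapping a BO into a VM
 * typically pairs amdgpu_vm_bo_add() with amdgpu_vm_bo_map() while the BO
 * is reserved.  The VA, offset and flags below are hypothetical; addresses
 * and sizes have to be AMDGPU_GPU_PAGE_SIZE aligned:
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *
 * The new mapping starts out on bo_va->invalids and only reaches the page
 * tables once amdgpu_vm_bo_update() runs.
 */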
1573
1574/**
Christian König80f95c52017-03-13 10:13:39 +01001575 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1576 *
1577 * @adev: amdgpu_device pointer
1578 * @bo_va: bo_va to store the address
1579 * @saddr: where to map the BO
1580 * @offset: requested offset in the BO
 * @size: BO size in bytes
1581 * @flags: attributes of pages (read/write/valid/etc.)
1582 *
1583 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1584 * mappings as we do so.
1585 * Returns 0 for success, error for failure.
1586 *
1587 * Object has to be reserved and unreserved outside!
1588 */
1589int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1590 struct amdgpu_bo_va *bo_va,
1591 uint64_t saddr, uint64_t offset,
1592 uint64_t size, uint64_t flags)
1593{
1594 struct amdgpu_bo_va_mapping *mapping;
1595 struct amdgpu_vm *vm = bo_va->vm;
1596 uint64_t eaddr;
1597 int r;
1598
1599 /* validate the parameters */
1600 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1601 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1602 return -EINVAL;
1603
1604 /* make sure object fit at this offset */
1605 eaddr = saddr + size - 1;
1606 if (saddr >= eaddr ||
1607 (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
1608 return -EINVAL;
1609
1610 /* Allocate all the needed memory */
1611 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1612 if (!mapping)
1613 return -ENOMEM;
1614
1615 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
1616 if (r) {
1617 kfree(mapping);
1618 return r;
1619 }
1620
1621 saddr /= AMDGPU_GPU_PAGE_SIZE;
1622 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1623
1624 mapping->it.start = saddr;
1625 mapping->it.last = eaddr;
1626 mapping->offset = offset;
1627 mapping->flags = flags;
1628
1629 list_add(&mapping->list, &bo_va->invalids);
1630 interval_tree_insert(&mapping->it, &vm->va);
1631
1632 if (flags & AMDGPU_PTE_PRT)
1633 amdgpu_vm_prt_get(adev);
1634
1635 return 0;
1636}
1637
1638/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001639 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1640 *
1641 * @adev: amdgpu_device pointer
1642 * @bo_va: bo_va to remove the address from
1643 * @saddr: where the BO is mapped
1644 *
1645 * Remove a mapping of the BO at the specified addr from the VM.
1646 * Returns 0 for success, error for failure.
1647 *
Chunming Zhou49b02b12015-11-13 14:18:38 +08001648 * Object has to be reserved and unreserved outside!
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001649 */
1650int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1651 struct amdgpu_bo_va *bo_va,
1652 uint64_t saddr)
1653{
1654 struct amdgpu_bo_va_mapping *mapping;
1655 struct amdgpu_vm *vm = bo_va->vm;
Christian König7fc11952015-07-30 11:53:42 +02001656 bool valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001657
Christian König6c7fc502015-06-05 20:56:17 +02001658 saddr /= AMDGPU_GPU_PAGE_SIZE;
Christian König32b41ac2016-03-08 18:03:27 +01001659
Christian König7fc11952015-07-30 11:53:42 +02001660 list_for_each_entry(mapping, &bo_va->valids, list) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001661 if (mapping->it.start == saddr)
1662 break;
1663 }
1664
Christian König7fc11952015-07-30 11:53:42 +02001665 if (&mapping->list == &bo_va->valids) {
1666 valid = false;
1667
1668 list_for_each_entry(mapping, &bo_va->invalids, list) {
1669 if (mapping->it.start == saddr)
1670 break;
1671 }
1672
Christian König32b41ac2016-03-08 18:03:27 +01001673 if (&mapping->list == &bo_va->invalids)
Christian König7fc11952015-07-30 11:53:42 +02001674 return -ENOENT;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001675 }
Christian König32b41ac2016-03-08 18:03:27 +01001676
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001677 list_del(&mapping->list);
1678 interval_tree_remove(&mapping->it, &vm->va);
Christian König93e3e432015-06-09 16:58:33 +02001679 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001680
Christian Könige17841b2016-03-08 17:52:01 +01001681 if (valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001682 list_add(&mapping->list, &vm->freed);
Christian Könige17841b2016-03-08 17:52:01 +01001683 else
Christian König284710f2017-01-30 11:09:31 +01001684 amdgpu_vm_free_mapping(adev, vm, mapping,
1685 bo_va->last_pt_update);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001686
1687 return 0;
1688}
1689
1690/**
Christian Königdc54d3d2017-03-13 10:13:38 +01001691 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1692 *
1693 * @adev: amdgpu_device pointer
1694 * @vm: VM structure to use
1695 * @saddr: start of the range
1696 * @size: size of the range
1697 *
1698 * Remove all mappings in a range, split them as appropriate.
1699 * Returns 0 for success, error for failure.
1700 */
1701int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1702 struct amdgpu_vm *vm,
1703 uint64_t saddr, uint64_t size)
1704{
1705 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1706 struct interval_tree_node *it;
1707 LIST_HEAD(removed);
1708 uint64_t eaddr;
1709
1710 eaddr = saddr + size - 1;
1711 saddr /= AMDGPU_GPU_PAGE_SIZE;
1712 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1713
1714 /* Allocate all the needed memory */
1715 before = kzalloc(sizeof(*before), GFP_KERNEL);
1716 if (!before)
1717 return -ENOMEM;
Junwei Zhang27f6d612017-03-16 16:09:24 +08001718 INIT_LIST_HEAD(&before->list);
Christian Königdc54d3d2017-03-13 10:13:38 +01001719
1720 after = kzalloc(sizeof(*after), GFP_KERNEL);
1721 if (!after) {
1722 kfree(before);
1723 return -ENOMEM;
1724 }
Junwei Zhang27f6d612017-03-16 16:09:24 +08001725 INIT_LIST_HEAD(&after->list);
Christian Königdc54d3d2017-03-13 10:13:38 +01001726
1727 /* Now gather all removed mappings */
1728 it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1729 while (it) {
1730 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1731 it = interval_tree_iter_next(it, saddr, eaddr);
1732
1733 /* Remember mapping split at the start */
1734 if (tmp->it.start < saddr) {
Junwei Zhang27f6d612017-03-16 16:09:24 +08001735 before->it.start = tmp->it.start;
Christian Königdc54d3d2017-03-13 10:13:38 +01001736 before->it.last = saddr - 1;
1737 before->offset = tmp->offset;
1738 before->flags = tmp->flags;
1739 list_add(&before->list, &tmp->list);
1740 }
1741
1742 /* Remember mapping split at the end */
1743 if (tmp->it.last > eaddr) {
1744 after->it.start = eaddr + 1;
1745 after->it.last = tmp->it.last;
1746 after->offset = tmp->offset;
1747 after->offset += after->it.start - tmp->it.start;
1748 after->flags = tmp->flags;
1749 list_add(&after->list, &tmp->list);
1750 }
1751
1752 list_del(&tmp->list);
1753 list_add(&tmp->list, &removed);
1754 }
1755
1756 /* And free them up */
1757 list_for_each_entry_safe(tmp, next, &removed, list) {
1758 interval_tree_remove(&tmp->it, &vm->va);
1759 list_del(&tmp->list);
1760
1761 if (tmp->it.start < saddr)
1762 tmp->it.start = saddr;
1763 if (tmp->it.last > eaddr)
1764 tmp->it.last = eaddr;
1765
1766 list_add(&tmp->list, &vm->freed);
1767 trace_amdgpu_vm_bo_unmap(NULL, tmp);
1768 }
1769
Junwei Zhang27f6d612017-03-16 16:09:24 +08001770 /* Insert partial mapping before the range */
1771 if (!list_empty(&before->list)) {
Christian Königdc54d3d2017-03-13 10:13:38 +01001772 interval_tree_insert(&before->it, &vm->va);
1773 if (before->flags & AMDGPU_PTE_PRT)
1774 amdgpu_vm_prt_get(adev);
1775 } else {
1776 kfree(before);
1777 }
1778
1779 /* Insert partial mapping after the range */
Junwei Zhang27f6d612017-03-16 16:09:24 +08001780 if (!list_empty(&after->list)) {
Christian Königdc54d3d2017-03-13 10:13:38 +01001781 interval_tree_insert(&after->it, &vm->va);
1782 if (after->flags & AMDGPU_PTE_PRT)
1783 amdgpu_vm_prt_get(adev);
1784 } else {
1785 kfree(after);
1786 }
1787
1788 return 0;
1789}
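
/*
 * Worked example for the function above (hypothetical numbers, in GPU
 * pages): clearing [0x200, 0x2ff] out of an existing mapping that covers
 * [0x100, 0x4ff] leaves three pieces:
 *
 *	before:	[0x100, 0x1ff]	same offset and flags as the original
 *	freed:	[0x200, 0x2ff]	clamped original, unmapped later by
 *				amdgpu_vm_clear_freed()
 *	after:	[0x300, 0x4ff]	offset advanced past the cleared range
 */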
1790
1791/**
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001792 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1793 *
1794 * @adev: amdgpu_device pointer
1795 * @bo_va: requested bo_va
1796 *
Christian König8843dbb2016-01-26 12:17:11 +01001797 * Remove @bo_va->bo from the requested vm.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001798 *
1799 * Object has to be reserved!
1800 */
1801void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1802 struct amdgpu_bo_va *bo_va)
1803{
1804 struct amdgpu_bo_va_mapping *mapping, *next;
1805 struct amdgpu_vm *vm = bo_va->vm;
1806
1807 list_del(&bo_va->bo_list);
1808
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001809 spin_lock(&vm->status_lock);
1810 list_del(&bo_va->vm_status);
1811 spin_unlock(&vm->status_lock);
1812
Christian König7fc11952015-07-30 11:53:42 +02001813 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001814 list_del(&mapping->list);
1815 interval_tree_remove(&mapping->it, &vm->va);
Christian König93e3e432015-06-09 16:58:33 +02001816 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
Christian König7fc11952015-07-30 11:53:42 +02001817 list_add(&mapping->list, &vm->freed);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001818 }
Christian König7fc11952015-07-30 11:53:42 +02001819 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1820 list_del(&mapping->list);
1821 interval_tree_remove(&mapping->it, &vm->va);
Christian König284710f2017-01-30 11:09:31 +01001822 amdgpu_vm_free_mapping(adev, vm, mapping,
1823 bo_va->last_pt_update);
Christian König7fc11952015-07-30 11:53:42 +02001824 }
Christian König32b41ac2016-03-08 18:03:27 +01001825
Chris Wilsonf54d1862016-10-25 13:00:45 +01001826 dma_fence_put(bo_va->last_pt_update);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001827 kfree(bo_va);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001828}
1829
1830/**
1831 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1832 *
1833 * @adev: amdgpu_device pointer
1835 * @bo: amdgpu buffer object
1836 *
Christian König8843dbb2016-01-26 12:17:11 +01001837 * Mark @bo as invalid.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001838 */
1839void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1840 struct amdgpu_bo *bo)
1841{
1842 struct amdgpu_bo_va *bo_va;
1843
1844 list_for_each_entry(bo_va, &bo->va, bo_list) {
Christian König7fc11952015-07-30 11:53:42 +02001845 spin_lock(&bo_va->vm->status_lock);
1846 if (list_empty(&bo_va->vm_status))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001847 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
Christian König7fc11952015-07-30 11:53:42 +02001848 spin_unlock(&bo_va->vm->status_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001849 }
1850}
1851
1852/**
1853 * amdgpu_vm_init - initialize a vm instance
1854 *
1855 * @adev: amdgpu_device pointer
1856 * @vm: requested vm
1857 *
Christian König8843dbb2016-01-26 12:17:11 +01001858 * Init @vm fields.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001859 */
1860int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1861{
1862 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1863 AMDGPU_VM_PTE_COUNT * 8);
Michel Dänzer9571e1d2016-01-19 17:59:46 +09001864 unsigned pd_size, pd_entries;
Christian König2d55e452016-02-08 17:37:38 +01001865 unsigned ring_instance;
1866 struct amdgpu_ring *ring;
Christian König2bd9ccf2016-02-01 12:53:58 +01001867 struct amd_sched_rq *rq;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001868 int i, r;
1869
Christian Königbcb1ba32016-03-08 15:40:11 +01001870 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1871 vm->ids[i] = NULL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001872 vm->va = RB_ROOT;
Chunming Zhou031e2982016-04-25 10:19:13 +08001873 vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001874 spin_lock_init(&vm->status_lock);
1875 INIT_LIST_HEAD(&vm->invalidated);
Christian König7fc11952015-07-30 11:53:42 +02001876 INIT_LIST_HEAD(&vm->cleared);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001877 INIT_LIST_HEAD(&vm->freed);
Christian König20250212016-03-08 17:58:35 +01001878
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001879 pd_size = amdgpu_vm_directory_size(adev);
1880 pd_entries = amdgpu_vm_num_pdes(adev);
1881
1882 /* allocate page table array */
Michel Dänzer9571e1d2016-01-19 17:59:46 +09001883 vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001884 if (vm->page_tables == NULL) {
1885 DRM_ERROR("Cannot allocate memory for page table array\n");
1886 return -ENOMEM;
1887 }
1888
Christian König2bd9ccf2016-02-01 12:53:58 +01001889 /* create scheduler entity for page table updates */
Christian König2d55e452016-02-08 17:37:38 +01001890
1891 ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1892 ring_instance %= adev->vm_manager.vm_pte_num_rings;
1893 ring = adev->vm_manager.vm_pte_rings[ring_instance];
Christian König2bd9ccf2016-02-01 12:53:58 +01001894 rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1895 r = amd_sched_entity_init(&ring->sched, &vm->entity,
1896 rq, amdgpu_sched_jobs);
1897 if (r)
Chunming Zhou64827ad2016-07-28 17:20:32 +08001898 goto err;
Christian König2bd9ccf2016-02-01 12:53:58 +01001899
Bas Nieuwenhuizen05906de2015-08-14 20:08:40 +02001900 vm->page_directory_fence = NULL;
1901
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001902 r = amdgpu_bo_create(adev, pd_size, align, true,
Alex Deucher857d9132015-08-27 00:14:16 -04001903 AMDGPU_GEM_DOMAIN_VRAM,
Chunming Zhou1baa4392016-08-04 13:59:32 +08001904 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
Christian König03f48dd2016-08-15 17:00:22 +02001905 AMDGPU_GEM_CREATE_SHADOW |
Christian König617859e2016-11-17 15:40:02 +01001906 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
1907 AMDGPU_GEM_CREATE_VRAM_CLEARED,
Christian König72d76682015-09-03 17:34:59 +02001908 NULL, NULL, &vm->page_directory);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001909 if (r)
Christian König2bd9ccf2016-02-01 12:53:58 +01001910 goto error_free_sched_entity;
1911
Chunming Zhouef9f0a82015-11-13 13:43:22 +08001912 r = amdgpu_bo_reserve(vm->page_directory, false);
Christian König2bd9ccf2016-02-01 12:53:58 +01001913 if (r)
1914 goto error_free_page_directory;
1915
Christian König5a712a82016-06-21 16:28:15 +02001916 vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
Christian König2a82ec212016-09-16 13:11:45 +02001917 amdgpu_bo_unreserve(vm->page_directory);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001918
1919 return 0;
Christian König2bd9ccf2016-02-01 12:53:58 +01001920
1921error_free_page_directory:
Christian König2698f622016-09-16 13:06:09 +02001922 amdgpu_bo_unref(&vm->page_directory->shadow);
Christian König2bd9ccf2016-02-01 12:53:58 +01001923 amdgpu_bo_unref(&vm->page_directory);
1924 vm->page_directory = NULL;
1925
1926error_free_sched_entity:
1927 amd_sched_entity_fini(&ring->sched, &vm->entity);
1928
Chunming Zhou64827ad2016-07-28 17:20:32 +08001929err:
1930 drm_free_large(vm->page_tables);
1931
Christian König2bd9ccf2016-02-01 12:53:58 +01001932 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001933}
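
/*
 * Illustrative lifetime sketch, not driver code: every client gets its own
 * VM, created and torn down roughly like this ("fpriv" stands in for a
 * hypothetical per-file private structure; the real call sites live
 * outside this file):
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */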
1934
1935/**
1936 * amdgpu_vm_fini - tear down a vm instance
1937 *
1938 * @adev: amdgpu_device pointer
1939 * @vm: requested vm
1940 *
Christian König8843dbb2016-01-26 12:17:11 +01001941 * Tear down @vm.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001942 * Unbind the VM and remove all bos from the vm bo list
1943 */
1944void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1945{
1946 struct amdgpu_bo_va_mapping *mapping, *tmp;
Christian König4388fc22017-03-13 10:13:36 +01001947 bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001948 int i;
1949
Christian König2d55e452016-02-08 17:37:38 +01001950 amd_sched_entity_fini(vm->entity.sched, &vm->entity);
Christian König2bd9ccf2016-02-01 12:53:58 +01001951
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001952 if (!RB_EMPTY_ROOT(&vm->va)) {
1953 dev_err(adev->dev, "still active bo inside vm\n");
1954 }
1955 rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1956 list_del(&mapping->list);
1957 interval_tree_remove(&mapping->it, &vm->va);
1958 kfree(mapping);
1959 }
1960 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
Christian König4388fc22017-03-13 10:13:36 +01001961 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
Christian König451bc8e2017-02-14 16:02:52 +01001962 amdgpu_vm_prt_fini(adev, vm);
Christian König4388fc22017-03-13 10:13:36 +01001963 prt_fini_needed = false;
Christian König451bc8e2017-02-14 16:02:52 +01001964 }
Christian König284710f2017-01-30 11:09:31 +01001965
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001966 list_del(&mapping->list);
Christian König451bc8e2017-02-14 16:02:52 +01001967 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001968 }
1969
Chunming Zhou1baa4392016-08-04 13:59:32 +08001970 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
Christian König914b4dc2016-09-28 12:27:37 +02001971 struct amdgpu_bo *pt = vm->page_tables[i].bo;
Christian König2698f622016-09-16 13:06:09 +02001972
1973 if (!pt)
1974 continue;
1975
1976 amdgpu_bo_unref(&pt->shadow);
1977 amdgpu_bo_unref(&pt);
Chunming Zhou1baa4392016-08-04 13:59:32 +08001978 }
Michel Dänzer9571e1d2016-01-19 17:59:46 +09001979 drm_free_large(vm->page_tables);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001980
Christian König2698f622016-09-16 13:06:09 +02001981 amdgpu_bo_unref(&vm->page_directory->shadow);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001982 amdgpu_bo_unref(&vm->page_directory);
Chris Wilsonf54d1862016-10-25 13:00:45 +01001983 dma_fence_put(vm->page_directory_fence);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001984}
Christian Königea89f8c2015-11-15 20:52:06 +01001985
1986/**
Christian Königa9a78b32016-01-21 10:19:11 +01001987 * amdgpu_vm_manager_init - init the VM manager
1988 *
1989 * @adev: amdgpu_device pointer
1990 *
1991 * Initialize the VM manager structures
1992 */
1993void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1994{
1995 unsigned i;
1996
1997 INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1998
1999 /* skip over VMID 0, since it is the system VM */
Christian König971fe9a92016-03-01 15:09:25 +01002000 for (i = 1; i < adev->vm_manager.num_ids; ++i) {
2001 amdgpu_vm_reset_id(adev, i);
Christian König832a9022016-02-15 12:33:02 +01002002 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
Christian Königa9a78b32016-01-21 10:19:11 +01002003 list_add_tail(&adev->vm_manager.ids[i].list,
2004 &adev->vm_manager.ids_lru);
Christian König971fe9a92016-03-01 15:09:25 +01002005 }
Christian König2d55e452016-02-08 17:37:38 +01002006
Chris Wilsonf54d1862016-10-25 13:00:45 +01002007 adev->vm_manager.fence_context =
2008 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Christian König1fbb2e92016-06-01 10:47:36 +02002009 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2010 adev->vm_manager.seqno[i] = 0;
2011
Christian König2d55e452016-02-08 17:37:38 +01002012 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
Christian Königb1c8a812016-05-04 10:34:03 +02002013 atomic64_set(&adev->vm_manager.client_counter, 0);
Christian König284710f2017-01-30 11:09:31 +01002014 spin_lock_init(&adev->vm_manager.prt_lock);
Christian König451bc8e2017-02-14 16:02:52 +01002015 atomic_set(&adev->vm_manager.num_prt_users, 0);
Christian Königa9a78b32016-01-21 10:19:11 +01002016}
2017
2018/**
Christian Königea89f8c2015-11-15 20:52:06 +01002019 * amdgpu_vm_manager_fini - cleanup VM manager
2020 *
2021 * @adev: amdgpu_device pointer
2022 *
2023 * Cleanup the VM manager and free resources.
2024 */
2025void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2026{
2027 unsigned i;
2028
Christian Königbcb1ba32016-03-08 15:40:11 +01002029 for (i = 0; i < AMDGPU_NUM_VM; ++i) {
2030 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
2031
Chris Wilsonf54d1862016-10-25 13:00:45 +01002032 dma_fence_put(adev->vm_manager.ids[i].first);
Christian König832a9022016-02-15 12:33:02 +01002033 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
Chris Wilsonf54d1862016-10-25 13:00:45 +01002034 dma_fence_put(id->flushed_updates);
Dave Airlie7b624ad2016-11-07 09:37:09 +10002035 dma_fence_put(id->last_flush);
Christian Königbcb1ba32016-03-08 15:40:11 +01002036 }
Christian Königea89f8c2015-11-15 20:52:06 +01002037}