/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
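
/*
 * Illustrative sketch, not driver code: how a GPU virtual address is split
 * into a page directory index and a page table index. The block size of 9
 * (512 PTEs per page table) is an assumed example value; the driver does
 * the same arithmetic with the amdgpu_vm_block_size parameter used below.
 */
#if 0
static void example_va_decompose(uint64_t va)
{
	const unsigned block_size = 9;	/* log2(PTEs per page table), assumed */
	uint64_t pfn = va >> 12;	/* GPU page frame number, 4KB pages */
	uint64_t pde_idx = pfn >> block_size;	/* index into page directory */
	uint64_t pte_idx = pfn & ((1ULL << block_size) - 1); /* index into PT */

	(void)pde_idx;
	(void)pte_idx;
}
#endif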

/* Special value indicating that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)

/* Local structure. Encapsulates some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* address where to copy page table entries from */
	uint64_t src;
	/* DMA addresses to use for mapping */
	dma_addr_t *pages_addr;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
};

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
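
/*
 * Worked example with assumed values, for illustration only: with
 * max_pfn = 1 << 20 (4GB of GPU address space in 4KB pages) and
 * amdgpu_vm_block_size = 9, amdgpu_vm_num_pdes() returns 1 << 11 = 2048
 * and the page directory occupies 2048 * 8 bytes = 16KB.
 */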

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the vm PT BOs to a duplicates list
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page table BOs to the duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct list_head *duplicates)
{
	uint64_t num_evictions;
	unsigned i;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return;

	/* add the vm page table to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}

static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
				   struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job which needs the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct fence **fences;
	unsigned i;
	int r = 0;

	fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
			       GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	mutex_lock(&adev->vm_manager.lock);

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &adev->vm_manager.ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			fence_get(fences[j]);

		array = fence_array_create(i, fences, fence_context,
					   seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&adev->vm_manager.lock);
		return 0;
	}
	kfree(fences);

	job->vm_needs_flush = true;
	/* Check if we can use a VMID already assigned to this VM */
	i = ring->idx;
	do {
		struct fence *flushed;

		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;
		if (amdgpu_vm_is_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush)
			continue;

		if (id->last_flush->context != fence_context &&
		    !fence_is_signaled(id->last_flush))
			continue;

		flushed = id->flushed_updates;
		if (updates &&
		    (!flushed || fence_is_later(updates, flushed)))
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;

		job->vm_id = id - adev->vm_manager.ids;
		job->vm_needs_flush = false;
		trace_amdgpu_vm_grab_id(vm, ring->idx, job);

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	} while (i != ring->idx);

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	fence_put(id->first);
	id->first = fence_get(fence);

	fence_put(id->last_flush);
	id->last_flush = NULL;

	fence_put(id->flushed_updates);
	id->flushed_updates = fence_get(updates);

	id->pd_gpu_addr = job->vm_pd_addr;
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic64_set(&id->owner, vm->client_id);
	vm->ids[ring->idx] = id;

	job->vm_id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);

error:
	mutex_unlock(&adev->vm_manager.lock);
	return r;
}

static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block_version *ip_block;

	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job carrying the vmid number and page directory address to use
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    job->vm_needs_flush || gds_switch_needed ||
	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
	    amdgpu_vm_is_gpu_reset(adev, id))) {
		struct fence *fence;

		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}

	if (gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
					    job->gds_base, job->gds_size,
					    job->gws_base, job->gws_size,
					    job->oa_base, job->oa_size);
	}

	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if (params->src) {
		amdgpu_vm_copy_pte(adev, params->ib,
				   pe, (params->src + (addr >> 12) * 8), count);

	} else if (params->pages_addr) {
		amdgpu_vm_write_pte(adev, params->ib,
				    params->pages_addr,
				    pe, addr, count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: vm the BO belongs to
 * @bo: bo to clear
 *
 * The BO has to be reserved before calling this.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring;
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&params, 0, sizeof(params));
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	params.ib = &job->ibs[0];
	amdgpu_vm_update_pages(adev, &params, addr, 0, entries,
			       0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size */
		result |= addr & (~PAGE_MASK);

	} else {
		/* No mapping required */
		result = addr;
	}

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
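
/*
 * Usage sketch, not driver code: with 4KB CPU pages, looking up a GTT
 * address returns the DMA address of the backing page, 4KB aligned; with
 * pages_addr == NULL the input is only masked. The array below is an
 * assumed example value; in the driver it comes from the BO's ttm_dma_tt.
 */
#if 0
static void example_map_gart(void)
{
	dma_addr_t pages[] = { 0xabcd000 };

	amdgpu_vm_map_gart(pages, 0x123);	/* -> 0xabcd000 */
	amdgpu_vm_map_gart(NULL, 0xabcd123);	/* -> 0xabcd000 */
}
#endif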

/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct fence *fence = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, &params,
						       last_pde, last_pt,
						       count, incr,
						       AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, &params,
				       last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);

	if (params.ib->length_dw != 0) {
		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);

	} else {
		amdgpu_job_free(job);
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
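
/*
 * Illustrative note on the walk above: if the page table BOs for, say,
 * pt_idx 4..7 happen to sit back to back in memory (each spaced by
 * AMDGPU_VM_PTE_COUNT * 8 bytes), the loop coalesces them into a single
 * amdgpu_vm_update_pages() call with count = 4 instead of four separate
 * PDE writes.
 */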

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @params: see amdgpu_pte_update_params definition
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_pte_update_params *params,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* Abort early if there isn't anything to do */
	if (pe_start == pe_end)
		return;

	/* system pages are not physically contiguous */
	if (params->src || params->pages_addr ||
	    !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, params, pe_start,
				       addr, count, AMDGPU_GPU_PAGE_SIZE,
				       flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, params, pe_start, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, params, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, params, frag_end, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
	}
}
785/**
786 * amdgpu_vm_update_ptes - make sure that page tables are valid
787 *
788 * @adev: amdgpu_device pointer
Christian König29efc4f2016-08-04 14:52:50 +0200789 * @params: see amdgpu_pte_update_params definition
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400790 * @vm: requested vm
791 * @start: start of GPU address range
792 * @end: end of GPU address range
Alex Xie677131a2016-06-06 18:13:26 -0400793 * @dst: destination address to map to, the next dst inside the function
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400794 * @flags: mapping flags
795 *
Christian König8843dbb2016-01-26 12:17:11 +0100796 * Update the page tables in the range @start - @end.
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400797 */
Christian Königa1e08d32016-01-26 11:40:46 +0100798static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
Christian König29efc4f2016-08-04 14:52:50 +0200799 struct amdgpu_pte_update_params *params,
Christian Königa1e08d32016-01-26 11:40:46 +0100800 struct amdgpu_vm *vm,
Christian Königa1e08d32016-01-26 11:40:46 +0100801 uint64_t start, uint64_t end,
802 uint64_t dst, uint32_t flags)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400803{
Christian König31f6c1f2016-01-26 12:37:49 +0100804 const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
805
Alex Xie21718492016-06-06 18:21:09 -0400806 uint64_t cur_pe_start, cur_pe_end, cur_dst;
Alex Xie677131a2016-06-06 18:13:26 -0400807 uint64_t addr; /* next GPU address to be updated */
Alex Xie21718492016-06-06 18:21:09 -0400808 uint64_t pt_idx;
809 struct amdgpu_bo *pt;
810 unsigned nptes; /* next number of ptes to be updated */
811 uint64_t next_pe_start;
812
813 /* initialize the variables */
814 addr = start;
815 pt_idx = addr >> amdgpu_vm_block_size;
816 pt = vm->page_tables[pt_idx].entry.robj;
817
818 if ((addr & ~mask) == (end & ~mask))
819 nptes = end - addr;
820 else
821 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
822
823 cur_pe_start = amdgpu_bo_gpu_offset(pt);
824 cur_pe_start += (addr & mask) * 8;
825 cur_pe_end = cur_pe_start + 8 * nptes;
826 cur_dst = dst;
827
828 /* for next ptb*/
829 addr += nptes;
830 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400831
832 /* walk over the address space and update the page tables */
Alex Xie21718492016-06-06 18:21:09 -0400833 while (addr < end) {
834 pt_idx = addr >> amdgpu_vm_block_size;
835 pt = vm->page_tables[pt_idx].entry.robj;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400836
837 if ((addr & ~mask) == (end & ~mask))
838 nptes = end - addr;
839 else
840 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
841
Alex Xie677131a2016-06-06 18:13:26 -0400842 next_pe_start = amdgpu_bo_gpu_offset(pt);
843 next_pe_start += (addr & mask) * 8;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400844
Alex Xie3a6f8e02016-06-06 18:14:57 -0400845 if (cur_pe_end == next_pe_start) {
846 /* The next ptb is consecutive to current ptb.
847 * Don't call amdgpu_vm_frag_ptes now.
848 * Will update two ptbs together in future.
849 */
850 cur_pe_end += 8 * nptes;
851 } else {
Christian König29efc4f2016-08-04 14:52:50 +0200852 amdgpu_vm_frag_ptes(adev, params,
Alex Xie677131a2016-06-06 18:13:26 -0400853 cur_pe_start, cur_pe_end,
854 cur_dst, flags);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400855
Alex Xie677131a2016-06-06 18:13:26 -0400856 cur_pe_start = next_pe_start;
857 cur_pe_end = next_pe_start + 8 * nptes;
858 cur_dst = dst;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400859 }
860
Alex Xie21718492016-06-06 18:21:09 -0400861 /* for next ptb*/
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400862 addr += nptes;
863 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
864 }
865
Christian König29efc4f2016-08-04 14:52:50 +0200866 amdgpu_vm_frag_ptes(adev, params, cur_pe_start,
Alex Xie677131a2016-06-06 18:13:26 -0400867 cur_pe_end, cur_dst, flags);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400868}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
				       struct fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct fence *f = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
	memset(&params, 0, sizeof(params));
	params.src = src;
	params.pages_addr = pages_addr;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if (params.src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (params.pages_addr) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	amdgpu_vm_update_ptes(adev, &params, vm, start,
			      last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
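
/*
 * Worked example with assumed numbers: updating nptes = 1000 entries with
 * amdgpu_vm_block_size = 9 gives ncmds = (1000 >> 9) + 1 = 2. On the
 * set-page path that makes ndw = 64 + 2 * 10 + 2 * 10 = 104 dwords, i.e.
 * a 416 byte indirect buffer reserved for the update.
 */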

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct fence *exclusive,
				      uint32_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint32_t flags, uint64_t addr,
				      struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

	uint64_t src = 0, start = mapping->it.start;
	int r;

	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	if (pages_addr) {
		if (flags == gtt_flags)
			src = adev->gart.table_addr + (addr >> 12) * 8;
		addr = 0;
	}
	addr += mapping->offset;

	if (!pages_addr || src)
		return amdgpu_vm_bo_update_mapping(adev, exclusive,
						   src, pages_addr, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
	}

	return 0;
}
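
/*
 * Illustrative example: max_size is 64MB worth of pages (16384 with 4KB
 * GPU pages), so a 100MB system memory mapping is written as one 64MB
 * chunk followed by one 36MB chunk, each small enough to fit into a
 * single SDMA indirect buffer.
 */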

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint32_t gtt_flags, flags;
	struct fence *exclusive;
	uint64_t addr;
	int r;

	if (mem) {
		struct ttm_dma_tt *ttm;

		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}

		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
	} else {
		addr = 0;
		exclusive = NULL;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
					       gtt_flags, pages_addr, vm,
					       mapping, flags, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
					       0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}
1211/**
1212 * amdgpu_vm_bo_add - add a bo to a specific vm
1213 *
1214 * @adev: amdgpu_device pointer
1215 * @vm: requested vm
1216 * @bo: amdgpu buffer object
1217 *
Christian König8843dbb2016-01-26 12:17:11 +01001218 * Add @bo into the requested vm.
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001219 * Add @bo to the list of bos associated with the vm
1220 * Returns newly added bo_va or NULL for failure
1221 *
1222 * Object has to be reserved!
1223 */
1224struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1225 struct amdgpu_vm *vm,
1226 struct amdgpu_bo *bo)
1227{
1228 struct amdgpu_bo_va *bo_va;
1229
1230 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1231 if (bo_va == NULL) {
1232 return NULL;
1233 }
1234 bo_va->vm = vm;
1235 bo_va->bo = bo;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001236 bo_va->ref_count = 1;
1237 INIT_LIST_HEAD(&bo_va->bo_list);
Christian König7fc11952015-07-30 11:53:42 +02001238 INIT_LIST_HEAD(&bo_va->valids);
1239 INIT_LIST_HEAD(&bo_va->invalids);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001240 INIT_LIST_HEAD(&bo_va->vm_status);
Christian König32b41ac2016-03-08 18:03:27 +01001241
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001242 list_add_tail(&bo_va->bo_list, &bo->va);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001243
1244 return bo_va;
1245}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;

		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);

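	/*
	 * saddr and eaddr are GPU page numbers at this point; shifting by
	 * amdgpu_vm_block_size converts them into page directory entry
	 * indices, with one page table BO backing each PDE in the range.
	 */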
	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		entry->user_pages = NULL;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}
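
/*
 * Usage sketch (illustrative): map a whole BO read/write at a GPU virtual
 * address "va" chosen by the caller. saddr, offset and size must all be
 * multiples of AMDGPU_GPU_PAGE_SIZE or the parameter check above rejects
 * the call with -EINVAL.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		return r;
 */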

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);

	return 0;
}
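
/*
 * Usage sketch (illustrative): tear down the mapping created in the
 * amdgpu_vm_bo_map() example above. A mapping that was already committed
 * to the page tables lands on vm->freed and is only really gone after
 * the next amdgpu_vm_clear_freed() call.
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, va);
 *	if (r == -ENOENT)
 *		dev_warn(adev->dev, "nothing mapped at 0x%llx\n", va);
 */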

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	/* valid mappings are still in the page tables, free them lazily */
	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	/* invalid mappings were never committed, just drop them */
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
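
/*
 * Usage sketch (illustrative): full teardown of the bo_va created in the
 * amdgpu_vm_bo_add() example. Unmapping beforehand is not required;
 * amdgpu_vm_bo_rmv() moves any still-valid mappings to vm->freed itself.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		amdgpu_vm_bo_rmv(adev, bo_va);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */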

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid in every VM it is mapped into.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
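
/*
 * Usage sketch (illustrative): the typical caller is the TTM move
 * notification path, so every VM mapping of a BO is revalidated after the
 * BO changes placement. "my_bo_move_notify" is a hypothetical hook name.
 *
 *	static void my_bo_move_notify(struct amdgpu_bo *bo)
 *	{
 *		amdgpu_vm_bo_invalidate(bo->adev, bo);
 *	}
 */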

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		vm->ids[i] = NULL;
	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		goto error_free_page_tables;

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r)
		goto error_free_page_directory;

	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r)
		goto error_free_page_directory;
	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);

	return 0;

error_free_page_directory:
	amdgpu_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

error_free_page_tables:
	/* the page table array was already allocated, don't leak it */
	drm_free_large(vm->page_tables);
	vm->page_tables = NULL;

	return r;
}
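
/*
 * Usage sketch (illustrative): each userspace client gets its own VM,
 * typically created when the DRM file is opened and destroyed on release.
 * "fpriv" stands in for the driver's per-file private data.
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */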

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va))
		dev_err(adev->dev, "still active bo inside vm\n");

	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
		amdgpu_vm_reset_id(adev, i);
		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);
	}

	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
}
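
/*
 * Usage sketch (illustrative): the manager is brought up once per device
 * before any per-client VM exists and torn down after all VMs are gone,
 * e.g. from the GMC IP block's sw_init/sw_fini callbacks.
 *
 *	amdgpu_vm_manager_init(adev);
 *	...
 *	amdgpu_vm_manager_fini(adev);
 */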

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];

		fence_put(id->first);
		amdgpu_sync_free(&id->active);
		fence_put(id->flushed_updates);
	}
}