/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

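/*
 * A minimal sketch (not used by the driver itself) of the address math
 * this file relies on, assuming 4KB GPU pages (AMDGPU_GPU_PAGE_SHIFT)
 * and the amdgpu_vm_block_size module parameter used by the helpers
 * below:
 *
 *	pfn     = addr >> AMDGPU_GPU_PAGE_SHIFT;   // GPU page frame number
 *	pde_idx = pfn >> amdgpu_vm_block_size;     // page directory entry
 *	pte_idx = pfn & (AMDGPU_VM_PTE_COUNT - 1); // entry inside the PT
 */
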
/* Local structure. Encapsulates some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint32_t flags);
	/* indicate whether to update the page table or its shadow */
	bool shadow;
};

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}

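/*
 * Worked example (illustrative numbers only): covering 4GB of 4KB pages
 * gives max_pfn = 1 << 20; with amdgpu_vm_block_size = 9 that is
 * 1 << 11 = 2048 directory entries of 8 bytes each, so
 * amdgpu_vm_directory_size() returns 16KB after page alignment.
 */
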
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the vm BOs to a duplicates list
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page tables to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct list_head *duplicates)
{
	uint64_t num_evictions;
	unsigned i;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return;

	/* add the vm page table to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}

static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
				   struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct fence **fences;
	unsigned i;
	int r = 0;

	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
			       GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	mutex_lock(&adev->vm_manager.lock);

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &adev->vm_manager.ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			fence_get(fences[j]);

		array = fence_array_create(i, fences, fence_context,
					   seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&adev->vm_manager.lock);
		return 0;
	}
	kfree(fences);

	job->vm_needs_flush = true;
	/* Check if we can use a VMID already assigned to this VM */
	i = ring->idx;
	do {
		struct fence *flushed;

		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;
		if (amdgpu_vm_is_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush)
			continue;

		if (id->last_flush->context != fence_context &&
		    !fence_is_signaled(id->last_flush))
			continue;

		flushed = id->flushed_updates;
		if (updates &&
		    (!flushed || fence_is_later(updates, flushed)))
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;

		job->vm_id = id - adev->vm_manager.ids;
		job->vm_needs_flush = false;
		trace_amdgpu_vm_grab_id(vm, ring->idx, job);

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	} while (i != ring->idx);

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	fence_put(id->first);
	id->first = fence_get(fence);

	fence_put(id->last_flush);
	id->last_flush = NULL;

	fence_put(id->flushed_updates);
	id->flushed_updates = fence_get(updates);

	id->pd_gpu_addr = job->vm_pd_addr;
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic64_set(&id->owner, vm->client_id);
	vm->ids[ring->idx] = id;

	job->vm_id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);

error:
	mutex_unlock(&adev->vm_manager.lock);
	return r;
}

static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block_version *ip_block;

	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job providing the VMID and flush parameters
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    job->vm_needs_flush || gds_switch_needed ||
	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
	    amdgpu_vm_is_gpu_reset(adev, id))) {
		struct fence *fence;

		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}

	if (gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
					    job->gds_base, job->gds_size,
					    job->gws_base, job->gws_size,
					    job->oa_base, job->oa_size);
	}

	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm)
			return bo_va;
	}
	return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);
	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe,
			   (params->src + (addr >> 12) * 8), count);
}

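/*
 * The source offset above, illustrated: params->src points at a linear
 * array of 8-byte GART entries indexed by 4KB page, so for example
 * addr = 0x5000 selects entry 5 (0x5000 >> 12), i.e. byte offset 40.
 */
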
/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: bo to clear
 *
 * The BO has to be reserved before calling this.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring;
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	unsigned entries;
	uint64_t addr;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];
	amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

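/*
 * Worked example: with 64KB CPU pages and 4KB GPU pages, addr = 0x12345
 * selects pages_addr[1] (addr >> PAGE_SHIFT), the OR carries over the
 * in-page bits 0x2345, and the final mask trims the result back to 4KB
 * granularity, yielding the DMA address of the 4KB GPU page inside the
 * 64KB CPU page.
 */
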
static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 bool shadow)
{
	struct amdgpu_ring *ring;
	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
		vm->page_directory;
	uint64_t pd_addr;
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct fence *fence = NULL;
	int r;

	if (!pd)
		return 0;

	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
	if (r)
		return r;

	pd_addr = amdgpu_bo_gpu_offset(pd);
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		if (bo->shadow) {
			struct amdgpu_bo *shadow = bo->shadow;

			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
			if (r)
				return r;
		}

		pt = amdgpu_bo_gpu_offset(bo);
		if (!shadow) {
			if (vm->page_tables[pt_idx].addr == pt)
				continue;
			vm->page_tables[pt_idx].addr = pt;
		} else {
			if (vm->page_tables[pt_idx].shadow_addr == pt)
				continue;
			vm->page_tables[pt_idx].shadow_addr = pt;
		}

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt) ||
		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

			if (count) {
				amdgpu_vm_do_set_ptes(&params, last_pde,
						      last_pt, count, incr,
						      AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
				      count, incr, AMDGPU_PTE_VALID);

	if (params.ib->length_dw != 0) {
		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);

	} else {
		amdgpu_job_free(job);
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/*
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	int r;

	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
	if (r)
		return r;
	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
}

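/*
 * Usage sketch (illustrative only): callers reserve the page directory,
 * then a single call keeps the shadow copy and the real PD in sync,
 * since the wrapper above writes the shadow first and the real PD second:
 *
 *	r = amdgpu_vm_update_page_directory(adev, vm);
 *	if (r)
 *		return r;
 */
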
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				  struct amdgpu_vm *vm,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

	uint64_t cur_pe_start, cur_nptes, cur_dst;
	uint64_t addr; /* next GPU address to be updated */
	uint64_t pt_idx;
	struct amdgpu_bo *pt;
	unsigned nptes; /* next number of ptes to be updated */
	uint64_t next_pe_start;

	/* initialize the variables */
	addr = start;
	pt_idx = addr >> amdgpu_vm_block_size;
	pt = vm->page_tables[pt_idx].entry.robj;
	if (params->shadow) {
		if (!pt->shadow)
			return;
		pt = vm->page_tables[pt_idx].entry.robj->shadow;
	}
	if ((addr & ~mask) == (end & ~mask))
		nptes = end - addr;
	else
		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

	cur_pe_start = amdgpu_bo_gpu_offset(pt);
	cur_pe_start += (addr & mask) * 8;
	cur_nptes = nptes;
	cur_dst = dst;

	/* for next ptb */
	addr += nptes;
	dst += nptes * AMDGPU_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	while (addr < end) {
		pt_idx = addr >> amdgpu_vm_block_size;
		pt = vm->page_tables[pt_idx].entry.robj;
		if (params->shadow) {
			if (!pt->shadow)
				return;
			pt = vm->page_tables[pt_idx].entry.robj->shadow;
		}

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		next_pe_start = amdgpu_bo_gpu_offset(pt);
		next_pe_start += (addr & mask) * 8;

		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
			/* The next ptb is consecutive to current ptb.
			 * Don't call the update function now.
			 * Will update two ptbs together in future.
			 */
			cur_nptes += nptes;
		} else {
			params->func(params, cur_pe_start, cur_dst, cur_nptes,
				     AMDGPU_GPU_PAGE_SIZE, flags);

			cur_pe_start = next_pe_start;
			cur_nptes = nptes;
			cur_dst = dst;
		}

		/* for next ptb */
		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	params->func(params, cur_pe_start, cur_dst, cur_nptes,
		     AMDGPU_GPU_PAGE_SIZE, flags);
}

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
				struct amdgpu_vm *vm,
				uint64_t start, uint64_t end,
				uint64_t dst, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

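	/*
	 * Worked example (illustrative numbers): with
	 * AMDGPU_LOG2_PAGES_PER_FRAG = 4, frag_align below is 16 pages
	 * (64KB). For start = 5 and end = 100, frag_start = 16 and
	 * frag_end = 96: pages 5..15 and 96..99 are written as plain 4KB
	 * PTEs, while 16..95 get AMDGPU_PTE_FRAG(4) so the TLB can cache
	 * them as 64KB fragments.
	 */
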
	const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;

	uint64_t frag_start = ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	uint32_t frag;

	/* system pages are mapped non-contiguously */
	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
		return;
	}

	/* use more than 64KB fragment size if possible */
	frag = lower_32_bits(frag_start | frag_end);
	frag = likely(frag) ? __ffs(frag) : 31;

	/* handle the 4K area at the beginning */
	if (start != frag_start) {
		amdgpu_vm_update_ptes(params, vm, start, frag_start,
				      dst, flags);
		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
	}

	/* handle the area in the middle */
	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
			      flags | AMDGPU_PTE_FRAG(frag));

	/* handle the 4K area at the end */
	if (frag_end != end) {
		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
	}
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
				       struct fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.src = src;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

		params.func = amdgpu_vm_do_copy_ptes;

	} else if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * 7;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;

		params.func = amdgpu_vm_do_set_ptes;
	}

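	/*
	 * Worked example (illustrative numbers): a 1MB set-page update has
	 * nptes = 256; 256 >> min(amdgpu_vm_block_size, 11) is 0 for the
	 * usual block sizes, so ncmds = 1 and
	 * ndw = 64 + 1 * 10 + 2 * 10 = 94 dwords reserved for the IB.
	 */
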
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (!src && pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	params.shadow = true;
	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
	params.shadow = false;
	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct fence *exclusive,
				      uint32_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint32_t flags, uint64_t addr,
				      struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

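	/*
	 * Chunking arithmetic: with 4KB GPU pages, max_size is
	 * 64MB / 4KB = 16384 PTEs per update, so e.g. a 150MB mapping is
	 * split into 64MB + 64MB + 22MB chunks below, each small enough
	 * for one SDMA IB.
	 */
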
	uint64_t src = 0, start = mapping->it.start;
	int r;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here first.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	if (pages_addr) {
		if (flags == gtt_flags)
			src = adev->gart.table_addr + (addr >> 12) * 8;
		addr = 0;
	}
	addr += mapping->offset;

	if (!pages_addr || src)
		return amdgpu_vm_bo_update_mapping(adev, exclusive,
						   src, pages_addr, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
	}

	return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint32_t gtt_flags, flags;
	struct ttm_mem_reg *mem;
	struct fence *exclusive;
	uint64_t addr;
	int r;

	if (clear) {
		mem = NULL;
		addr = 0;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo_va->bo->tbo.mem;
		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}

		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
		     adev == bo_va->bo->adev) ? flags : 0;

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
					       gtt_flags, pages_addr, vm,
					       mapping, flags, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (clear)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

1219/**
1220 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1221 *
1222 * @adev: amdgpu_device pointer
1223 * @vm: requested vm
1224 *
1225 * Make sure all freed BOs are cleared in the PT.
1226 * Returns 0 for success.
1227 *
1228 * PTs have to be reserved and mutex must be locked!
1229 */
1230int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1231 struct amdgpu_vm *vm)
1232{
1233 struct amdgpu_bo_va_mapping *mapping;
1234 int r;
1235
1236 while (!list_empty(&vm->freed)) {
1237 mapping = list_first_entry(&vm->freed,
1238 struct amdgpu_bo_va_mapping, list);
1239 list_del(&mapping->list);
Christian Könige17841b2016-03-08 17:52:01 +01001240
Christian König3cabaa52016-06-06 10:17:58 +02001241 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
Christian Königfa3ab3c2016-03-18 21:00:35 +01001242 0, 0, NULL);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001243 kfree(mapping);
1244 if (r)
1245 return r;
1246
1247 }
1248 return 0;
1249
1250}
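
/*
 * Usage sketch (illustrative only): tearing down a mapping.  Unmapping
 * merely queues the range on vm->freed; amdgpu_vm_clear_freed() is what
 * actually scrubs the PTEs.  The PTs must already be reserved here.
 */
static int __maybe_unused amdgpu_vm_example_unmap_and_flush(struct amdgpu_device *adev,
							    struct amdgpu_vm *vm,
							    struct amdgpu_bo_va *bo_va,
							    uint64_t gpu_addr)
{
	int r;

	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
	if (r)
		return r;

	return amdgpu_vm_clear_freed(adev, vm);
}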

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, true);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}
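
/*
 * Usage sketch (illustrative only): collecting the PT update fence of the
 * invalidated-BO flush into an amdgpu_sync.  A real caller would make its
 * command submission wait on the collected fences before freeing the sync.
 */
static int __maybe_unused amdgpu_vm_example_flush_invalids(struct amdgpu_device *adev,
							   struct amdgpu_vm *vm)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	r = amdgpu_vm_clear_invalids(adev, vm, &sync);
	/* ... wait on or hand off the fences gathered in &sync here ... */
	amdgpu_sync_free(&sync);
	return r;
}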

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo to the requested vm and to the list of BOs associated with it.
 * Returns the newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}
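
/*
 * Usage sketch (illustrative only): the BO must be reserved while the
 * bo_va is created, since amdgpu_vm_bo_add() links it into bo->va.
 */
static __maybe_unused struct amdgpu_bo_va *
amdgpu_vm_example_add(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va = NULL;

	if (amdgpu_bo_reserve(bo, false) == 0) {
		bo_va = amdgpu_vm_bo_add(adev, vm, bo);
		amdgpu_bo_unreserve(bo);
	}
	return bo_va;
}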

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure the object fits at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;

		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				     AMDGPU_GEM_CREATE_SHADOW,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page directory to avoid freeing
		 * it up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		entry->user_pages = NULL;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}
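
/*
 * Usage sketch (illustrative only): mapping a whole BO read/write at a
 * given GPU virtual address.  AMDGPU_PTE_READABLE/WRITEABLE are the PTE
 * flag bits used elsewhere in the driver; saddr, offset and size must all
 * be GPU page aligned or amdgpu_vm_bo_map() returns -EINVAL.
 */
static int __maybe_unused amdgpu_vm_example_map(struct amdgpu_device *adev,
						struct amdgpu_bo_va *bo_va,
						uint64_t gpu_addr)
{
	if (gpu_addr & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	return amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0,
				amdgpu_bo_size(bo_va->bo),
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}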

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);

	return 0;
}
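
/*
 * Sketch (illustrative only): a caller that tolerates unmapping an address
 * that was never mapped.  Only mappings that were already valid are queued
 * on vm->freed; not-yet-applied ones are freed immediately.
 */
static void __maybe_unused amdgpu_vm_example_try_unmap(struct amdgpu_device *adev,
						       struct amdgpu_bo_va *bo_va,
						       uint64_t gpu_addr)
{
	if (amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr) == -ENOENT)
		DRM_DEBUG("no mapping at 0x%010llx\n",
			  (unsigned long long)gpu_addr);
}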

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
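
/*
 * Usage sketch (illustrative only): releasing a bo_va.  The valid mappings
 * survive on vm->freed, so the PTEs are only really cleared by the next
 * amdgpu_vm_clear_freed() call.
 */
static int __maybe_unused amdgpu_vm_example_release(struct amdgpu_device *adev,
						    struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_vm *vm = bo_va->vm;

	amdgpu_vm_bo_rmv(adev, bo_va);	/* bo_va is freed here */

	return amdgpu_vm_clear_freed(adev, vm);
}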

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid in all vms it is mapped into.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
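
/*
 * Sketch (illustrative only): how a buffer-move notification would flag
 * every mapping of a BO as stale, so that the next
 * amdgpu_vm_clear_invalids() pass rewrites the PTEs.
 */
static void __maybe_unused amdgpu_vm_example_on_move(struct amdgpu_device *adev,
						     struct amdgpu_bo *bo)
{
	/* queues all bo_vas of @bo on their VM's invalidated list */
	amdgpu_vm_bo_invalidate(adev, bo);
}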

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		vm->ids[i] = NULL;
	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		goto error_free_page_tables;

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			     AMDGPU_GEM_CREATE_SHADOW,
			     NULL, NULL, &vm->page_directory);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r)
		goto error_free_page_directory;

	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r)
		goto error_free_page_directory;
	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);

	return 0;

error_free_page_directory:
	amdgpu_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

error_free_page_tables:
	/* don't leak the page table array on a failed init */
	drm_free_large(vm->page_tables);
	vm->page_tables = NULL;

	return r;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va))
		dev_err(adev->dev, "still active bo inside vm\n");

	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
		if (vm->page_tables[i].entry.robj &&
		    vm->page_tables[i].entry.robj->shadow)
			amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	}
	drm_free_large(vm->page_tables);

	if (vm->page_directory->shadow)
		amdgpu_bo_unref(&vm->page_directory->shadow);
	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
}
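
/*
 * Lifecycle sketch (illustrative only): a per-client VM is initialized
 * once, used for mappings and command submission, and torn down again.
 * In the real driver the amdgpu_vm is embedded in per-file private data
 * rather than allocated ad hoc as here.
 */
static int __maybe_unused amdgpu_vm_example_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm;
	int r;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	r = amdgpu_vm_init(adev, vm);
	if (r)
		goto out;

	/* ... add BOs, map them, submit work ... */

	amdgpu_vm_fini(adev, vm);
out:
	kfree(vm);
	return r;
}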

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
		amdgpu_vm_reset_id(adev, i);
		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);
	}

	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];

		fence_put(id->first);
		amdgpu_sync_free(&id->active);
		fence_put(id->flushed_updates);
	}
}