/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics; however,
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
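
/*
 * Address translation, illustrated (a sketch; it assumes the usual 4KB
 * RADEON_GPU_PAGE_SIZE and a RADEON_VM_BLOCK_SIZE of 9, i.e. 512 PTEs
 * per page table -- the real values come from radeon.h):
 *
 *   va      = 0x0123456000
 *   pfn     = va >> 12    = 0x123456   (GPU page frame number)
 *   pde_idx = pfn >> 9    = 0x91a      (slot in the page directory)
 *   pte_idx = pfn & 0x1ff = 0x56       (slot in that page table)
 *
 * The PDE holds the GPU address of the page table BO, and the PTE holds
 * the final page address plus the valid/system/snooped flags.
 */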

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	mutex_lock(&rdev->vm_manager.lock);
	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
	mutex_unlock(&rdev->vm_manager.lock);
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx, size;

	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_bo_list);
	list = kmalloc(size, GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].bo = vm->page_directory;
	list[0].domain = RADEON_GEM_DOMAIN_VRAM;
	list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list_add(&list[0].tv.head, head);

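	/* only page tables up to the highest PDE in use are allocated;
	 * anything beyond max_pde_used has no BO to validate
	 */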
	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].bo = vm->page_tables[i].bo;
		list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
		list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].bo->tbo;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			trace_radeon_vm_grab_id(vm->id, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

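	/* no free id available: prefer an id last used on our own ring
	 * (choices[0]), since syncing to a fence on the same ring is
	 * cheaper than waiting on another ring (choices[1])
	 */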
	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush all the time */
	if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
		vm->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, ring, vm);
	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);

	/* we just flushed the VM, remember that */
	if (!vm->last_flush)
		vm->last_flush = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 *
 * Returns 0 for success, error for failure.
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

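	/* IB size: up to two dwords per 64 bit entry written, plus 64
	 * dwords of headroom for command headers and padding
	 */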
	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, entries * 2 + 64);
	if (r)
		goto error;

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto error;

	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;

error:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/**
 * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn, pt_idx;
	int r;

	if (soffset) {
		/* make sure object fits at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	last_offset = 0;
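	/* the va list is kept sorted by start offset; walk it to find the
	 * insertion point and reject ranges that overlap an existing mapping
	 */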
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

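	/* turn the byte range into page directory indices */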
	soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
	eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, false,
				     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			radeon_bo_reserve(bo_va->bo, false);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return radeon_bo_reserve(bo_va->bo, false);
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 12;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
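		/* coalesce writes: only flush the pending run when either
		 * the PDEs or the page tables they point to stop being
		 * contiguous
		 */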
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, &ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
		r = radeon_ib_schedule(rdev, &ib, NULL);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(ib.fence);
		radeon_fence_unref(&vm->last_flush);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with the update commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

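	/* the range comes in as byte offsets; work in GPU pages from here */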
	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
		unsigned nptes;
		uint64_t pte;

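		/* clamp the number of PTEs to the end of the current
		 * page table so one write never crosses a table boundary
		 */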
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pte,
							last_dst, count,
							RADEON_GPU_PAGE_SIZE,
							flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pte,
					last_dst, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and global and local mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_vm *vm,
			struct radeon_bo *bo,
			struct ttm_mem_reg *mem)
{
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, ndw;
	uint64_t addr;
	int r;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = radeon_bo_ngpu_pages(bo);

	/* padding, etc. */
	ndw = 64;

	if (RADEON_VM_BLOCK_SIZE > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va)
{
	int r = 0;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&bo_va->vm->mutex);
	if (bo_va->soffset) {
		r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
	}
	mutex_unlock(&rdev->vm_manager.lock);
	list_del(&bo_va->vm_list);
	mutex_unlock(&bo_va->vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pd_entries, pts_size;
	int r;

	vm->id = 0;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->va);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
			     RADEON_GEM_DOMAIN_VRAM, NULL,
			     &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);

	mutex_destroy(&vm->mutex);
}