/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"

/*
 * Common GART table functions.
 */
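/**
 * radeon_gart_table_ram_alloc - allocate system ram for the GART page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocates a coherent system-memory buffer for the GART page table,
 * maps it uncached on x86 for the RS400/RS480/RS690/RS740 IGPs, and
 * zeroes it.  Returns 0 on success, -ENOMEM on failure.
 */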
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
}

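/**
 * radeon_gart_table_ram_free - free system ram used for the GART page table
 *
 * @rdev: radeon_device pointer
 *
 * Restores write-back caching on the IGPs whose table pages were mapped
 * uncached, then frees the coherent buffer allocated by
 * radeon_gart_table_ram_alloc().
 */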
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
}

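/**
 * radeon_gart_table_vram_alloc - allocate vram for the GART page table
 *
 * @rdev: radeon_device pointer
 *
 * Creates (but does not pin) a VRAM buffer object to back the GART page
 * table.  Returns 0 on success, negative error code on failure.
 */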
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

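/**
 * radeon_gart_table_vram_pin - pin the GART page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pins the page table buffer object in VRAM and kmaps it so the CPU can
 * write entries; the pinned GPU address is stored in
 * rdev->gart.table_addr.  Returns 0 on success, negative error code on
 * failure.
 */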
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}

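/**
 * radeon_gart_table_vram_unpin - unpin the GART page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Kunmaps and unpins the page table buffer object and clears the CPU
 * pointer; the object itself is kept around for later re-pinning.
 */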
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

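/**
 * radeon_gart_table_vram_free - free the vram GART page table
 *
 * @rdev: radeon_device pointer
 *
 * Unpins and then drops the last reference on the page table buffer
 * object created by radeon_gart_table_vram_alloc().
 */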
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_gart_table_vram_unpin(rdev);
	radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common GART functions.
 */
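/**
 * radeon_gart_unbind - unbind pages from the GART page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture, in bytes
 * @pages: number of CPU pages to unbind
 *
 * Points the affected GART entries back at the dummy page, drops the
 * driver's references to the bound pages, and flushes the GPU TLB.
 */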
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART!\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

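/**
 * radeon_gart_bind - bind pages into the GART page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture, in bytes
 * @pages: number of CPU pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of the pages
 *
 * Records the page mappings and, if the table is currently mapped,
 * writes the corresponding GART entries and flushes the GPU TLB.
 * Returns 0 on success, -EINVAL if the GART is not ready.
 */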
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART!\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}

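/**
 * radeon_gart_restore - rewrite the whole GART page table
 *
 * @rdev: radeon_device pointer
 *
 * Rewrites every GART entry from the saved pages_addr[] array so the
 * hardware table matches the driver's bookkeeping (presumably needed
 * when the table contents were lost, e.g. across a table re-pin), then
 * flushes the GPU TLB.
 */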
void radeon_gart_restore(struct radeon_device *rdev)
{
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

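/**
 * radeon_gart_init - init the driver info for managing the GART
 *
 * @rdev: radeon_device pointer
 *
 * Allocates the dummy page and the pages/pages_addr bookkeeping arrays
 * sized from the GTT size, then points every entry at the dummy page.
 * Returns 0 on success, negative error code on failure.
 */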
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
				   GFP_KERNEL);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}

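/**
 * radeon_gart_fini - tear down the driver info for managing the GART
 *
 * @rdev: radeon_device pointer
 *
 * Unbinds any still-bound pages, then frees the bookkeeping arrays and
 * the dummy page.
 */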
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	kfree(rdev->gart.pages);
	kfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;

	radeon_dummy_page_fini(rdev);
}

/*
 * vm helpers
 *
 * TODO: bind a default page at vm initialization for default address
 */
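/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Allocates the VRAM sub-allocator that backs the per-vm page tables
 * (8 bytes per pfn up to max_pfn) and calls the asic-specific init
 * hook.  Returns 0 on success, negative error code on failure.
 */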
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	rdev->vm_manager.enabled = false;

	/* mark first vm as always in use, it's the system one */
	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
				      rdev->vm_manager.max_pfn * 8,
				      RADEON_GEM_DOMAIN_VRAM);
	if (r) {
		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
			(rdev->vm_manager.max_pfn * 8) >> 10);
		return r;
	}

	r = rdev->vm_manager.funcs->init(rdev);
	if (r == 0)
		rdev->vm_manager.enabled = true;

	return r;
}

/* global mutex must be locked */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	if (vm->id == -1) {
		return;
	}

	/* wait for vm use to end */
	while (vm->fence) {
		int r;
		r = radeon_fence_wait(vm->fence, false);
		if (r)
			DRM_ERROR("error while waiting for fence: %d\n", r);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->vm_manager.lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->vm_manager.lock);
			if (!r)
				continue;
		}
		break;
	}
	radeon_fence_unref(&vm->fence);

	/* hw unbind */
	rdev->vm_manager.funcs->unbind(rdev, vm);
	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
	list_del_init(&vm->list);
	vm->id = -1;
	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
	vm->pt = NULL;

	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
	}
}

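/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Suspends the manager (unbinding all active vms), calls the
 * asic-specific fini hook and frees the page-table sub-allocator.
 */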
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	if (rdev->vm_manager.sa_manager.bo == NULL)
		return;
	radeon_vm_manager_suspend(rdev);
	rdev->vm_manager.funcs->fini(rdev);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
}

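/**
 * radeon_vm_manager_start - start the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Starts the page-table sub-allocator again (the counterpart of
 * radeon_vm_manager_suspend(), presumably for resume).  Returns 0 on
 * success, -EINVAL if the manager was never initialized.
 */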
int radeon_vm_manager_start(struct radeon_device *rdev)
{
	if (rdev->vm_manager.sa_manager.bo == NULL) {
		return -EINVAL;
	}
	return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
}

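/**
 * radeon_vm_manager_suspend - suspend the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Unbinds all active vms, shuts down the asic-specific vm state and
 * suspends the page-table sub-allocator.
 */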
int radeon_vm_manager_suspend(struct radeon_device *rdev)
{
	struct radeon_vm *vm, *tmp;

	mutex_lock(&rdev->vm_manager.lock);
	/* unbind all active vm */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		radeon_vm_unbind_locked(rdev, vm);
	}
	rdev->vm_manager.funcs->fini(rdev);
	mutex_unlock(&rdev->vm_manager.lock);
	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
}

/* global mutex must be locked */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	mutex_unlock(&vm->mutex);
}

/* global and local mutex must be locked */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_vm *vm_evict;
	unsigned i;
	int id = -1, r;

	if (vm == NULL) {
		return -EINVAL;
	}

	if (vm->id != -1) {
		/* update lru */
		list_del_init(&vm->list);
		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
		return 0;
	}

retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
			     RADEON_GPU_PAGE_SIZE, false);
	if (r) {
		if (list_empty(&rdev->vm_manager.lru_vm)) {
			return r;
		}
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
		goto retry;
	}
	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));

retry_id:
	/* search for free vm */
	for (i = 0; i < rdev->vm_manager.nvm; i++) {
		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
			id = i;
			break;
		}
	}
	/* evict vm if necessary */
	if (id == -1) {
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
		goto retry_id;
	}

	/* do hw bind */
	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
		return r;
	}
	rdev->vm_manager.use_bitmap |= 1 << id;
	vm->id = id;
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
				       &rdev->ring_tmp_bo.bo->tbo.mem);
}

/* object has to be reserved */
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
		     uint64_t offset,
		     uint32_t flags)
{
	struct radeon_bo_va *bo_va, *tmp;
	struct list_head *head;
	uint64_t size = radeon_bo_size(bo), last_offset = 0;
	unsigned last_pfn;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return -ENOMEM;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = offset;
	bo_va->eoffset = offset + size;
	bo_va->flags = flags;
	bo_va->valid = false;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);
	/* make sure object fits at this offset */
	if (bo_va->soffset >= bo_va->eoffset) {
		kfree(bo_va);
		return -EINVAL;
	}

	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
	if (last_pfn > rdev->vm_manager.max_pfn) {
		kfree(bo_va);
		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, rdev->vm_manager.max_pfn);
		return -EINVAL;
	}

	mutex_lock(&vm->mutex);
	if (last_pfn > vm->last_pfn) {
		/* release mutex and lock in right order */
		mutex_unlock(&vm->mutex);
		mutex_lock(&rdev->vm_manager.lock);
		mutex_lock(&vm->mutex);
		/* and check again */
		if (last_pfn > vm->last_pfn) {
			/* grow va space 32M by 32M */
			unsigned align = ((32 << 20) >> 12) - 1;
			radeon_vm_unbind_locked(rdev, vm);
			vm->last_pfn = (last_pfn + align) & ~align;
		}
		mutex_unlock(&rdev->vm_manager.lock);
	}
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			kfree(bo_va);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}
	list_add(&bo_va->vm_list, head);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);
	return 0;
}

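/**
 * radeon_vm_get_addr - get the physical address a buffer pfn maps to
 *
 * @rdev: radeon_device pointer
 * @mem: ttm placement of the buffer
 * @pfn: GPU page frame number within the buffer
 *
 * For VRAM placements this returns the MC address; for GTT placements
 * it looks up the backing system page in gart.pages_addr[] and adds the
 * sub-page offset for the case where CPU and GPU page sizes differ.
 */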
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
			      struct ttm_mem_reg *mem,
			      unsigned pfn)
{
	u64 addr = 0;

	switch (mem->mem_type) {
	case TTM_PL_VRAM:
		addr = (mem->start << PAGE_SHIFT);
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr += rdev->vm_manager.vram_base_offset;
		break;
	case TTM_PL_TT:
		/* offset inside page table */
		addr = mem->start << PAGE_SHIFT;
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr = addr >> PAGE_SHIFT;
		/* page table offset */
		addr = rdev->gart.pages_addr[addr];
		/* in case cpu page size != gpu page size */
		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
		break;
	default:
		break;
	}
	return addr;
}

/* object has to be reserved & global and local mutex must be locked */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
{
	struct radeon_bo_va *bo_va;
	unsigned ngpu_pages, i;
	uint64_t addr = 0, pfn;
	uint32_t flags;

	/* nothing to do if vm isn't bound */
	if (vm->id == -1)
		return 0;

	bo_va = radeon_bo_va(bo, vm);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (bo_va->valid)
		return 0;

	ngpu_pages = radeon_bo_ngpu_pages(bo);
	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		}
	}
	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
	for (i = 0, addr = 0; i < ngpu_pages; i++) {
		if (mem && bo_va->valid) {
			addr = radeon_vm_get_addr(rdev, mem, i);
		}
		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
	}
	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
	return 0;
}

/* object has to be reserved */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = radeon_bo_va(bo, vm);
	if (bo_va == NULL)
		return 0;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
	mutex_unlock(&rdev->vm_manager.lock);
	list_del(&bo_va->vm_list);
	mutex_unlock(&vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return 0;
}

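/**
 * radeon_vm_bo_invalidate - mark a bo's vm mappings as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Marks every virtual-address mapping of the bo invalid so the page
 * table entries get rewritten on the next update.  The bo must be
 * reserved by the caller.
 */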
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	BUG_ON(!atomic_read(&bo->tbo.reserved));
	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

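/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: the vm to initialize
 *
 * Initializes the vm's bookkeeping and maps the ib pool buffer at
 * virtual address 0, read only and snooped.
 */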
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	int r;

	vm->id = -1;
	vm->fence = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
	vm->last_pfn = 0;
	/* map the ib pool buffer at 0 in virtual address space, set
	 * read only
	 */
	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
	return r;
}

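/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: the vm to tear down
 *
 * Unbinds the vm from the hardware and removes all remaining bo
 * mappings, including the ib pool mapping added in radeon_vm_init().
 */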
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	mutex_unlock(&rdev->vm_manager.lock);

	/* remove all bo */
	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
	if (!r) {
		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
		list_del_init(&bo_va->bo_list);
		list_del_init(&bo_va->vm_list);
		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		kfree(bo_va);
	}
	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	mutex_unlock(&vm->mutex);
}