/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

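/**
 * ttm_bo_free_old_node - release the memory node backing the bo's old placement
 *
 * Hands the node referenced by @bo->mem back to its memory-type manager.
 */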
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

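/**
 * ttm_bo_move_ttm - move a bo by rebinding its TTM page array
 *
 * Unbinds from the old placement (unless it is system memory), frees the
 * old node, adjusts caching to match @new_mem and rebinds there. No data
 * copy is needed since the same system pages back both placements.
 */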
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

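/**
 * ttm_mem_io_lock - serialize io_mem_reserve / io_mem_free calls
 *
 * A no-op for drivers that support the reservation fastpath; otherwise
 * takes the manager's io_reserve_mutex, optionally interruptibly.
 */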
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

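/**
 * ttm_mem_io_evict - reclaim io space by unmapping the LRU reserved bo
 *
 * Picks the least recently used bo on the manager's io_reserve_lru and
 * tears down its virtual mappings, which in turn drops its io
 * reservation. Returns -EAGAIN when there is nothing to evict.
 */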
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

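/**
 * ttm_mem_io_reserve - reserve io space for a memory region
 *
 * Calls the driver's io_mem_reserve hook, retrying after evicting other
 * reserved regions on -EAGAIN. Unless the fastpath is enabled, callers
 * serialize against ttm_mem_io_free() via ttm_mem_io_lock().
 */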
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

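/**
 * ttm_mem_io_free - drop an io space reservation
 *
 * Counterpart to ttm_mem_io_reserve(); calls the driver's io_mem_free
 * hook once the region's reservation count drops to zero.
 */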
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

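/**
 * ttm_mem_io_reserve_vm - reserve io space on behalf of a CPU mapping
 *
 * Like ttm_mem_io_reserve(), but also marks the region as reserved for
 * vm use and puts the bo on the manager's io_reserve_lru so the
 * reservation can later be reclaimed through ttm_mem_io_evict().
 */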
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

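/**
 * ttm_mem_reg_ioremap - map a memory region into kernel address space
 *
 * Reserves the region's io space and, for iomem regions without a
 * driver-premapped kernel address, ioremaps it write-combined or
 * uncached according to the placement flags. On success *@virtual
 * points to the mapping, or is NULL for non-iomem regions.
 */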
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

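/**
 * ttm_copy_io_page - copy one page between two io mappings
 *
 * Copies PAGE_SIZE bytes at page index @page from @src to @dst using
 * 32-bit io accessors.
 */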
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

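/**
 * ttm_copy_io_ttm_page - copy one page from an io mapping into a TTM page
 *
 * Maps the destination TTM page with the protection bits requested by
 * the placement (kmap_atomic_prot() on x86, vmap()/kmap() elsewhere)
 * and copies PAGE_SIZE bytes from the io source.
 */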
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

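/**
 * ttm_copy_ttm_io_page - copy one TTM page out to an io mapping
 *
 * Mirror image of ttm_copy_io_ttm_page(): maps the source TTM page with
 * the requested protection and copies it to the io destination.
 */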
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

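/**
 * ttm_bo_move_memcpy - fallback bo move using a CPU copy
 *
 * Maps both the old and the new placement and copies page by page,
 * choosing the copy routine based on which side is io memory, and
 * copying backwards when the regions may overlap. When moving to
 * fixed (VRAM-like) memory the TTM page array is destroyed after
 * the copy.
 */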
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	/*
	 * Initialize from *old_mem so the error paths below unmap and
	 * free a valid region even when we bail out before reaching out2.
	 */
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node: the bo still lives in its old
	 * placement, so its node must not be handed back to the manager.
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	fbo->sync_obj_read = driver->sync_obj_ref(bo->sync_obj_read);
	fbo->sync_obj_write = driver->sync_obj_ref(bo->sync_obj_write);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

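/**
 * ttm_io_prot - derive page protection bits from TTM caching flags
 *
 * Adjusts @tmp for write-combined or uncached mappings on architectures
 * that distinguish them; cached placements pass through unchanged.
 */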
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

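/**
 * ttm_bo_ioremap - set up a kernel mapping for part of an iomem bo
 *
 * Uses the driver-provided bus address when one exists (premapped),
 * otherwise ioremaps the range write-combined or uncached according
 * to the placement flags.
 */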
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset +
						       offset, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

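/**
 * ttm_bo_kmap_ttm - kmap a range of a system-memory-backed bo
 *
 * A single cached page is mapped with kmap(); anything else goes
 * through vmap() so the mapping is virtually contiguous and carries
 * the page protection matching the bo's placement.
 */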
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

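/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 *
 * Reserves io space if needed, then dispatches to ttm_bo_ioremap() for
 * iomem-backed bos or ttm_bo_kmap_ttm() for system-memory-backed ones.
 * The bo must be reserved by the caller, and the mapping released with
 * ttm_bo_kunmap().
 *
 * Typical driver usage (a sketch, error handling elided):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... access ptr, honoring is_iomem ...
 *	ttm_bo_kunmap(&map);
 */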
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

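/**
 * ttm_bo_kunmap - tear down a mapping set up by ttm_bo_kmap()
 *
 * Undoes the mapping according to its bo_kmap_type and drops the io
 * reservation taken at map time.
 */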
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

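/**
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move
 *
 * Typically called from a driver's move hook after the GPU copy has
 * been emitted and fenced with @sync_obj. On eviction, waits for the
 * fence and frees the old node synchronously. Otherwise the old memory
 * is hung on a ghost buffer object that is released once the GPU copy
 * completes, allowing ordinary moves to be pipelined.
 */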
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		tmp_obj = bo->sync_obj;
	if (bo->sync_obj_read)
		tmp_obj_read = bo->sync_obj_read;
	if (bo->sync_obj_write)
		tmp_obj_write = bo->sync_obj_write;

	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_read = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_write = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false,
				  TTM_USAGE_READWRITE);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (tmp_obj_read)
			driver->sync_obj_unref(&tmp_obj_read);
		if (tmp_obj_write)
			driver->sync_obj_unref(&tmp_obj_write);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (tmp_obj_read)
			driver->sync_obj_unref(&tmp_obj_read);
		if (tmp_obj_write)
			driver->sync_obj_unref(&tmp_obj_write);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);