/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

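/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Release the drm_mm node backing @bo's current placement, if any,
 * under the global LRU lock, and clear bo->mem.mm_node.
 */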
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (old_mem->mm_node) {
                spin_lock(&bo->glob->lru_lock);
                drm_mm_put_block(old_mem->mm_node);
                spin_unlock(&bo->glob->lru_lock);
        }
        old_mem->mm_node = NULL;
}

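/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: true if this move is an eviction (unused here; part of the
 * common move-function signature).
 * @no_wait_reserve: Return rather than sleep on reserve (unused here).
 * @no_wait_gpu: Return rather than wait for the GPU (unused here).
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move for a buffer object whose old and new placements are
 * both backed by a TTM. Frees any old node, updates the TTM's caching
 * state and binding, copies *@new_mem into bo->mem and sets
 * (@new_mem)->mm_node to NULL.
 * Returns:
 * !0: Failure.
 */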
52int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
Jerome Glisse9d87fa22010-04-07 10:21:19 +000053 bool evict, bool no_wait_reserve,
54 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
Thomas Hellstromba4e7d92009-06-10 15:20:19 +020055{
56 struct ttm_tt *ttm = bo->ttm;
57 struct ttm_mem_reg *old_mem = &bo->mem;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +020058 int ret;
59
60 if (old_mem->mem_type != TTM_PL_SYSTEM) {
61 ttm_tt_unbind(ttm);
62 ttm_bo_free_old_node(bo);
63 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
64 TTM_PL_MASK_MEM);
65 old_mem->mem_type = TTM_PL_SYSTEM;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +020066 }
67
68 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
69 if (unlikely(ret != 0))
70 return ret;
71
72 if (new_mem->mem_type != TTM_PL_SYSTEM) {
73 ret = ttm_tt_bind(ttm, new_mem);
74 if (unlikely(ret != 0))
75 return ret;
76 }
77
78 *old_mem = *new_mem;
79 new_mem->mm_node = NULL;
Austin Yuan110b20c2010-01-21 13:45:40 +080080
Thomas Hellstromba4e7d92009-06-10 15:20:19 +020081 return 0;
82}
83EXPORT_SYMBOL(ttm_bo_move_ttm);
84
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        int ret;

        if (!mem->bus.io_reserved) {
                mem->bus.io_reserved = true;
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

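/**
 * ttm_mem_io_free
 *
 * @bdev: Pointer to the buffer object device.
 * @mem: Memory region whose I/O reservation should be released.
 *
 * Release a reservation made by ttm_mem_io_reserve(), calling the
 * driver's io_mem_free() callback and clearing the io_reserved flag.
 * A no-op when the driver has no io_mem_reserve() callback or when
 * nothing is reserved.
 */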
void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        if (bdev->driver->io_mem_reserve) {
                if (mem->bus.io_reserved) {
                        mem->bus.io_reserved = false;
                        bdev->driver->io_mem_free(bdev, mem);
                }
        }
}

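/**
 * ttm_mem_reg_ioremap
 *
 * @bdev: Pointer to the buffer object device.
 * @mem: Memory region to map.
 * @virtual: Returns the kernel virtual address, or NULL for
 * non-iomem regions.
 *
 * Reserve the region's bus space and map it into kernel address
 * space: write-combined if the placement asks for TTM_PL_FLAG_WC,
 * uncached otherwise. A pre-mapped region (mem->bus.addr) is used
 * directly. Undo with ttm_mem_reg_iounmap().
 */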
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_mem_io_reserve(bdev, mem);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

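/**
 * ttm_mem_reg_iounmap
 *
 * @bdev: Pointer to the buffer object device.
 * @mem: Memory region that was mapped.
 * @virtual: Kernel virtual address returned by ttm_mem_reg_ioremap().
 *
 * Unmap @virtual unless the region was pre-mapped by the driver, then
 * release the bus space reservation.
 */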
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        ttm_mem_io_free(bdev, mem);
}

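/*
 * Copy one page between two iomapped regions, 32 bits at a time.
 */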
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

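/*
 * Copy one page from an iomapped region into a TTM page. The TTM page
 * is mapped with the requested protection: kmap_atomic_prot() on x86,
 * and vmap() or kmap() elsewhere depending on whether a non-default
 * protection is needed.
 */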
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

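/*
 * Copy one page from a TTM page into an iomapped region; the mirror
 * image of ttm_copy_io_ttm_page() above.
 */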
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

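/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: true if this move is an eviction (unused here; part of the
 * common move-function signature).
 * @no_wait_reserve: Return rather than sleep on reserve (unused here).
 * @no_wait_gpu: Return rather than wait for the GPU (unused here).
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move that copies the buffer page by page with the CPU.
 * Both placements are ioremapped as needed; when the two regions
 * overlap within the same memory type the copy runs backwards. On
 * success the old node is freed and, for fixed memory types, the TTM
 * is unbound and destroyed.
 * Returns:
 * !0: Failure.
 */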
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_reserve, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        ttm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        spin_lock_init(&fbo->lock);
        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;

        *new_obj = fbo;
        return 0;
}

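/**
 * ttm_io_prot
 *
 * @caching_flags: The TTM caching flags of the map.
 * @tmp: Page protection flags for a normal, cached mapping.
 *
 * Derive the page protection for a map from its caching flags:
 * write-combined or uncached variants of @tmp on architectures that
 * distinguish them, @tmp itself otherwise.
 */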
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

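/*
 * Map part of an iomem buffer object into kernel space, either by
 * offsetting into the driver's pre-mapped address or by ioremapping
 * the bus window (write-combined when the placement allows it).
 */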
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

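/*
 * Map part of a TTM-backed buffer object into kernel space. A single
 * cached page is simply kmapped; anything else is populated and then
 * vmapped so the mapping is virtually contiguous and carries the
 * protection bits the placement requires.
 */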
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        struct page *d;
        int i;

        BUG_ON(!ttm);
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm_tt_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */
                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = ttm_tt_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

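/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Pointer to a struct ttm_bo_kmap_obj describing the map.
 *
 * Set up a kernel virtual mapping of part of a buffer object, using
 * ioremap for iomem placements and kmap or vmap otherwise. Keep @map
 * alive until the data is released with ttm_bo_kunmap().
 *
 * A minimal usage sketch (error paths abbreviated; assumes the caller
 * has reserved a hypothetical bo and keeps it reserved across the map):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	(read or write the buffer through virtual here)
 *	ttm_bo_kunmap(&map);
 *
 * Returns:
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */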
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

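/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Tear down a mapping set up by ttm_bo_kmap(), matching how it was
 * created: iounmap plus an I/O-space free for ioremapped regions,
 * vunmap or kunmap otherwise; premapped regions need no teardown.
 */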
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

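/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @sync_obj_arg: An argument to pass to the sync object wait functions.
 * @evict: This is an evict move; don't return until the buffer is idle.
 * @no_wait_reserve: Return rather than sleep on reserve (unused here).
 * @no_wait_gpu: Return rather than wait for the GPU (unused here).
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * To be called after an accelerated move has been scheduled. For an
 * eviction, wait for the buffer to idle, then free the old node and,
 * for fixed memory types, the TTM. Otherwise hand the old placement
 * to a ghost buffer object that is destroyed once the GPU is done
 * with it, so that ordinary moves can be pipelined.
 * Returns:
 * !0: Failure.
 */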
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
                              bool evict, bool no_wait_reserve,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bo->lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        bo->sync_obj_arg = sync_obj_arg;
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                ttm_bo_free_old_node(bo);
                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);