/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

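/*
 * Release the memory-manager node backing the buffer's current placement.
 */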
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

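/*
 * Move a buffer backed by a struct ttm_tt: tear down the old placement,
 * switch the caching attributes of the backing pages to match the new
 * placement, and, if the new placement is not system memory, bind the
 * TTM there. No buffer data is copied; only the binding changes.
 */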
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

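/*
 * ttm_mem_io_reserve() / ttm_mem_io_free() bracket the driver-side setup
 * of the I/O aperture behind a memory region. The bus.io_reserved flag
 * makes both calls idempotent, so a region may be reserved or freed more
 * than once without calling into the driver again.
 */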
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	int ret;

	if (!mem->bus.io_reserved) {
		mem->bus.io_reserved = true;
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve) {
		if (mem->bus.io_reserved) {
			mem->bus.io_reserved = false;
			bdev->driver->io_mem_free(bdev, mem);
		}
	}
}

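/*
 * Map a memory region into kernel address space for a CPU-side copy.
 * On success, *virtual is either the driver-provided mapping (bus.addr)
 * or a fresh ioremap of the region, write-combined when the placement
 * asks for it. *virtual stays NULL for regions that are not iomem.
 */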
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}

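/*
 * Copy one page between two iomapped regions, 32 bits at a time, going
 * through the ioread32()/iowrite32() accessors so both sides are touched
 * with I/O-safe loads and stores.
 */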
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

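/*
 * The two helpers below copy a single page between an iomapped region
 * and a TTM page. The TTM page is mapped with the page protection of its
 * placement: via kmap_atomic_prot() on x86, and via a one-page vmap()
 * elsewhere whenever a non-default protection is required.
 */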
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

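/*
 * Fallback move that copies the buffer contents through the CPU, page by
 * page; either side may be iomem or TTM pages. When source and
 * destination are in the same memory type and their ranges may overlap,
 * the copy direction is flipped, memmove-style, so that pages are not
 * overwritten before they have been read.
 */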
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

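/*
 * Destroy callback for the "ghost" objects created by
 * ttm_buffer_object_transfer() below.
 */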
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

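/*
 * Derive the page protection for a mapping from the placement caching
 * flags: write-combined or uncached on architectures that can express
 * it, the caller's protection unchanged otherwise.
 */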
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

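/*
 * Kmap helper for the iomem case: reuse the driver-provided mapping if
 * there is one, otherwise ioremap the requested span of the region.
 */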
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

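/*
 * Kmap helper for the system/TTM case. A single cached page can be
 * kmapped directly; anything else is populated and then vmapped so the
 * mapping is virtually contiguous and carries the right page protection.
 */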
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

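/*
 * Map num_pages of a buffer object, starting at start_page, into kernel
 * address space, dispatching to the iomem or TTM helper above. A
 * hypothetical caller might do (sketch only, not from this file; assumes
 * the bo is reserved and uses ttm_kmap_obj_virtual() from ttm_bo_api.h):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *
 *	if (ttm_bo_kmap(bo, 0, 1, &map))
 *		return -ENOMEM;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
 *	else
 *		memset(virtual, 0, PAGE_SIZE);
 *	ttm_bo_kunmap(&map);
 */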
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

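/*
 * Tear down a mapping set up by ttm_bo_kmap(), using the unmap primitive
 * that matches how the mapping was created.
 */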
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

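/*
 * Finish an accelerated (GPU) move that has already been scheduled and
 * fenced with sync_obj. On eviction we must wait for the hardware to be
 * done with the old placement before tearing it down; otherwise the old
 * memory is handed to a throw-away "ghost" object that is destroyed, and
 * the space released, once the fence signals.
 */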
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);