/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

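/**
 * ttm_bo_free_old_node - release the memory node of the current placement
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Frees the drm_mm node backing @bo's current placement, if any, taking
 * the global LRU lock around the release, and clears the mm_node pointer.
 */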
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}

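/**
 * ttm_bo_move_ttm - move a TTM-backed buffer by rebinding its TTM
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Unused by this move helper.
 * @no_wait: Unused by this move helper.
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * If the buffer currently resides outside system memory, it is first
 * unbound and its old node freed. The TTM page caching is then switched
 * to match @new_mem, and the TTM is bound to the new memory type unless
 * that type is TTM_PL_SYSTEM.
 */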
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

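/**
 * ttm_mem_reg_ioremap - map a memory region's I/O space into the kernel
 *
 * @bdev: The buffer object device.
 * @mem: The memory region to map.
 * @virtual: Returns the kernel virtual address, or NULL if the region
 * has no I/O backing (bus_size == 0).
 *
 * Uses the memory type manager's pre-mapped io_addr when available,
 * otherwise ioremaps the region write-combined or uncached depending on
 * the placement flags.
 */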
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

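/**
 * ttm_mem_reg_iounmap - tear down a mapping set up by ttm_mem_reg_ioremap()
 *
 * @bdev: The buffer object device.
 * @mem: The memory region that was mapped.
 * @virtual: The kernel virtual address returned by ttm_mem_reg_ioremap().
 *
 * Only calls iounmap() when the memory type actually required an ioremap;
 * pre-mapped regions are left alone.
 */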
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

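/*
 * Page-wise copy helpers used by ttm_bo_move_memcpy():
 * ttm_copy_io_page() copies between two I/O mappings,
 * ttm_copy_io_ttm_page() copies from an I/O mapping into a TTM page, and
 * ttm_copy_ttm_io_page() copies from a TTM page into an I/O mapping.
 * The TTM page is mapped with the caller-supplied page protection, using
 * kmap_atomic_prot() on x86 and vmap()/kmap() elsewhere.
 */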
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

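/**
 * ttm_bo_move_memcpy - fallback buffer move done with CPU copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Unused by this move helper.
 * @no_wait: Unused by this move helper.
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Maps the old and new placements, copies the contents page by page
 * (iterating back to front when the two regions are in the same memory
 * type and may overlap), then frees the old node. If the destination
 * memory type is fixed (not backed by a TTM), the TTM is unbound and
 * destroyed after the copy.
 */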
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

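/**
 * ttm_io_prot - derive the page protection for a given caching request
 *
 * @caching_flags: TTM_PL_FLAG_* caching flags from the placement.
 * @tmp: The base pgprot_t to modify.
 *
 * Adjusts @tmp for write-combined or uncached mappings according to the
 * architecture-specific rules below; on architectures without such rules
 * the base protection is returned unchanged.
 */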
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

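/*
 * ttm_bo_kmap() is built on two helpers: ttm_bo_ioremap() maps buffers
 * that live in an I/O aperture (e.g. VRAM), either reusing the memory
 * type's pre-mapped io_addr or ioremapping the range, while
 * ttm_bo_kmap_ttm() maps TTM-backed (system) pages with kmap() in the
 * single cached-page case and vmap() otherwise.
 */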
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

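/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 *
 * @bo: The buffer object.
 * @start_page: First page of the range to map.
 * @num_pages: Number of pages to map.
 * @map: Returns the mapping; pass it unchanged to ttm_bo_kunmap().
 *
 * Dispatches to ttm_bo_ioremap() for buffers with an I/O backing and to
 * ttm_bo_kmap_ttm() otherwise. Illustrative use (error handling trimmed):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *ptr;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... read or write the page through ptr ...
 *	ttm_bo_kunmap(&map);
 */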
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

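/**
 * ttm_bo_kunmap - unmap a mapping obtained with ttm_bo_kmap()
 *
 * @map: The map object filled in by ttm_bo_kmap().
 *
 * Tears the mapping down according to the recorded bo_kmap_type
 * (iounmap, vunmap or kunmap; pre-mapped regions need no teardown)
 * and clears the map object.
 */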
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

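/**
 * ttm_bo_pfn_prot - look up the pfn and page protection for a buffer offset
 *
 * @bo: The buffer object.
 * @dst_offset: Byte offset into the buffer.
 * @pfn: Returns the page frame number backing that offset.
 * @prot: Returns the page protection to use when mapping it.
 *
 * For buffers with an I/O backing the pfn is computed from the bus
 * address; otherwise it is taken from the TTM page at that offset.
 */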
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

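/**
 * ttm_bo_move_accel_cleanup - finish off an accelerated (GPU) move
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: The sync object (fence) signaling completion of the move.
 * @sync_obj_arg: Driver-private argument associated with @sync_obj.
 * @evict: Whether this move is an eviction.
 * @no_wait: Unused by this helper.
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Attaches @sync_obj to the buffer. On eviction the function waits for
 * the move to complete and frees the old node immediately; otherwise the
 * old placement is handed over to a ghost buffer object that is released
 * once the fence signals, so ordinary moves can be pipelined.
 */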
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);