/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

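/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Release the drm_mm node backing @bo's current placement, if any,
 * under the device LRU lock, and clear the node pointer.
 */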
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (old_mem->mm_node) {
                spin_lock(&bo->bdev->lru_lock);
                drm_mm_put_block(old_mem->mm_node);
                spin_unlock(&bo->bdev->lru_lock);
        }
        old_mem->mm_node = NULL;
}

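/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Unused by this implementation.
 * @no_wait: Unused by this implementation.
 * @new_mem: The struct ttm_mem_reg to move to.
 *
 * Move a buffer backed by a TTM page array: unbind it from the old
 * aperture placement (if any), free the old memory node, adjust the
 * TTM caching state to match @new_mem, and rebind unless the new
 * placement is system memory.
 * Returns:
 * !0: Failure.
 */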
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        uint32_t save_flags = old_mem->placement;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
                save_flags = old_mem->placement;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

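/**
 * ttm_mem_reg_ioremap
 *
 * @bdev: A pointer to a struct ttm_bo_device.
 * @mem: The memory region to map.
 * @virtual: Returned kernel virtual address, or NULL if the region
 * has no I/O aperture backing.
 *
 * Map an aperture memory region into kernel virtual space, either by
 * reusing the manager's premapped range or by an ioremap honoring the
 * region's caching flags.
 * Returns:
 * !0: Failure.
 */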
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(bus_base + bus_offset, bus_size);
                else
                        addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}

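/**
 * ttm_mem_reg_iounmap
 *
 * @bdev: A pointer to a struct ttm_bo_device.
 * @mem: The memory region that was mapped.
 * @virtual: The kernel virtual address returned by ttm_mem_reg_ioremap.
 *
 * Tear down a mapping set up by ttm_mem_reg_ioremap. Premapped ranges
 * are left alone; only ranges that were actually ioremapped are unmapped.
 */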
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
                iounmap(virtual);
}

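/*
 * Page-wise copy helpers for ttm_bo_move_memcpy: io->io, io->ttm and
 * ttm->io. @page is a page offset from the start of either region.
 */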
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page)
{
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap(d);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap(d);
        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page)
{
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap(s);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
        kunmap(s);
        return 0;
}

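/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Unused by this implementation.
 * @no_wait: Unused by this implementation.
 * @new_mem: The struct ttm_mem_reg to move to.
 *
 * Fallback move path: map both placements and copy the buffer contents
 * page by page with the CPU, then free the old node and, for fixed
 * memory, destroy the now-unneeded TTM.
 * Returns:
 * !0: Failure.
 */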
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint32_t save_flags = old_mem->placement;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

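        /*
         * If the two ranges may overlap within the same memory type,
         * copy back to front so source pages are read before they are
         * overwritten.
         */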
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL)
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
                else if (new_iomap == NULL)
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
                else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        ttm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        spin_lock_init(&fbo->lock);
        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;

        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        if (fbo->mem.mm_node)
                fbo->mem.mm_node->private = (void *)fbo;
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;

        *new_obj = fbo;
        return 0;
}

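/**
 * ttm_io_prot
 *
 * @caching_flags: The TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: The base page protection to modify.
 *
 * Compute the page protection for a non-default-cached mapping,
 * applying the architecture-specific write-combined or uncached
 * attributes implied by @caching_flags.
 */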
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

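/*
 * Helper for ttm_bo_kmap: map an aperture range either through the
 * manager's premapped virtual address or with a fresh ioremap.
 */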
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long bus_base,
                          unsigned long bus_offset,
                          unsigned long bus_size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bus_base + bus_offset,
                                                  bus_size);
                else
                        map->virtual = ioremap_nocache(bus_base + bus_offset,
                                                       bus_size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

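/*
 * Helper for ttm_bo_kmap: map system pages, using kmap for a single
 * cached page and vmap when several pages or a non-default page
 * protection are needed.
 */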
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_tt *ttm = bo->ttm;
        struct page *d;
        pgprot_t prot;
        int i;

        BUG_ON(!ttm);
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm_tt_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */
                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = ttm_tt_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

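/**
 * ttm_bo_kmap
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @start_page: The first page to map, relative to the start of @bo.
 * @num_pages: Number of pages to map.
 * @map: The struct ttm_bo_kmap_obj describing the resulting mapping.
 *
 * Map part of a buffer object into kernel address space, using ioremap
 * for aperture placements and kmap or vmap for system pages. Release
 * the mapping with ttm_bo_kunmap().
 * Returns:
 * !0: Failure.
 */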
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        int ret;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        if (ret)
                return ret;
        if (bus_size == 0) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                bus_offset += start_page << PAGE_SHIFT;
                bus_size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

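/*
 * Example (an illustrative sketch, not part of the original file): a
 * driver wanting temporary CPU access to the first page of a reserved,
 * system-placed buffer object could pair ttm_bo_kmap() with
 * ttm_bo_kunmap():
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *
 *	if (ret == 0) {
 *		memset(map.virtual, 0, PAGE_SIZE);
 *		ttm_bo_kunmap(&map);
 *	}
 */

/**
 * ttm_bo_kunmap
 *
 * @map: The struct ttm_bo_kmap_obj filled in by a successful ttm_bo_kmap.
 *
 * Unmap a mapping set up by ttm_bo_kmap, undoing whichever of ioremap,
 * vmap or kmap was used to create it.
 */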
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

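/**
 * ttm_bo_pfn_prot
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @dst_offset: Byte offset into @bo.
 * @pfn: Returned page frame number of the page at @dst_offset.
 * @prot: Returned page protection matching the bo's caching flags.
 *
 * Look up the page frame number and page protection needed to map the
 * page at @dst_offset, whether it lives in an aperture or in system
 * memory.
 * Returns:
 * !0: Failure.
 */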
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
                    unsigned long dst_offset,
                    unsigned long *pfn, pgprot_t *prot)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;

        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
                                &bus_size);
        if (ret)
                return -EINVAL;
        if (bus_size != 0)
                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
        else if (!bo->ttm)
                return -EINVAL;
        else
                *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
                                                   dst_offset >> PAGE_SHIFT));
        *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

        return 0;
}

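/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @sync_obj_arg: An argument to pass to the sync object functions.
 * @evict: This is an evict move; wait for the buffer to be idle.
 * @no_wait: Unused by this implementation.
 * @new_mem: The struct ttm_mem_reg to move to.
 *
 * To be called after an accelerated move has been scheduled. For an
 * eviction, wait for idle and release the old placement. Otherwise
 * transfer the old placement to a ghost buffer object that is destroyed,
 * and its resources released, once the GPU operation signals, so that
 * ordinary moves can be pipelined.
 * Returns:
 * !0: Failure.
 */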
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
                              bool evict, bool no_wait,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        uint32_t save_flags = old_mem->placement;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bo->lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        bo->sync_obj_arg = sync_obj_arg;
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                ttm_bo_free_old_node(bo);
                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);