/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

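/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Release the memory node backing the buffer object's current placement.
 */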
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

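/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Whether this move is an eviction (unused here).
 * @no_wait_gpu: Whether to block on GPU activity (unused here).
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Move a TTM-backed buffer object: unbind and drop the old placement
 * if it isn't system memory, update the caching state of the TTM
 * pages, and bind to the new placement. On success @new_mem is
 * consumed and @bo->mem describes the new placement.
 */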
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

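/**
 * ttm_mem_io_lock
 *
 * @man: The memory type manager.
 * @interruptible: Whether to sleep interruptibly on the mutex.
 *
 * Take the manager's io_reserve_mutex unless the manager uses the
 * lockless fast path. When @interruptible is true this returns
 * whatever mutex_lock_interruptible() returns, otherwise 0.
 */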
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

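/* Counterpart of ttm_mem_io_lock(); a no-op on the fast path. */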
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

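/*
 * Unmap the first buffer object on the manager's io_reserve LRU so its
 * I/O space can be reused. Called with the io_reserve_mutex held.
 * Returns -EAGAIN when there is nothing left to evict.
 */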
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

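/**
 * ttm_mem_io_reserve
 *
 * @bdev: The buffer object device.
 * @mem: The memory region to reserve I/O space for.
 *
 * Ask the driver to reserve I/O (e.g. aperture) space for @mem,
 * evicting other buffers' reservations and retrying for as long as
 * the driver reports -EAGAIN. On the fast path the driver hook is
 * called directly, bypassing the io_reserved_count reference count.
 */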
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

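/**
 * ttm_mem_io_free
 *
 * @bdev: The buffer object device.
 * @mem: The memory region whose I/O reservation is dropped.
 *
 * Drop one reference to the I/O reservation of @mem and ask the
 * driver to free it when the count reaches zero. A no-op on the
 * fast path.
 */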
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

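/*
 * Reserve I/O space for CPU (vm) access to the buffer object and put
 * the object on the manager's io_reserve LRU so the reservation can
 * be evicted later if needed.
 */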
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

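/*
 * Release a vm I/O reservation taken by ttm_mem_io_reserve_vm() and
 * take the buffer object off the io_reserve LRU.
 */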
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

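/**
 * ttm_mem_reg_ioremap
 *
 * @bdev: The buffer object device.
 * @mem: The memory region to map.
 * @virtual: Returns the kernel virtual address, or NULL for
 * non-iomem regions.
 *
 * Reserve and ioremap the I/O space of @mem, honoring the region's
 * write-combining placement flag and reusing a driver-provided
 * mapping when mem->bus.addr is already set. Torn down with
 * ttm_mem_reg_iounmap().
 */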
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

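/**
 * ttm_mem_reg_iounmap
 *
 * @bdev: The buffer object device.
 * @mem: The memory region that was mapped.
 * @virtual: The kernel virtual address returned by ttm_mem_reg_ioremap().
 *
 * Unmap @virtual unless it points at a driver-provided mapping, then
 * drop the I/O reservation of @mem.
 */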
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

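/* Copy a single page between two ioremapped regions, 32 bits at a time. */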
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

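/*
 * Copy one page from an ioremapped source into a TTM page, mapping the
 * destination with the requested page protection. Uses atomic kmaps on
 * x86 and vmap()/kmap() elsewhere.
 */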
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

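/*
 * Copy one TTM page out to an ioremapped destination; the mirror image
 * of ttm_copy_io_ttm_page().
 */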
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

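/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Whether this move is an eviction (unused here).
 * @no_wait_gpu: Whether to block on GPU activity (unused here).
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Fallback move function that copies the buffer page by page with the
 * CPU, choosing the copy direction so that overlapping moves within
 * the same memory type are handled correctly. On success the old
 * memory node is released and @bo->mem describes the new placement.
 */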
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM might be NULL for moves within the same region. */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/* If we fail here, don't nuke the mm node
			 * as the bo still owns it. */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* Failing here means we keep the old copy as-is. */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

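/**
 * ttm_io_prot
 *
 * @caching_flags: The TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: The base page protection value.
 *
 * Adjust a page protection value to match the caching attributes
 * requested by the placement, in an architecture-specific way.
 */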
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

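/*
 * Map part of an iomem-backed buffer object, either through a
 * driver-provided virtual address or with ioremap()/ioremap_wc().
 */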
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

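/*
 * Map part of a TTM-backed buffer object, populating it first if
 * necessary. A single cached page is kmapped directly; everything
 * else goes through vmap() with an appropriate page protection.
 */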
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

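/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object to map.
 * @start_page: The first page to map.
 * @num_pages: The number of pages to map.
 * @map: Returns the mapping state.
 *
 * Set up a kernel virtual mapping of part of a buffer object,
 * reserving I/O space first when the object lives in iomem. The
 * mapping is released with ttm_bo_kunmap().
 */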
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

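/**
 * ttm_bo_kunmap
 *
 * @map: The mapping state returned by ttm_bo_kmap().
 *
 * Tear down a mapping set up by ttm_bo_kmap() according to its type
 * and drop the I/O reservation taken for it.
 */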
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

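/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @evict: Whether this move is an eviction.
 * @no_wait_gpu: Whether to block on GPU activity (unused here).
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Finish up an accelerated move: for evictions, wait for the move to
 * complete and free the old node; otherwise hang the old placement on
 * a ghost buffer object that is released once the GPU is done with it.
 */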
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);