/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
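
/*
 * A minimal usage sketch (illustrative, not part of this file): a driver's
 * ->move() callback might delegate TT <-> system moves to ttm_bo_move_ttm()
 * and fall back to ttm_bo_move_memcpy() below for everything else.
 * "my_driver_move" and the placement checks are assumptions about a
 * hypothetical driver, not an API defined here.
 *
 *	static int my_driver_move(struct ttm_buffer_object *bo, bool evict,
 *				  bool interruptible, bool no_wait_gpu,
 *				  struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		// TT <-> system moves only (un)bind page tables; no copy.
 *		if ((old_mem->mem_type == TTM_PL_TT &&
 *		     new_mem->mem_type == TTM_PL_SYSTEM) ||
 *		    (old_mem->mem_type == TTM_PL_SYSTEM &&
 *		     new_mem->mem_type == TTM_PL_TT))
 *			return ttm_bo_move_ttm(bo, evict, no_wait_gpu,
 *					       new_mem);
 *
 *		// Everything else: CPU copy fallback.
 *		return ttm_bo_move_memcpy(bo, evict, interruptible,
 *					  no_wait_gpu, new_mem);
 *	}
 */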

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
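
/*
 * A usage sketch (illustrative only): callers reserving I/O space for a
 * buffer's memory region take the manager's io_reserve lock around the
 * reserve/free pair, exactly as ttm_mem_reg_ioremap() does below.
 *
 *	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	// ... use mem->bus.{base,offset,size,addr,is_iomem} ...
 *	ttm_mem_io_free(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */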

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
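
/*
 * A note on the dir/add arithmetic above: when the move stays within one
 * memory type and the ranges may overlap, the loop walks backwards
 * (dir = -1, starting at the last page) so source pages are not clobbered
 * before they are read; the same defensive idea as memmove(). A minimal
 * standalone model of the technique (illustrative, not part of this file;
 * copy_one_page is a stand-in helper):
 *
 *	static void copy_page_range(struct page **dst, struct page **src,
 *				    unsigned long n, bool copy_backwards)
 *	{
 *		int dir = copy_backwards ? -1 : 1;
 *		unsigned long add = copy_backwards ? n - 1 : 0;
 *		unsigned long i;
 *
 *		// i * dir + add yields 0..n-1 forwards, n-1..0 backwards.
 *		for (i = 0; i < n; ++i)
 *			copy_one_page(dst[i * dir + add], src[i * dir + add]);
 *	}
 */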

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
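
/*
 * A usage sketch (illustrative only): fault handlers typically feed the
 * region's placement flags through ttm_io_prot() to derive the page
 * protection for a mapping, e.g. for a user-space vma:
 *
 *	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *					vm_get_page_prot(vma->vm_flags));
 */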

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
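
/*
 * A usage sketch (illustrative only; "data" and "len" are assumed caller
 * context): drivers pair ttm_bo_kmap() and ttm_bo_kunmap() to get a
 * temporary CPU view of a reserved buffer. ttm_kmap_obj_virtual() returns
 * the mapping and reports whether it is I/O memory, which dictates
 * memcpy() vs. memcpy_toio():
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memcpy_toio((void __iomem *)virt, data, len);
 *	else
 *		memcpy(virt, data, len);
 *	ttm_bo_kunmap(&map);
 */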

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
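
/*
 * A usage sketch (illustrative only): after scheduling a hardware blit, a
 * driver hands the resulting fence to ttm_bo_move_accel_cleanup() so TTM
 * can release the old placement once the copy signals. "my_schedule_copy"
 * is an assumed driver-side helper, not a TTM API; the function takes its
 * own fence references, so the caller drops its reference afterwards.
 *
 *	struct fence *fence;
 *	int ret;
 *
 *	fence = my_schedule_copy(bo, old_mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *	fence_put(fence);
 *	return ret;
 */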

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || fence_is_later(fence, from->move)) {
			fence_put(from->move);
			from->move = fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
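
/*
 * A usage sketch (illustrative only): ttm_bo_pipeline_move() is the fully
 * asynchronous variant of ttm_bo_move_accel_cleanup(); even evictions from
 * fixed memory are not waited for, only recorded in the source manager's
 * move fence. "my_schedule_copy" is again an assumed driver-side helper,
 * not a TTM API.
 *
 *	fence = my_schedule_copy(bo, old_mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 *	fence_put(fence);
 *	return ret;
 */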