/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
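
/*
 * Usage sketch (illustrative only, not called from this file): the pair
 * above must be used with the same protection value, and unmaps must nest
 * in reverse order of the maps. "page" and "placement" are assumed to be
 * supplied by the caller:
 *
 *	pgprot_t prot = ttm_io_prot(placement, PAGE_KERNEL);
 *	void *va = ttm_kmap_atomic_prot(page, prot);
 *
 *	if (va) {
 *		memset(va, 0, PAGE_SIZE);
 *		ttm_kunmap_atomic_prot(va, prot);
 *	}
 */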

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
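
/*
 * Illustrative sketch (hypothetical, not part of this file): a driver's
 * ttm_bo_driver ->move() callback typically uses ttm_bo_move_ttm() for
 * moves between the system and TT domains and falls back to
 * ttm_bo_move_memcpy() when no hardware copy engine can do the move.
 * "mydrv_bo_move" and the placement policy shown are assumptions:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
 *		     new_mem->mem_type == TTM_PL_TT) ||
 *		    (old_mem->mem_type == TTM_PL_TT &&
 *		     new_mem->mem_type == TTM_PL_SYSTEM))
 *			return ttm_bo_move_ttm(bo, ctx, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */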

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->bdev->glob->bo_count);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = reservation_object_trylock(fbo->resv);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
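
/*
 * Usage sketch (illustrative): callers derive the protection for a CPU
 * mapping from the buffer's placement flags, e.g. when populating a
 * userspace VMA in a fault handler ("vma" and "bo" assumed given):
 *
 *	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *					vma->vm_page_prot);
 */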

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
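
/*
 * Usage sketch (illustrative): with the buffer object reserved, a caller
 * can map its backing storage, check whether the mapping is iomem via
 * ttm_kmap_obj_virtual(), and unmap it again:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *
 *	if (!ret) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		if (!is_iomem)
 *			memset(virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */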
669
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200670int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
Chris Wilsonf54d1862016-10-25 13:00:45 +0100671 struct dma_fence *fence,
Maarten Lankhorst97a875c2012-11-28 11:25:44 +0000672 bool evict,
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200673 struct ttm_mem_reg *new_mem)
674{
675 struct ttm_bo_device *bdev = bo->bdev;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200676 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
677 struct ttm_mem_reg *old_mem = &bo->mem;
678 int ret;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200679 struct ttm_buffer_object *ghost_obj;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200680
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +0200681 reservation_object_add_excl_fence(bo->resv, fence);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200682 if (evict) {
Christian König8aa6d4f2016-04-06 11:12:04 +0200683 ret = ttm_bo_wait(bo, false, false);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200684 if (ret)
685 return ret;
686
Christian König4279cb12016-06-06 10:17:51 +0200687 if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200688 ttm_tt_destroy(bo->ttm);
689 bo->ttm = NULL;
690 }
Ben Skeggseac20952011-08-22 03:15:04 +0000691 ttm_bo_free_old_node(bo);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200692 } else {
693 /**
694 * This should help pipeline ordinary buffer moves.
695 *
696 * Hang old buffer memory on a new buffer object,
697 * and leave it to be released when the GPU
698 * operation has completed.
699 */
700
Chris Wilsonf54d1862016-10-25 13:00:45 +0100701 dma_fence_put(bo->moving);
702 bo->moving = dma_fence_get(fence);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200703
Daniel Vetterff7c60c2013-01-14 15:08:14 +0100704 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200705 if (ret)
706 return ret;
707
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +0200708 reservation_object_add_excl_fence(ghost_obj->resv, fence);
709
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200710 /**
711 * If we're not moving to fixed memory, the TTM object
712 * needs to stay alive. Otherwhise hang it on the ghost
713 * bo to be unbound and destroyed.
714 */
715
716 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
717 ghost_obj->ttm = NULL;
718 else
719 bo->ttm = NULL;
720
721 ttm_bo_unreserve(ghost_obj);
722 ttm_bo_unref(&ghost_obj);
723 }
724
725 *old_mem = *new_mem;
726 new_mem->mm_node = NULL;
Austin Yuan110b20c2010-01-21 13:45:40 +0800727
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200728 return 0;
729}
730EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
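
/*
 * Illustrative sketch: after scheduling a copy on a hardware engine, a
 * driver hands the resulting fence to ttm_bo_move_accel_cleanup() so that
 * the old placement is released (or parked on a ghost object) once the
 * copy completes. "mydrv_copy_buffer" is a hypothetical helper:
 *
 *	fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	return ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 */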

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	ttm_bo_unreserve(ghost);
	ttm_bo_unref(&ghost);

	return 0;
}