/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

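/*
 * ttm_bo_free_old_node - release the memory node backing the bo's
 * current placement by handing it back to the manager.
 */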
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

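/**
 * ttm_bo_move_ttm - move a ttm_tt-backed buffer by rebinding its pages
 *
 * No data is copied: if the old placement is not system memory, the
 * ttm_tt is unbound and the old node released; the ttm_tt is then
 * rebound into @new_mem unless that is system memory. The @evict and
 * @no_wait_gpu arguments are unused here and kept for symmetry with
 * the other move helpers.
 */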
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

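/*
 * ttm_mem_io_lock()/ttm_mem_io_unlock() serialize io_mem_reserve() and
 * io_mem_free() calls against the unmaps done by ttm_mem_io_evict().
 * Managers with io_reserve_fastpath set skip the mutex entirely.
 */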
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

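/*
 * Reclaim the io reservation of the oldest buffer on the manager's
 * io_reserve_lru by unmapping it. Called with the io reserve mutex
 * held. Returns -EAGAIN when there is nothing left to evict.
 */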
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

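/**
 * ttm_mem_io_reserve - refcounted wrapper around the driver's
 * io_mem_reserve() hook.
 *
 * On the slow path the reservation is refcounted through
 * mem->bus.io_reserved_count, and an -EAGAIN from the driver triggers
 * eviction of older io reservations before retrying.
 */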
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

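/*
 * ttm_mem_io_reserve_vm()/ttm_mem_io_free_vm() manage the long-term io
 * reservation a buffer needs while CPU mappings of it may exist, and
 * keep the buffer on the manager's io_reserve_lru so that reservation
 * can be reclaimed by ttm_mem_io_evict() under aperture pressure.
 */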
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

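/*
 * Kernel-map a whole memory region. On success, *virtual is either the
 * driver-provided bus address or a fresh ioremap of the region, and is
 * left NULL for regions that are not io-mapped (system memory). Undone
 * by ttm_mem_reg_iounmap().
 */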
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

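/*
 * Copy one page between two kernel-mapped io regions, 32 bits at a
 * time via ioread32()/iowrite32().
 */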
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

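/*
 * Copy one page from a kernel-mapped io region into a ttm page. The
 * destination page is mapped with the caching attributes in @prot:
 * kmap_atomic_prot() on x86, vmap()/kmap() elsewhere.
 */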
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

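/*
 * Copy one ttm page into a kernel-mapped io region; the source page is
 * mapped with the caching attributes in @prot, mirroring
 * ttm_copy_io_ttm_page().
 */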
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

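/**
 * ttm_bo_move_memcpy - fallback buffer move implemented as a CPU copy
 *
 * Maps both the old and the new placement and copies page by page,
 * walking backwards when the two regions are of the same type and
 * overlap. On failure the old node is left untouched so the buffer
 * stays intact.
 */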
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data; there is nothing to copy.
	 */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * TTM might be NULL for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/*
			 * If we fail here, don't nuke the mm node
			 * as the bo still owns it.
			 */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* Failing here means keeping the old copy as-is. */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

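/*
 * Destroy callback for the ghost objects created by
 * ttm_buffer_object_transfer(): the object is simply freed.
 */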
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, a negative error code on failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

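/**
 * ttm_io_prot - apply the caching requested in @caching_flags to the
 * page protection @tmp in an architecture-specific way, and return
 * the result.
 */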
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

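/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 *
 * Picks the cheapest mapping that honours the placement's caching:
 * kmap() for a single cached page, vmap() for multi-page or uncached
 * system memory, and ioremap (or the driver's premapped bus address)
 * for io memory. A sketch of the usual pattern (hypothetical driver
 * code; assumes @bo is a valid, reserved buffer object):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, 1, &map) == 0) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		if (is_iomem)
 *			memset_io((void __iomem *)virt, 0, PAGE_SIZE);
 *		else
 *			memset(virt, 0, PAGE_SIZE);
 *		ttm_bo_kunmap(&map);
 *	}
 */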
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

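/**
 * ttm_bo_kunmap - tear down a mapping created by ttm_bo_kmap() and
 * drop the io reservation taken for it.
 */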
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

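/**
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move
 *
 * @sync_obj: fence signalling completion of the copy engine's work
 *
 * On eviction, the bo is waited on and the old placement torn down
 * immediately. Otherwise the old placement is handed to a "ghost"
 * buffer object that is released once @sync_obj signals, so ordinary
 * moves pipeline without a CPU stall.
 */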
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */
		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */
		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);