/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

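/*
 * ttm_bo_move_ttm - Move a buffer by rebinding its TTM: wait for the GPU
 * and unbind the TTM when the buffer leaves its old aperture location,
 * adjust the page caching to the new placement, and bind the TTM at the
 * new location if that is not system memory. The pages themselves are
 * not copied; only the binding changes.
 */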
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

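/*
 * ttm_mem_io_lock/unlock serialize a memory type's io space
 * reservations. Drivers that never need eviction of io space set
 * io_reserve_fastpath, in which case the mutex is skipped entirely.
 */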
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

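/*
 * ttm_mem_reg_ioremap() maps the full bus address range of a memory
 * region into kernel space, honouring the region's caching flags;
 * ttm_mem_reg_iounmap() is its counterpart. Both are helpers for
 * ttm_bo_move_memcpy() below.
 */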
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

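/*
 * A minimal usage sketch for the pair above (hypothetical caller, not
 * part of this file): map one ttm page with the caching-aware
 * protection derived from its placement, touch it, and unmap in
 * reverse order.
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 *	void *va = ttm_kmap_atomic_prot(page, prot);
 *
 *	if (va) {
 *		memcpy(va, src, PAGE_SIZE);
 *		ttm_kunmap_atomic_prot(va, prot);
 *	}
 */
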
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

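/*
 * ttm_bo_move_memcpy - Fallback move helper: copy the buffer contents
 * page by page with the CPU. It handles all combinations of iomem and
 * system source/destination, clears the destination when there is no
 * source data to preserve, and copies backwards when the two regions
 * may overlap within the same memory type.
 */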
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->bdev->glob->bo_count);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = reservation_object_trylock(fbo->resv);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

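/*
 * ttm_io_prot - Derive the kernel page protection for a mapping from
 * the TTM caching flags of the underlying placement: cached memory
 * keeps the protection passed in, while write-combined and uncached
 * placements get the matching pgprot adjustment on architectures that
 * support it.
 */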
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

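/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */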
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

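/*
 * ttm_bo_pipeline_move - Like ttm_bo_move_accel_cleanup(), but never
 * stalls the pipeline: instead of waiting for an eviction to finish,
 * the move fence is recorded on the source memory type manager so the
 * freed space is not reused before the copy has completed.
 */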
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

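/*
 * ttm_bo_pipeline_gutting - Transfer the current placement, including
 * its fences, to a ghost object and leave @bo behind as an empty
 * TTM_PL_SYSTEM object, so the backing store can be released without
 * waiting for the GPU.
 */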
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	ttm_bo_unreserve(ghost);
	ttm_bo_unref(&ghost);

	return 0;
}