/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET	(0x100000000ULL >> PAGE_SHIFT)

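/* Forward declaration of the helper used by the copy path below: it maps
 * part of a buffer object that has no GART address yet into one of the GTT
 * copy windows so the copy engine can reach it; the GPU address of the
 * mapping is returned through @addr (see amdgpu_ttm_copy_mem_to_mem()).
 */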
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

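/* Reference the global TTM memory and BO accounting objects and set up the
 * kernel-priority scheduler entity used for buffer moves on the copy ring.
 */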
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

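/* Initialize a ttm_mem_type_manager for each memory domain (SYSTEM, TT/GTT,
 * VRAM and the GDS/GWS/OA on-chip pools), setting the manager functions,
 * GPU base offset, caching and placement flags.
 */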
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

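/* Compute the placement to use when a buffer object is evicted.  VRAM BOs
 * are preferably moved into the CPU-invisible part of VRAM with GTT as the
 * busy fallback; TT and everything else is evicted to system memory.
 */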
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			struct drm_mm_node *node = bo->mem.mm_node;
			unsigned long pages_left;

			for (pages_left = bo->mem.num_pages;
			     pages_left;
			     pages_left -= node->size, node++) {
				if (node->start < fpfn)
					break;
			}

			if (!pages_left)
				goto gtt;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = fpfn;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
gtt:
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

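/* Reject CPU mappings of userptr BOs and otherwise let the DRM VMA offset
 * manager decide whether @filp may map this BO.
 */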
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

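/* Complete a move without touching any data; only the ttm_mem_reg
 * bookkeeping is transferred to the new placement.
 */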
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

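/* Return the GPU address of a drm_mm_node, or 0 when the node lives in GTT
 * but has not been assigned a GART address yet.
 */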
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
 * corresponding to @offset and adjusts @offset so that it is relative to
 * the start of the returned node.
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for memory to memory copies
 *
 * @adev: amdgpu device the copy ring belongs to
 * @src: source BO, placement and offset
 * @dst: destination BO, placement and offset
 * @size: number of bytes to copy
 * @resv: reservation object to synchronize the copy with
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}.  src->bo and dst->bo may be the same BO for a
 * move, or different BOs for a BO-to-BO copy.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}


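/* Move the contents of a BO with the copy engine by handing both placements
 * of the same BO to amdgpu_ttm_copy_mem_to_mem() and then finishing the
 * move with ttm_bo_pipeline_move().
 */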
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

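/* Evict VRAM to system memory in two steps: blit the data into a temporary
 * GTT placement first, then let TTM move it the rest of the way to system
 * memory.
 */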
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

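/* The reverse of amdgpu_move_vram_ram(): bind the BO to a temporary GTT
 * placement first and then blit it into VRAM.
 */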
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

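/* The ttm_bo_driver move callback.  Picks the cheapest way to move a BO
 * between placements: a NULL move when only bookkeeping changes, a blit via
 * the copy engine when it is available, and a CPU memcpy as the fallback.
 */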
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

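/* Fill in the bus placement (base, offset, whether it is I/O memory) so
 * that CPU mappings of the BO can be set up.  Only VRAM that lies within
 * the visible aperture can be mapped.
 */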
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

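/* Translate a page offset inside a BO into the page frame number backing
 * it, walking the drm_mm nodes of the placement so non-contiguous
 * allocations are handled correctly.
 */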
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};

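/* Pin the pages backing a userptr BO with get_user_pages().  The task doing
 * the pinning is added to gtt->guptasks for the duration of each call so
 * other parts of the driver can see that a pin is in flight.
 */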
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&current->mm->mmap_sem);
	return r;
}

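/* Replace the pages of a userptr TT with the freshly pinned @pages (or drop
 * them when @pages is NULL), remembering the MMU invalidation counter they
 * correspond to.
 */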
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

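/* Mark all pages of a userptr TT as accessed and, unless the BO is
 * read-only, as dirty, so that data written by the GPU is not lost when the
 * pages are given back to the MM.
 */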
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

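/* Tear down the DMA mapping of a userptr TT: unmap the sg table, mark the
 * pages dirty/accessed and free the table.
 */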
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}

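/* ttm_backend_func bind callback.  Pins userptr pages if needed and, when
 * the placement already has a GART address, writes the GART entries;
 * otherwise the binding is deferred until amdgpu_ttm_alloc_gart() assigns
 * one.
 */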
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

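/* Make sure a TT BO has a real GART address.  If it is still at
 * AMDGPU_BO_INVALID_OFFSET, allocate a node inside the GART aperture, bind
 * the pages there and update bo->mem and bo->offset accordingly.
 */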
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
			     bo->ttm->pages, gtt->ttm.dma_address, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

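/* Rebind the pages of a BO into the GART at the offset recorded in its TT,
 * for example after the GART table contents were lost and need to be
 * restored.
 */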
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
	uint64_t flags;
	int r;

	if (!gtt)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

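/* ttm_backend_func unbind callback.  Unpins userptr pages and clears the
 * GART entries if the TT was actually bound to a GART address.
 */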
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 944 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) |
| 945 | { |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 946 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 947 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 948 | int r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 949 | |
Christian König | 85a4b57 | 2016-09-22 14:19:50 +0200 | [diff] [blame] | 950 | if (gtt->userptr) |
| 951 | amdgpu_ttm_tt_unpin_userptr(ttm); |
| 952 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 953 | if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) |
Christian König | 78ab0a3 | 2016-09-09 15:39:08 +0200 | [diff] [blame] | 954 | return 0; |
| 955 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 956 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 957 | r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 958 | if (r) |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 959 | DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", |
| 960 | gtt->ttm.ttm.num_pages, gtt->offset); |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 961 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 962 | } |
| 963 | |
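| | /* Free the DMA-aware ttm_tt state and the amdgpu_ttm_tt wrapper itself. */ |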
| 964 | static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) |
| 965 | { |
| 966 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 967 | |
| 968 | ttm_dma_tt_fini(&gtt->ttm); |
| 969 | kfree(gtt); |
| 970 | } |
| 971 | |
| 972 | static struct ttm_backend_func amdgpu_backend_func = { |
| 973 | .bind = &amdgpu_ttm_backend_bind, |
| 974 | .unbind = &amdgpu_ttm_backend_unbind, |
| 975 | .destroy = &amdgpu_ttm_backend_destroy, |
| 976 | }; |
| 977 | |
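| | /** |
| | * amdgpu_ttm_tt_create - allocate the amdgpu_ttm_tt backing a buffer object |
| | * |
| | * Sets up the DMA-aware TTM backend that tracks the pages and DMA addresses |
| | * of the object. |
| | */ |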
| 978 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, |
Christian König | 231cdaf | 2018-02-21 20:34:13 +0100 | [diff] [blame] | 979 | unsigned long size, uint32_t page_flags) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 980 | { |
| 981 | struct amdgpu_device *adev; |
| 982 | struct amdgpu_ttm_tt *gtt; |
| 983 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 984 | adev = amdgpu_ttm_adev(bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 985 | |
| 986 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); |
| 987 | if (gtt == NULL) { |
| 988 | return NULL; |
| 989 | } |
| 990 | gtt->ttm.ttm.func = &amdgpu_backend_func; |
Christian König | 231cdaf | 2018-02-21 20:34:13 +0100 | [diff] [blame] | 991 | if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 992 | kfree(gtt); |
| 993 | return NULL; |
| 994 | } |
| 995 | return &gtt->ttm.ttm; |
| 996 | } |
| 997 | |
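| | /** |
| | * amdgpu_ttm_tt_populate - allocate and map the pages backing a ttm_tt |
| | * |
| | * Userptr objects only get an empty sg_table here, SG (imported) objects |
| | * reuse the addresses from their sg_table, and everything else is allocated |
| | * from the TTM page pools, using the SWIOTLB-aware path when required. |
| | */ |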
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 998 | static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, |
| 999 | struct ttm_operation_ctx *ctx) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1000 | { |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 1001 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1002 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1003 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1004 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1005 | if (gtt && gtt->userptr) { |
Maninder Singh | 5f0b34c | 2015-06-26 13:28:50 +0530 | [diff] [blame] | 1006 | ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1007 | if (!ttm->sg) |
| 1008 | return -ENOMEM; |
| 1009 | |
| 1010 | ttm->page_flags |= TTM_PAGE_FLAG_SG; |
| 1011 | ttm->state = tt_unbound; |
| 1012 | return 0; |
| 1013 | } |
| 1014 | |
| 1015 | if (slave && ttm->sg) { |
| 1016 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
| 1017 | gtt->ttm.dma_address, ttm->num_pages); |
| 1018 | ttm->state = tt_unbound; |
Tom St Denis | 79ba280 | 2017-09-18 08:10:00 -0400 | [diff] [blame] | 1019 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1020 | } |
| 1021 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1022 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 1023 | if (adev->need_swiotlb && swiotlb_nr_tbl()) { |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1024 | return ttm_dma_populate(&gtt->ttm, adev->dev, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1025 | } |
| 1026 | #endif |
| 1027 | |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1028 | return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1029 | } |
| 1030 | |
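| | /** |
| | * amdgpu_ttm_tt_unpopulate - unmap and free the pages backing a ttm_tt |
| | * |
| | * The inverse of amdgpu_ttm_tt_populate(): userptr and SG objects only drop |
| | * their bookkeeping, regular objects return their pages to the TTM pools. |
| | */ |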
| 1031 | static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) |
| 1032 | { |
| 1033 | struct amdgpu_device *adev; |
| 1034 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1035 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1036 | |
| 1037 | if (gtt && gtt->userptr) { |
Christian König | a216ab0 | 2017-09-02 13:21:31 +0200 | [diff] [blame] | 1038 | amdgpu_ttm_tt_set_user_pages(ttm, NULL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1039 | kfree(ttm->sg); |
| 1040 | ttm->page_flags &= ~TTM_PAGE_FLAG_SG; |
| 1041 | return; |
| 1042 | } |
| 1043 | |
| 1044 | if (slave) |
| 1045 | return; |
| 1046 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 1047 | adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1048 | |
| 1049 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 1050 | if (adev->need_swiotlb && swiotlb_nr_tbl()) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1051 | ttm_dma_unpopulate(&gtt->ttm, adev->dev); |
| 1052 | return; |
| 1053 | } |
| 1054 | #endif |
| 1055 | |
Tom St Denis | 7405e0d | 2017-08-18 10:05:48 -0400 | [diff] [blame] | 1056 | ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1057 | } |
| 1058 | |
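| | /** |
| | * amdgpu_ttm_tt_set_userptr - initialize a ttm_tt as a userptr mapping |
| | * |
| | * Records the CPU address, owning mm and access flags and resets the |
| | * get_user_pages/MMU-notifier bookkeeping. |
| | */ |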
| 1059 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
| 1060 | uint32_t flags) |
| 1061 | { |
| 1062 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1063 | |
| 1064 | if (gtt == NULL) |
| 1065 | return -EINVAL; |
| 1066 | |
| 1067 | gtt->userptr = addr; |
| 1068 | gtt->usermm = current->mm; |
| 1069 | gtt->userflags = flags; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1070 | spin_lock_init(&gtt->guptasklock); |
| 1071 | INIT_LIST_HEAD(&gtt->guptasks); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1072 | atomic_set(&gtt->mmu_invalidations, 0); |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 1073 | gtt->last_set_pages = 0; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1074 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1075 | return 0; |
| 1076 | } |
| 1077 | |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1078 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1079 | { |
| 1080 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1081 | |
| 1082 | if (gtt == NULL) |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1083 | return NULL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1084 | |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1085 | return gtt->usermm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1086 | } |
| 1087 | |
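| | /** |
| | * amdgpu_ttm_tt_affect_userptr - check if a userptr overlaps an address range |
| | * |
| | * Returns true and bumps the invalidation counter when [start, end) overlaps |
| | * the mapping, unless the overlap was triggered by the task that is |
| | * currently pinning the pages itself. |
| | */ |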
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1088 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, |
| 1089 | unsigned long end) |
| 1090 | { |
| 1091 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1092 | struct amdgpu_ttm_gup_task_list *entry; |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1093 | unsigned long size; |
| 1094 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1095 | if (gtt == NULL || !gtt->userptr) |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1096 | return false; |
| 1097 | |
| 1098 | size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; |
| 1099 | if (gtt->userptr > end || gtt->userptr + size <= start) |
| 1100 | return false; |
| 1101 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1102 | spin_lock(&gtt->guptasklock); |
| 1103 | list_for_each_entry(entry, &gtt->guptasks, list) { |
| 1104 | if (entry->task == current) { |
| 1105 | spin_unlock(&gtt->guptasklock); |
| 1106 | return false; |
| 1107 | } |
| 1108 | } |
| 1109 | spin_unlock(&gtt->guptasklock); |
| 1110 | |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1111 | atomic_inc(&gtt->mmu_invalidations); |
| 1112 | |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1113 | return true; |
| 1114 | } |
| 1115 | |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1116 | bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, |
| 1117 | int *last_invalidated) |
| 1118 | { |
| 1119 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1120 | int prev_invalidated = *last_invalidated; |
| 1121 | |
| 1122 | *last_invalidated = atomic_read(&gtt->mmu_invalidations); |
| 1123 | return prev_invalidated != *last_invalidated; |
| 1124 | } |
| 1125 | |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 1126 | bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) |
| 1127 | { |
| 1128 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1129 | |
| 1130 | if (gtt == NULL || !gtt->userptr) |
| 1131 | return false; |
| 1132 | |
| 1133 | return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages; |
| 1134 | } |
| 1135 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1136 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) |
| 1137 | { |
| 1138 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1139 | |
| 1140 | if (gtt == NULL) |
| 1141 | return false; |
| 1142 | |
| 1143 | return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 1144 | } |
| 1145 | |
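| | /** |
| | * amdgpu_ttm_tt_pte_flags - compute GART/VM PTE flags for a placement |
| | * |
| | * Builds the VALID/SYSTEM/SNOOPED/READABLE/WRITEABLE combination matching |
| | * the memory type and caching state of @mem. |
| | */ |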
Chunming Zhou | 6b77760 | 2016-09-21 16:19:19 +0800 | [diff] [blame] | 1146 | uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1147 | struct ttm_mem_reg *mem) |
| 1148 | { |
Chunming Zhou | 6b77760 | 2016-09-21 16:19:19 +0800 | [diff] [blame] | 1149 | uint64_t flags = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1150 | |
| 1151 | if (mem && mem->mem_type != TTM_PL_SYSTEM) |
| 1152 | flags |= AMDGPU_PTE_VALID; |
| 1153 | |
Christian König | 6d99905 | 2015-12-04 13:32:55 +0100 | [diff] [blame] | 1154 | if (mem && mem->mem_type == TTM_PL_TT) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1155 | flags |= AMDGPU_PTE_SYSTEM; |
| 1156 | |
Christian König | 6d99905 | 2015-12-04 13:32:55 +0100 | [diff] [blame] | 1157 | if (ttm->caching_state == tt_cached) |
| 1158 | flags |= AMDGPU_PTE_SNOOPED; |
| 1159 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1160 | |
Alex Xie | 4b98e0c | 2017-02-14 12:31:36 -0500 | [diff] [blame] | 1161 | flags |= adev->gart.gart_pte_flags; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1162 | flags |= AMDGPU_PTE_READABLE; |
| 1163 | |
| 1164 | if (!amdgpu_ttm_tt_is_readonly(ttm)) |
| 1165 | flags |= AMDGPU_PTE_WRITEABLE; |
| 1166 | |
| 1167 | return flags; |
| 1168 | } |
| 1169 | |
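| | /** |
| | * amdgpu_ttm_bo_eviction_valuable - check whether evicting a BO is worthwhile |
| | * |
| | * KFD BOs belonging to the current process are never evicted; for VRAM only |
| | * nodes that intersect the requested placement make the eviction useful. |
| | */ |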
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1170 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, |
| 1171 | const struct ttm_place *place) |
| 1172 | { |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1173 | unsigned long num_pages = bo->mem.num_pages; |
| 1174 | struct drm_mm_node *node = bo->mem.mm_node; |
Felix Kuehling | d8d019c | 2018-02-06 20:32:35 -0500 | [diff] [blame^] | 1175 | struct reservation_object_list *flist; |
| 1176 | struct dma_fence *f; |
| 1177 | int i; |
| 1178 | |
| 1179 | /* If bo is a KFD BO, check whether it belongs to the current process. |
| 1180 | * If so, return false: a KFD process needs all of its BOs to be |
| 1181 | * resident in order to run successfully. |
| 1182 | */ |
| 1183 | flist = reservation_object_get_list(bo->resv); |
| 1184 | if (flist) { |
| 1185 | for (i = 0; i < flist->shared_count; ++i) { |
| 1186 | f = rcu_dereference_protected(flist->shared[i], |
| 1187 | reservation_object_held(bo->resv)); |
| 1188 | if (amdkfd_fence_check_mm(f, current->mm)) |
| 1189 | return false; |
| 1190 | } |
| 1191 | } |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1192 | |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1193 | switch (bo->mem.mem_type) { |
| 1194 | case TTM_PL_TT: |
| 1195 | return true; |
| 1196 | |
| 1197 | case TTM_PL_VRAM: |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1198 | /* Check each drm MM node individually */ |
| 1199 | while (num_pages) { |
| 1200 | if (place->fpfn < (node->start + node->size) && |
| 1201 | !(place->lpfn && place->lpfn <= node->start)) |
| 1202 | return true; |
| 1203 | |
| 1204 | num_pages -= node->size; |
| 1205 | ++node; |
| 1206 | } |
Roger He | 7da2e3e | 2017-11-02 13:14:27 +0800 | [diff] [blame] | 1207 | return false; |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1208 | |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1209 | default: |
| 1210 | break; |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1211 | } |
| 1212 | |
| 1213 | return ttm_bo_eviction_valuable(bo, place); |
| 1214 | } |
| 1215 | |
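| | /** |
| | * amdgpu_ttm_access_memory - read or write VRAM behind a BO |
| | * |
| | * TTM access_memory callback: transfers @len bytes at @offset through the |
| | * MM_INDEX/MM_DATA register window, handling unaligned head and tail words. |
| | */ |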
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1216 | static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, |
| 1217 | unsigned long offset, |
| 1218 | void *buf, int len, int write) |
| 1219 | { |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 1220 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1221 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 1222 | struct drm_mm_node *nodes; |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1223 | uint32_t value = 0; |
| 1224 | int ret = 0; |
| 1225 | uint64_t pos; |
| 1226 | unsigned long flags; |
| 1227 | |
| 1228 | if (bo->mem.mem_type != TTM_PL_VRAM) |
| 1229 | return -EIO; |
| 1230 | |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 1231 | nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1232 | pos = (nodes->start << PAGE_SHIFT) + offset; |
| 1233 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1234 | while (len && pos < adev->gmc.mc_vram_size) { |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1235 | uint64_t aligned_pos = pos & ~(uint64_t)3; |
| 1236 | uint32_t bytes = 4 - (pos & 3); |
| 1237 | uint32_t shift = (pos & 3) * 8; |
| 1238 | uint32_t mask = 0xffffffff << shift; |
| 1239 | |
| 1240 | if (len < bytes) { |
| 1241 | mask &= 0xffffffff >> (bytes - len) * 8; |
| 1242 | bytes = len; |
| 1243 | } |
| 1244 | |
| 1245 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1246 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); |
| 1247 | WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1248 | if (!write || mask != 0xffffffff) |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1249 | value = RREG32_NO_KIQ(mmMM_DATA); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1250 | if (write) { |
| 1251 | value &= ~mask; |
| 1252 | value |= (*(uint32_t *)buf << shift) & mask; |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1253 | WREG32_NO_KIQ(mmMM_DATA, value); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1254 | } |
| 1255 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 1256 | if (!write) { |
| 1257 | value = (value & mask) >> shift; |
| 1258 | memcpy(buf, &value, bytes); |
| 1259 | } |
| 1260 | |
| 1261 | ret += bytes; |
| 1262 | buf = (uint8_t *)buf + bytes; |
| 1263 | pos += bytes; |
| 1264 | len -= bytes; |
| 1265 | if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { |
| 1266 | ++nodes; |
| 1267 | pos = (nodes->start << PAGE_SHIFT); |
| 1268 | } |
| 1269 | } |
| 1270 | |
| 1271 | return ret; |
| 1272 | } |
| 1273 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1274 | static struct ttm_bo_driver amdgpu_bo_driver = { |
| 1275 | .ttm_tt_create = &amdgpu_ttm_tt_create, |
| 1276 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, |
| 1277 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, |
| 1278 | .invalidate_caches = &amdgpu_invalidate_caches, |
| 1279 | .init_mem_type = &amdgpu_init_mem_type, |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1280 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1281 | .evict_flags = &amdgpu_evict_flags, |
| 1282 | .move = &amdgpu_bo_move, |
| 1283 | .verify_access = &amdgpu_verify_access, |
| 1284 | .move_notify = &amdgpu_bo_move_notify, |
| 1285 | .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, |
| 1286 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, |
| 1287 | .io_mem_free = &amdgpu_ttm_io_mem_free, |
Christian König | 9bbdcc0 | 2017-03-29 11:16:05 +0200 | [diff] [blame] | 1288 | .io_mem_pfn = amdgpu_ttm_io_mem_pfn, |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1289 | .access_memory = &amdgpu_ttm_access_memory |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1290 | }; |
| 1291 | |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1292 | /* |
| 1293 | * Firmware Reservation functions |
| 1294 | */ |
| 1295 | /** |
| 1296 | * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram |
| 1297 | * |
| 1298 | * @adev: amdgpu_device pointer |
| 1299 | * |
| 1300 | * Free the VRAM reserved for firmware, if it has been reserved. |
| 1301 | */ |
| 1302 | static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) |
| 1303 | { |
| 1304 | amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, |
| 1305 | NULL, &adev->fw_vram_usage.va); |
| 1306 | } |
| 1307 | |
| 1308 | /** |
| 1309 | * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw |
| 1310 | * |
| 1311 | * @adev: amdgpu_device pointer |
| 1312 | * |
| 1313 | * Create a VRAM BO covering the region that firmware requested to be reserved. |
| 1314 | */ |
| 1315 | static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) |
| 1316 | { |
| 1317 | struct ttm_operation_ctx ctx = { false, false }; |
| 1318 | int r = 0; |
| 1319 | int i; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1320 | u64 vram_size = adev->gmc.visible_vram_size; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1321 | u64 offset = adev->fw_vram_usage.start_offset; |
| 1322 | u64 size = adev->fw_vram_usage.size; |
| 1323 | struct amdgpu_bo *bo; |
| 1324 | |
| 1325 | adev->fw_vram_usage.va = NULL; |
| 1326 | adev->fw_vram_usage.reserved_bo = NULL; |
| 1327 | |
| 1328 | if (adev->fw_vram_usage.size > 0 && |
| 1329 | adev->fw_vram_usage.size <= vram_size) { |
| 1330 | |
| 1331 | r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, |
| 1332 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
| 1333 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
Christian König | 8febe61 | 2018-01-24 19:55:32 +0100 | [diff] [blame] | 1334 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1335 | &adev->fw_vram_usage.reserved_bo); |
| 1336 | if (r) |
| 1337 | goto error_create; |
| 1338 | |
| 1339 | r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); |
| 1340 | if (r) |
| 1341 | goto error_reserve; |
| 1342 | |
| 1343 | /* remove the original mem node and create a new one at the |
| 1344 | * requested position |
| 1345 | */ |
| 1346 | bo = adev->fw_vram_usage.reserved_bo; |
| 1347 | offset = ALIGN(offset, PAGE_SIZE); |
| 1348 | for (i = 0; i < bo->placement.num_placement; ++i) { |
| 1349 | bo->placements[i].fpfn = offset >> PAGE_SHIFT; |
| 1350 | bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
| 1351 | } |
| 1352 | |
| 1353 | ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); |
| 1354 | r = ttm_bo_mem_space(&bo->tbo, &bo->placement, |
| 1355 | &bo->tbo.mem, &ctx); |
| 1356 | if (r) |
| 1357 | goto error_pin; |
| 1358 | |
| 1359 | r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, |
| 1360 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1361 | adev->fw_vram_usage.start_offset, |
| 1362 | (adev->fw_vram_usage.start_offset + |
| 1363 | adev->fw_vram_usage.size), NULL); |
| 1364 | if (r) |
| 1365 | goto error_pin; |
| 1366 | r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, |
| 1367 | &adev->fw_vram_usage.va); |
| 1368 | if (r) |
| 1369 | goto error_kmap; |
| 1370 | |
| 1371 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
| 1372 | } |
| 1373 | return r; |
| 1374 | |
| 1375 | error_kmap: |
| 1376 | amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); |
| 1377 | error_pin: |
| 1378 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
| 1379 | error_reserve: |
| 1380 | amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); |
| 1381 | error_create: |
| 1382 | adev->fw_vram_usage.va = NULL; |
| 1383 | adev->fw_vram_usage.reserved_bo = NULL; |
| 1384 | return r; |
| 1385 | } |
| 1386 | |
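| | /** |
| | * amdgpu_ttm_init - set up the TTM device and memory domains |
| | * |
| | * Initializes the VRAM, GTT, GDS, GWS and OA managers, reserves the |
| | * firmware and stolen VGA regions, and registers the debugfs entries. |
| | */ |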
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1387 | int amdgpu_ttm_init(struct amdgpu_device *adev) |
| 1388 | { |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1389 | uint64_t gtt_size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1390 | int r; |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1391 | u64 vis_vram_limit; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1392 | |
Alex Deucher | 70b5c5a | 2016-11-15 16:55:53 -0500 | [diff] [blame] | 1393 | r = amdgpu_ttm_global_init(adev); |
| 1394 | if (r) { |
| 1395 | return r; |
| 1396 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1397 | /* No other user of the address space, so set it to 0 */ |
| 1398 | r = ttm_bo_device_init(&adev->mman.bdev, |
| 1399 | adev->mman.bo_global_ref.ref.object, |
| 1400 | &amdgpu_bo_driver, |
| 1401 | adev->ddev->anon_inode->i_mapping, |
| 1402 | DRM_FILE_PAGE_OFFSET, |
| 1403 | adev->need_dma32); |
| 1404 | if (r) { |
| 1405 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
| 1406 | return r; |
| 1407 | } |
| 1408 | adev->mman.initialized = true; |
Andrey Grodzovsky | 7cce958 | 2018-01-16 10:06:36 -0500 | [diff] [blame] | 1409 | |
| 1410 | /* We opt to avoid OOM on system page allocations */ |
| 1411 | adev->mman.bdev.no_retry = true; |
| 1412 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1413 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1414 | adev->gmc.real_vram_size >> PAGE_SHIFT); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1415 | if (r) { |
| 1416 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
| 1417 | return r; |
| 1418 | } |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1419 | |
| 1420 | /* Reduce size of CPU-visible VRAM if requested */ |
| 1421 | vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; |
| 1422 | if (amdgpu_vis_vram_limit > 0 && |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1423 | vis_vram_limit <= adev->gmc.visible_vram_size) |
| 1424 | adev->gmc.visible_vram_size = vis_vram_limit; |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1425 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1426 | /* Change the size here instead of the init above so only lpfn is affected */ |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1427 | amdgpu_ttm_set_buffer_funcs_status(adev, false); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 1428 | #ifdef CONFIG_64BIT |
| 1429 | adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, |
| 1430 | adev->gmc.visible_vram_size); |
| 1431 | #endif |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1432 | |
Horace Chen | a05502e | 2017-09-29 14:41:57 +0800 | [diff] [blame] | 1433 | /* |
| 1434 | * The reserved VRAM for firmware must be pinned to the specified |
| 1435 | * place in VRAM, so reserve it early. |
| 1436 | */ |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1437 | r = amdgpu_ttm_fw_reserve_vram_init(adev); |
Horace Chen | a05502e | 2017-09-29 14:41:57 +0800 | [diff] [blame] | 1438 | if (r) { |
| 1439 | return r; |
| 1440 | } |
| 1441 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1442 | r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, |
Christian König | a4a0277 | 2017-07-27 17:24:36 +0200 | [diff] [blame] | 1443 | AMDGPU_GEM_DOMAIN_VRAM, |
Kent Russell | 5af2c10 | 2017-08-08 07:48:01 -0400 | [diff] [blame] | 1444 | &adev->stolen_vga_memory, |
Christian König | a4a0277 | 2017-07-27 17:24:36 +0200 | [diff] [blame] | 1445 | NULL, NULL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1446 | if (r) |
| 1447 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1448 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1449 | (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1450 | |
Roger He | 424e2c8 | 2017-11-10 19:05:13 +0800 | [diff] [blame] | 1451 | if (amdgpu_gtt_size == -1) { |
| 1452 | struct sysinfo si; |
| 1453 | |
| 1454 | si_meminfo(&si); |
Andrey Grodzovsky | 2456252 | 2017-12-15 12:09:16 -0500 | [diff] [blame] | 1455 | gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1456 | adev->gmc.mc_vram_size), |
Andrey Grodzovsky | 2456252 | 2017-12-15 12:09:16 -0500 | [diff] [blame] | 1457 | ((uint64_t)si.totalram * si.mem_unit * 3/4)); |
| 1458 | } |
| 1459 | else |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1460 | gtt_size = (uint64_t)amdgpu_gtt_size << 20; |
| 1461 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1462 | if (r) { |
| 1463 | DRM_ERROR("Failed initializing GTT heap.\n"); |
| 1464 | return r; |
| 1465 | } |
| 1466 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n", |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1467 | (unsigned)(gtt_size / (1024 * 1024))); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1468 | |
| 1469 | adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; |
| 1470 | adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; |
| 1471 | adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; |
| 1472 | adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT; |
| 1473 | adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT; |
| 1474 | adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT; |
| 1475 | adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT; |
| 1476 | adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT; |
| 1477 | adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT; |
| 1478 | /* GDS Memory */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1479 | if (adev->gds.mem.total_size) { |
| 1480 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, |
| 1481 | adev->gds.mem.total_size >> PAGE_SHIFT); |
| 1482 | if (r) { |
| 1483 | DRM_ERROR("Failed initializing GDS heap.\n"); |
| 1484 | return r; |
| 1485 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1486 | } |
| 1487 | |
| 1488 | /* GWS */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1489 | if (adev->gds.gws.total_size) { |
| 1490 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, |
| 1491 | adev->gds.gws.total_size >> PAGE_SHIFT); |
| 1492 | if (r) { |
| 1493 | DRM_ERROR("Failed initializing gws heap.\n"); |
| 1494 | return r; |
| 1495 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1496 | } |
| 1497 | |
| 1498 | /* OA */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1499 | if (adev->gds.oa.total_size) { |
| 1500 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, |
| 1501 | adev->gds.oa.total_size >> PAGE_SHIFT); |
| 1502 | if (r) { |
| 1503 | DRM_ERROR("Failed initializing oa heap.\n"); |
| 1504 | return r; |
| 1505 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1506 | } |
| 1507 | |
| 1508 | r = amdgpu_ttm_debugfs_init(adev); |
| 1509 | if (r) { |
| 1510 | DRM_ERROR("Failed to init debugfs\n"); |
| 1511 | return r; |
| 1512 | } |
| 1513 | return 0; |
| 1514 | } |
| 1515 | |
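| | /* Tear down everything that amdgpu_ttm_init() set up. */ |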
| 1516 | void amdgpu_ttm_fini(struct amdgpu_device *adev) |
| 1517 | { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1518 | if (!adev->mman.initialized) |
| 1519 | return; |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1520 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1521 | amdgpu_ttm_debugfs_fini(adev); |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1522 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1523 | amdgpu_ttm_fw_reserve_vram_fini(adev); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 1524 | if (adev->mman.aper_base_kaddr) |
| 1525 | iounmap(adev->mman.aper_base_kaddr); |
| 1526 | adev->mman.aper_base_kaddr = NULL; |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1527 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1528 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); |
| 1529 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1530 | if (adev->gds.mem.total_size) |
| 1531 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); |
| 1532 | if (adev->gds.gws.total_size) |
| 1533 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); |
| 1534 | if (adev->gds.oa.total_size) |
| 1535 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1536 | ttm_bo_device_release(&adev->mman.bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1537 | amdgpu_ttm_global_fini(adev); |
| 1538 | adev->mman.initialized = false; |
| 1539 | DRM_INFO("amdgpu: ttm finalized\n"); |
| 1540 | } |
| 1541 | |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1542 | /** |
| 1543 | * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions |
| 1544 | * |
| 1545 | * @adev: amdgpu_device pointer |
| 1546 | * @enable: true when we can use buffer functions. |
| 1547 | * |
| 1548 | * Enable/disable use of buffer functions during suspend/resume. This should |
| 1549 | * only be called at bootup or when userspace isn't running. |
| 1550 | */ |
| 1551 | void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1552 | { |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1553 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM]; |
| 1554 | uint64_t size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1555 | |
Christian König | 380383f | 2018-03-01 11:03:27 +0100 | [diff] [blame] | 1556 | if (!adev->mman.initialized || adev->in_gpu_reset) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1557 | return; |
| 1558 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1559 | /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */ |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1560 | if (enable) |
| 1561 | size = adev->gmc.real_vram_size; |
| 1562 | else |
| 1563 | size = adev->gmc.visible_vram_size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1564 | man->size = size >> PAGE_SHIFT; |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 1565 | adev->mman.buffer_funcs_enabled = enable; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1566 | } |
| 1567 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1568 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) |
| 1569 | { |
| 1570 | struct drm_file *file_priv; |
| 1571 | struct amdgpu_device *adev; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1572 | |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1573 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1574 | return -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1575 | |
| 1576 | file_priv = filp->private_data; |
| 1577 | adev = file_priv->minor->dev->dev_private; |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1578 | if (adev == NULL) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1579 | return -EINVAL; |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1580 | |
| 1581 | return ttm_bo_mmap(filp, vma, &adev->mman.bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1582 | } |
| 1583 | |
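| | /** |
| | * amdgpu_map_buffer - map part of a BO into a GART transfer window |
| | * |
| | * Builds an SDMA job that writes GART entries for @num_pages of @bo into |
| | * the given transfer @window so the copy engine can reach memory without a |
| | * permanent GPU address; the window's GPU address is returned in @addr. |
| | */ |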
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 1584 | static int amdgpu_map_buffer(struct ttm_buffer_object *bo, |
| 1585 | struct ttm_mem_reg *mem, unsigned num_pages, |
| 1586 | uint64_t offset, unsigned window, |
| 1587 | struct amdgpu_ring *ring, |
| 1588 | uint64_t *addr) |
| 1589 | { |
| 1590 | struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; |
| 1591 | struct amdgpu_device *adev = ring->adev; |
| 1592 | struct ttm_tt *ttm = bo->ttm; |
| 1593 | struct amdgpu_job *job; |
| 1594 | unsigned num_dw, num_bytes; |
| 1595 | dma_addr_t *dma_address; |
| 1596 | struct dma_fence *fence; |
| 1597 | uint64_t src_addr, dst_addr; |
| 1598 | uint64_t flags; |
| 1599 | int r; |
| 1600 | |
| 1601 | BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < |
| 1602 | AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); |
| 1603 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1604 | *addr = adev->gmc.gart_start; |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 1605 | *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * |
| 1606 | AMDGPU_GPU_PAGE_SIZE; |
| 1607 | |
| 1608 | num_dw = adev->mman.buffer_funcs->copy_num_dw; |
| 1609 | while (num_dw & 0x7) |
| 1610 | num_dw++; |
| 1611 | |
| 1612 | num_bytes = num_pages * 8; |
| 1613 | |
| 1614 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); |
| 1615 | if (r) |
| 1616 | return r; |
| 1617 | |
| 1618 | src_addr = num_dw * 4; |
| 1619 | src_addr += job->ibs[0].gpu_addr; |
| 1620 | |
| 1621 | dst_addr = adev->gart.table_addr; |
| 1622 | dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; |
| 1623 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, |
| 1624 | dst_addr, num_bytes); |
| 1625 | |
| 1626 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 1627 | WARN_ON(job->ibs[0].length_dw > num_dw); |
| 1628 | |
| 1629 | dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT]; |
| 1630 | flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); |
| 1631 | r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, |
| 1632 | &job->ibs[0].ptr[num_dw]); |
| 1633 | if (r) |
| 1634 | goto error_free; |
| 1635 | |
| 1636 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
| 1637 | AMDGPU_FENCE_OWNER_UNDEFINED, &fence); |
| 1638 | if (r) |
| 1639 | goto error_free; |
| 1640 | |
| 1641 | dma_fence_put(fence); |
| 1642 | |
| 1643 | return r; |
| 1644 | |
| 1645 | error_free: |
| 1646 | amdgpu_job_free(job); |
| 1647 | return r; |
| 1648 | } |
| 1649 | |
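| | /** |
| | * amdgpu_copy_buffer - schedule an SDMA copy between two GPU addresses |
| | * |
| | * Splits @byte_count into copy_max_bytes sized chunks, optionally syncs to |
| | * @resv, and either submits the job through the scheduler or directly to |
| | * the ring when @direct_submit is set. |
| | */ |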
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 1650 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
| 1651 | uint64_t dst_offset, uint32_t byte_count, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1652 | struct reservation_object *resv, |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 1653 | struct dma_fence **fence, bool direct_submit, |
| 1654 | bool vm_needs_flush) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1655 | { |
| 1656 | struct amdgpu_device *adev = ring->adev; |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 1657 | struct amdgpu_job *job; |
| 1658 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1659 | uint32_t max_bytes; |
| 1660 | unsigned num_loops, num_dw; |
| 1661 | unsigned i; |
| 1662 | int r; |
| 1663 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 1664 | if (direct_submit && !ring->ready) { |
| 1665 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
| 1666 | return -EINVAL; |
| 1667 | } |
| 1668 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1669 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; |
| 1670 | num_loops = DIV_ROUND_UP(byte_count, max_bytes); |
| 1671 | num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; |
| 1672 | |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1673 | /* for IB padding */ |
| 1674 | while (num_dw & 0x7) |
| 1675 | num_dw++; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1676 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 1677 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); |
| 1678 | if (r) |
Chunming Zhou | 9066b0c | 2015-08-25 15:12:26 +0800 | [diff] [blame] | 1679 | return r; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1680 | |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 1681 | job->vm_needs_flush = vm_needs_flush; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1682 | if (resv) { |
Christian König | e86f9ce | 2016-02-08 12:13:05 +0100 | [diff] [blame] | 1683 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
Andres Rodriguez | 177ae09 | 2017-09-15 20:44:06 -0400 | [diff] [blame] | 1684 | AMDGPU_FENCE_OWNER_UNDEFINED, |
| 1685 | false); |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1686 | if (r) { |
| 1687 | DRM_ERROR("sync failed (%d).\n", r); |
| 1688 | goto error_free; |
| 1689 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1690 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1691 | |
| 1692 | for (i = 0; i < num_loops; i++) { |
| 1693 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
| 1694 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 1695 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, |
| 1696 | dst_offset, cur_size_in_bytes); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1697 | |
| 1698 | src_offset += cur_size_in_bytes; |
| 1699 | dst_offset += cur_size_in_bytes; |
| 1700 | byte_count -= cur_size_in_bytes; |
| 1701 | } |
| 1702 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 1703 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 1704 | WARN_ON(job->ibs[0].length_dw > num_dw); |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 1705 | if (direct_submit) { |
| 1706 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, |
Junwei Zhang | 50ddc75 | 2017-01-23 16:30:38 +0800 | [diff] [blame] | 1707 | NULL, fence); |
Chris Wilson | f54d186 | 2016-10-25 13:00:45 +0100 | [diff] [blame] | 1708 | job->fence = dma_fence_get(*fence); |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 1709 | if (r) |
| 1710 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
| 1711 | amdgpu_job_free(job); |
| 1712 | } else { |
| 1713 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
| 1714 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
| 1715 | if (r) |
| 1716 | goto error_free; |
| 1717 | } |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1718 | |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 1719 | return r; |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 1720 | |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1721 | error_free: |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 1722 | amdgpu_job_free(job); |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1723 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1724 | } |
| 1725 | |
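| | /** |
| | * amdgpu_fill_buffer - fill a BO with a 32-bit pattern using SDMA |
| | * |
| | * Walks the drm_mm nodes backing the BO (mapping GTT BOs into the GART |
| | * first) and emits one fill command per fill_max_bytes sized chunk. |
| | */ |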
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1726 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 1727 | uint32_t src_data, |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1728 | struct reservation_object *resv, |
| 1729 | struct dma_fence **fence) |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1730 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 1731 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 1732 | uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1733 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
| 1734 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1735 | struct drm_mm_node *mm_node; |
| 1736 | unsigned long num_pages; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1737 | unsigned int num_loops, num_dw; |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1738 | |
| 1739 | struct amdgpu_job *job; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1740 | int r; |
| 1741 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 1742 | if (!adev->mman.buffer_funcs_enabled) { |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1743 | DRM_ERROR("Trying to clear memory with ring turned off.\n"); |
| 1744 | return -EINVAL; |
| 1745 | } |
| 1746 | |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 1747 | if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
Christian König | c5835bb | 2017-10-27 15:43:14 +0200 | [diff] [blame] | 1748 | r = amdgpu_ttm_alloc_gart(&bo->tbo); |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 1749 | if (r) |
| 1750 | return r; |
| 1751 | } |
| 1752 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1753 | num_pages = bo->tbo.num_pages; |
| 1754 | mm_node = bo->tbo.mem.mm_node; |
| 1755 | num_loops = 0; |
| 1756 | while (num_pages) { |
| 1757 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
| 1758 | |
| 1759 | num_loops += DIV_ROUND_UP(byte_count, max_bytes); |
| 1760 | num_pages -= mm_node->size; |
| 1761 | ++mm_node; |
| 1762 | } |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 1763 | num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1764 | |
| 1765 | /* for IB padding */ |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1766 | num_dw += 64; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1767 | |
| 1768 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); |
| 1769 | if (r) |
| 1770 | return r; |
| 1771 | |
| 1772 | if (resv) { |
| 1773 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
Andres Rodriguez | 177ae09 | 2017-09-15 20:44:06 -0400 | [diff] [blame] | 1774 | AMDGPU_FENCE_OWNER_UNDEFINED, false); |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1775 | if (r) { |
| 1776 | DRM_ERROR("sync failed (%d).\n", r); |
| 1777 | goto error_free; |
| 1778 | } |
| 1779 | } |
| 1780 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1781 | num_pages = bo->tbo.num_pages; |
| 1782 | mm_node = bo->tbo.mem.mm_node; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1783 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1784 | while (num_pages) { |
| 1785 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
| 1786 | uint64_t dst_addr; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1787 | |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 1788 | dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1789 | while (byte_count) { |
| 1790 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
| 1791 | |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 1792 | amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, |
| 1793 | dst_addr, cur_size_in_bytes); |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1794 | |
| 1795 | dst_addr += cur_size_in_bytes; |
| 1796 | byte_count -= cur_size_in_bytes; |
| 1797 | } |
| 1798 | |
| 1799 | num_pages -= mm_node->size; |
| 1800 | ++mm_node; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1801 | } |
| 1802 | |
| 1803 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 1804 | WARN_ON(job->ibs[0].length_dw > num_dw); |
| 1805 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 1806 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 1807 | if (r) |
| 1808 | goto error_free; |
| 1809 | |
| 1810 | return 0; |
| 1811 | |
| 1812 | error_free: |
| 1813 | amdgpu_job_free(job); |
| 1814 | return r; |
| 1815 | } |
| 1816 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1817 | #if defined(CONFIG_DEBUG_FS) |
| 1818 | |
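| | /* Dump the state of one TTM memory manager through its debug callback. */ |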
| 1819 | static int amdgpu_mm_dump_table(struct seq_file *m, void *data) |
| 1820 | { |
| 1821 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
| 1822 | unsigned ttm_pl = *(int *)node->info_ent->data; |
| 1823 | struct drm_device *dev = node->minor->dev; |
| 1824 | struct amdgpu_device *adev = dev->dev_private; |
Christian König | 12d4ac5 | 2017-08-07 14:07:43 +0200 | [diff] [blame] | 1825 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; |
Daniel Vetter | b5c3714 | 2016-12-29 12:09:24 +0100 | [diff] [blame] | 1826 | struct drm_printer p = drm_seq_file_printer(m); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1827 | |
Christian König | 12d4ac5 | 2017-08-07 14:07:43 +0200 | [diff] [blame] | 1828 | man->func->debug(man, &p); |
Daniel Vetter | b5c3714 | 2016-12-29 12:09:24 +0100 | [diff] [blame] | 1829 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1830 | } |
| 1831 | |
| 1832 | static int ttm_pl_vram = TTM_PL_VRAM; |
| 1833 | static int ttm_pl_tt = TTM_PL_TT; |
| 1834 | |
Nils Wallménius | 06ab683 | 2016-05-02 12:46:15 -0400 | [diff] [blame] | 1835 | static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1836 | {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, |
| 1837 | {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, |
| 1838 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, |
| 1839 | #ifdef CONFIG_SWIOTLB |
| 1840 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} |
| 1841 | #endif |
| 1842 | }; |
| 1843 | |
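| | /* debugfs: read raw VRAM contents through the MM_INDEX/MM_DATA window. */ |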
| 1844 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, |
| 1845 | size_t size, loff_t *pos) |
| 1846 | { |
Al Viro | 4506309 | 2016-12-04 18:24:56 -0500 | [diff] [blame] | 1847 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1848 | ssize_t result = 0; |
| 1849 | int r; |
| 1850 | |
| 1851 | if (size & 0x3 || *pos & 0x3) |
| 1852 | return -EINVAL; |
| 1853 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1854 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 9156e72 | 2017-05-23 11:35:22 -0400 | [diff] [blame] | 1855 | return -ENXIO; |
| 1856 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1857 | while (size) { |
| 1858 | unsigned long flags; |
| 1859 | uint32_t value; |
| 1860 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1861 | if (*pos >= adev->gmc.mc_vram_size) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1862 | return result; |
| 1863 | |
| 1864 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | c3057281 | 2017-09-13 12:35:15 -0400 | [diff] [blame] | 1865 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
| 1866 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); |
| 1867 | value = RREG32_NO_KIQ(mmMM_DATA); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1868 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 1869 | |
| 1870 | r = put_user(value, (uint32_t *)buf); |
| 1871 | if (r) |
| 1872 | return r; |
| 1873 | |
| 1874 | result += 4; |
| 1875 | buf += 4; |
| 1876 | *pos += 4; |
| 1877 | size -= 4; |
| 1878 | } |
| 1879 | |
| 1880 | return result; |
| 1881 | } |
| 1882 | |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 1883 | static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, |
| 1884 | size_t size, loff_t *pos) |
| 1885 | { |
| 1886 | struct amdgpu_device *adev = file_inode(f)->i_private; |
| 1887 | ssize_t result = 0; |
| 1888 | int r; |
| 1889 | |
| 1890 | if (size & 0x3 || *pos & 0x3) |
| 1891 | return -EINVAL; |
| 1892 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1893 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 1894 | return -ENXIO; |
| 1895 | |
| 1896 | while (size) { |
| 1897 | unsigned long flags; |
| 1898 | uint32_t value; |
| 1899 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1900 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 1901 | return result; |
| 1902 | |
| 1903 | r = get_user(value, (uint32_t *)buf); |
| 1904 | if (r) |
| 1905 | return r; |
| 1906 | |
| 1907 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | c3057281 | 2017-09-13 12:35:15 -0400 | [diff] [blame] | 1908 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
| 1909 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); |
| 1910 | WREG32_NO_KIQ(mmMM_DATA, value); |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 1911 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 1912 | |
| 1913 | result += 4; |
| 1914 | buf += 4; |
| 1915 | *pos += 4; |
| 1916 | size -= 4; |
| 1917 | } |
| 1918 | |
| 1919 | return result; |
| 1920 | } |
| 1921 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1922 | static const struct file_operations amdgpu_ttm_vram_fops = { |
| 1923 | .owner = THIS_MODULE, |
| 1924 | .read = amdgpu_ttm_vram_read, |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 1925 | .write = amdgpu_ttm_vram_write, |
| 1926 | .llseek = default_llseek, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1927 | }; |
| 1928 | |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 1929 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
| 1930 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1931 | static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, |
| 1932 | size_t size, loff_t *pos) |
| 1933 | { |
Al Viro | 4506309 | 2016-12-04 18:24:56 -0500 | [diff] [blame] | 1934 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1935 | ssize_t result = 0; |
| 1936 | int r; |
| 1937 | |
| 1938 | while (size) { |
| 1939 | loff_t p = *pos / PAGE_SIZE; |
| 1940 | unsigned off = *pos & ~PAGE_MASK; |
| 1941 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); |
| 1942 | struct page *page; |
| 1943 | void *ptr; |
| 1944 | |
| 1945 | if (p >= adev->gart.num_cpu_pages) |
| 1946 | return result; |
| 1947 | |
| 1948 | page = adev->gart.pages[p]; |
| 1949 | if (page) { |
| 1950 | ptr = kmap(page); |
| 1951 | ptr += off; |
| 1952 | |
| 1953 | r = copy_to_user(buf, ptr, cur_size); |
| 1954 | kunmap(adev->gart.pages[p]); |
| 1955 | } else |
| 1956 | r = clear_user(buf, cur_size); |
| 1957 | |
| 1958 | if (r) |
| 1959 | return -EFAULT; |
| 1960 | |
| 1961 | result += cur_size; |
| 1962 | buf += cur_size; |
| 1963 | *pos += cur_size; |
| 1964 | size -= cur_size; |
| 1965 | } |
| 1966 | |
| 1967 | return result; |
| 1968 | } |
| 1969 | |
| 1970 | static const struct file_operations amdgpu_ttm_gtt_fops = { |
| 1971 | .owner = THIS_MODULE, |
| 1972 | .read = amdgpu_ttm_gtt_read, |
| 1973 | .llseek = default_llseek |
| 1974 | }; |
| 1975 | |
| 1976 | #endif |
| 1977 | |
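| | /* debugfs: access pages by bus address, translating through the IOMMU when present. */ |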
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 1978 | static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, |
| 1979 | size_t size, loff_t *pos) |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 1980 | { |
| 1981 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 1982 | struct iommu_domain *dom; |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 1983 | ssize_t result = 0; |
| 1984 | int r; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 1985 | |
| 1986 | dom = iommu_get_domain_for_dev(adev->dev); |
Tom St Denis | 10cfafd | 2017-09-19 11:29:04 -0400 | [diff] [blame] | 1987 | |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 1988 | while (size) { |
| 1989 | phys_addr_t addr = *pos & PAGE_MASK; |
| 1990 | loff_t off = *pos & ~PAGE_MASK; |
| 1991 | size_t bytes = PAGE_SIZE - off; |
| 1992 | unsigned long pfn; |
| 1993 | struct page *p; |
| 1994 | void *ptr; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 1995 | |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 1996 | bytes = bytes < size ? bytes : size; |
| 1997 | |
| 1998 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
| 1999 | |
| 2000 | pfn = addr >> PAGE_SHIFT; |
| 2001 | if (!pfn_valid(pfn)) |
| 2002 | return -EPERM; |
| 2003 | |
| 2004 | p = pfn_to_page(pfn); |
| 2005 | if (p->mapping != adev->mman.bdev.dev_mapping) |
| 2006 | return -EPERM; |
| 2007 | |
| 2008 | ptr = kmap(p); |
| 2009 | r = copy_to_user(buf, ptr, bytes); |
| 2010 | kunmap(p); |
| 2011 | if (r) |
| 2012 | return -EFAULT; |
| 2013 | |
| 2014 | size -= bytes; |
| 2015 | *pos += bytes; |
| 2016 | result += bytes; |
| 2017 | } |
| 2018 | |
| 2019 | return result; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2020 | } |
| 2021 | |
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

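/* debugfs files exposing the different TTM memory domains */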
static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

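/*
 * amdgpu_ttm_debugfs_init - register the TTM debugfs files
 *
 * Creates one debugfs file per ttm_debugfs_entries[] entry, sizing the
 * VRAM and GTT files to match the amount of memory they expose, and
 * then registers the shared amdgpu_ttm_debugfs_list tables.
 */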
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	/* the last entry of the list is only meaningful when swiotlb is
	 * actually in use, so drop it otherwise
	 */
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

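/*
 * amdgpu_ttm_debugfs_fini - remove the TTM debugfs files created by
 * amdgpu_ttm_debugfs_init()
 */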
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}