/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

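/* TTM BO mmap offsets start 4 GiB into the DRM file offset space */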
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */

/**
 * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
 * memory object
 *
 * @ref: Object for initialization.
 *
 * This is called by drm_global_item_ref() when an object is being
 * initialized.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

/**
 * amdgpu_ttm_mem_global_release - Drop reference to a memory object
 *
 * @ref: Object being removed
 *
 * This is called by drm_global_item_unref() when an object is being
 * released.
 */
static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

/**
 * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
 *
 * @adev: AMDGPU device for which the global structures need to be registered.
 *
 * Called once from amdgpu_ttm_init() during driver bring-up.
 */
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	int r;

	/* ensure reference is false in case init fails */
	adev->mman.mem_global_referenced = false;

	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	adev->mman.mem_global_referenced = true;

	return 0;

error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

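/**
 * amdgpu_ttm_global_fini - Drop the global TTM references taken at init
 *
 * @adev: AMDGPU device
 *
 * Counterpart of amdgpu_ttm_global_init(); a no-op if the references
 * were never taken.
 */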
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

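/* Stub for the ttm_bo_driver invalidate_caches() hook; nothing to do here */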
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The buffer object whose address is being computed.
 * @mm_node: Memory manager node covering the buffer.
 * @mem: The region where the bo resides.
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Find the drm_mm_node that contains @offset
 *
 * @mem: The region where the bo resides.
 * @offset: The offset to look up; on return it is adjusted to be
 * relative to the start of the returned drm_mm_node.
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * @adev: amdgpu device the copy is performed on
 * @src: source BO and offset to copy from
 * @dst: destination BO and offset to copy to
 * @size: number of bytes to copy
 * @resv: reservation object to synchronize the copy with
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

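	/* The two GTT mapping windows are shared driver-wide, so all
	 * users must be serialized.
	 */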
	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

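	/* Fall back to a CPU copy when the blit path is unavailable or failed */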
	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

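/**
 * amdgpu_ttm_io_mem_free - Counterpart of amdgpu_ttm_io_mem_reserve()
 *
 * Nothing to undo here, so the callback is intentionally empty.
 */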
static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

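/**
 * amdgpu_ttm_io_mem_pfn - Compute the PFN backing @page_offset of a BO
 *
 * Walks the drm_mm nodes of the BO, since an allocation is not
 * necessarily physically contiguous.
 */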
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
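/* Tracks a task that currently has get_user_pages() in flight for a BO */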
struct amdgpu_ttm_gup_task_list {
	struct list_head list;
	struct task_struct *task;
};

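/*
 * amdgpu TTM backend private data; wraps ttm_dma_tt and adds the state
 * needed for userptr handling (owning task, GUP bookkeeping and MMU
 * invalidation tracking).
 */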
struct amdgpu_ttm_tt {
	struct ttm_dma_tt ttm;
	u64 offset;
	uint64_t userptr;
	struct task_struct *usertask;
	uint32_t userflags;
	spinlock_t guptasklock;
	struct list_head guptasks;
	atomic_t mmu_invalidations;
	uint32_t last_set_pages;
};

/**
 * amdgpu_ttm_tt_get_user_pages - Pin the pages of memory backing a userptr BO
 *
 * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
 * This provides a wrapper around the get_user_pages() call to provide
 * device accessible pages that back user memory.
 */
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct mm_struct *mm = gtt->usertask->mm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!mm) /* Happens during process shutdown */
		return -ESRCH;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/*
		 * check that we only use anonymous memory to prevent problems
		 * with writeback
		 */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(mm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&mm->mmap_sem);
			return -EPERM;
		}
	}

	/* loop enough times using contiguous pages of memory */
	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		if (mm == current->mm)
			r = get_user_pages(userptr, num_pages, flags, p, NULL);
		else
			r = get_user_pages_remote(gtt->usertask,
					mm, userptr, num_pages,
					flags, p, NULL, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&mm->mmap_sem);
	return r;
}

/**
 * amdgpu_ttm_tt_set_user_pages - Copy new pages in, releasing the old ones
 *
 * Called by amdgpu_cs_list_validate(). This fills in the page array
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

/**
 * amdgpu_ttm_tt_mark_user_pages - Mark pages as dirty
 *
 * Called while unpinning userptr pages
 */
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}

Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 920 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 921 | * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 922 | * |
| 923 | * Called by amdgpu_ttm_backend_bind() |
| 924 | **/ |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 925 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) |
| 926 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 927 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 928 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 929 | unsigned nents; |
| 930 | int r; |
| 931 | |
| 932 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 933 | enum dma_data_direction direction = write ? |
| 934 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
| 935 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 936 | /* Allocate an SG array and squash pages into it */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 937 | r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, |
| 938 | ttm->num_pages << PAGE_SHIFT, |
| 939 | GFP_KERNEL); |
| 940 | if (r) |
| 941 | goto release_sg; |
| 942 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 943 | /* Map SG to device */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 944 | r = -ENOMEM; |
| 945 | nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
| 946 | if (nents != ttm->sg->nents) |
| 947 | goto release_sg; |
| 948 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 949 | /* convert SG to linear array of pages and dma addresses */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 950 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
| 951 | gtt->ttm.dma_address, ttm->num_pages); |
| 952 | |
| 953 | return 0; |
| 954 | |
| 955 | release_sg: |
| 956 | kfree(ttm->sg); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 957 | return r; |
| 958 | } |
| 959 | |
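/*
 * Illustrative sketch (not driver code): the pin-and-map pattern used
 * above, reduced to the bare scatter/gather DMA API calls; "dev",
 * "pages" and "num_pages" stand in for the caller's context.
 *
 *	struct sg_table sgt;
 *	int nents;
 *
 *	if (sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
 *				      (u64)num_pages << PAGE_SHIFT,
 *				      GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	nents = dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
 *	if (nents == 0) {
 *		sg_free_table(&sgt);
 *		return -ENOMEM;
 *	}
 */
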
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 960 | /** |
| 961 | * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages |
| 962 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 963 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) |
| 964 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 965 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 966 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 967 | |
| 968 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 969 | enum dma_data_direction direction = write ? |
| 970 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
| 971 | |
| 972 | /* double check that we don't free the table twice */ |
| 973 | if (!ttm->sg->sgl) |
| 974 | return; |
| 975 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 976 | /* unmap the pages mapped to the device */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 977 | dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
| 978 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 979 | /* mark the pages as dirty */ |
Christian König | 1b0c0f9 | 2017-09-05 14:36:44 +0200 | [diff] [blame] | 980 | amdgpu_ttm_tt_mark_user_pages(ttm); |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 981 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 982 | sg_free_table(ttm->sg); |
| 983 | } |
| 984 | |
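/**
 * amdgpu_ttm_gart_bind - bind a BO's pages into the GART
 *
 * Helper used by amdgpu_ttm_alloc_gart() and amdgpu_ttm_recover_gart().
 * For GFX9 MQD buffers the first page keeps the caller's MTYPE while
 * the remaining pages are rebound as MTYPE_NC.
 */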
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 985 | int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, |
| 986 | struct ttm_buffer_object *tbo, |
| 987 | uint64_t flags) |
| 988 | { |
| 989 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); |
| 990 | struct ttm_tt *ttm = tbo->ttm; |
| 991 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 992 | int r; |
| 993 | |
| 994 | if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { |
| 995 | uint64_t page_idx = 1; |
| 996 | |
| 997 | r = amdgpu_gart_bind(adev, gtt->offset, page_idx, |
| 998 | ttm->pages, gtt->ttm.dma_address, flags); |
| 999 | if (r) |
| 1000 | goto gart_bind_fail; |
| 1001 | |
| 1002 | /* Patch mtype of the second part BO */ |
| 1003 | flags &= ~AMDGPU_PTE_MTYPE_MASK; |
| 1004 | flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC); |
| 1005 | |
| 1006 | r = amdgpu_gart_bind(adev, |
| 1007 | gtt->offset + (page_idx << PAGE_SHIFT), |
| 1008 | ttm->num_pages - page_idx, |
| 1009 | &ttm->pages[page_idx], |
| 1010 | &(gtt->ttm.dma_address[page_idx]), flags); |
| 1011 | } else { |
| 1012 | r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, |
| 1013 | ttm->pages, gtt->ttm.dma_address, flags); |
| 1014 | } |
| 1015 | |
| 1016 | gart_bind_fail: |
| 1017 | if (r) |
| 1018 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", |
| 1019 | ttm->num_pages, gtt->offset); |
| 1020 | |
| 1021 | return r; |
| 1022 | } |
| 1023 | |
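/*
 * Worked example (illustrative): for a 4 page GFX9 MQD BO bound at
 * gtt->offset 0x100000, amdgpu_ttm_gart_bind() issues two GART binds:
 *
 *	page  0     at 0x100000             with the caller's MTYPE
 *	pages 1..3  at 0x100000 + PAGE_SIZE with AMDGPU_MTYPE_NC
 */
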
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1024 | /** |
| 1025 | * amdgpu_ttm_backend_bind - Bind GTT memory |
| 1026 | * |
| 1027 | * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). |
| 1028 | * This handles binding GTT memory to the device address space. |
| 1029 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1030 | static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, |
| 1031 | struct ttm_mem_reg *bo_mem) |
| 1032 | { |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1033 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1034 | struct amdgpu_ttm_tt *gtt = (void*)ttm; |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1035 | uint64_t flags; |
Dan Carpenter | 2ce3f5dc | 2017-08-09 13:30:46 +0300 | [diff] [blame] | 1036 | int r = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1037 | |
Chunming Zhou | e2f784f | 2015-11-26 16:33:58 +0800 | [diff] [blame] | 1038 | if (gtt->userptr) { |
| 1039 | r = amdgpu_ttm_tt_pin_userptr(ttm); |
| 1040 | if (r) { |
| 1041 | DRM_ERROR("failed to pin userptr\n"); |
| 1042 | return r; |
| 1043 | } |
| 1044 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1045 | if (!ttm->num_pages) { |
| 1046 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
| 1047 | ttm->num_pages, bo_mem, ttm); |
| 1048 | } |
| 1049 | |
| 1050 | if (bo_mem->mem_type == AMDGPU_PL_GDS || |
| 1051 | bo_mem->mem_type == AMDGPU_PL_GWS || |
| 1052 | bo_mem->mem_type == AMDGPU_PL_OA) |
| 1053 | return -EINVAL; |
| 1054 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1055 | if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { |
| 1056 | gtt->offset = AMDGPU_BO_INVALID_OFFSET; |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1057 | return 0; |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1058 | } |
Christian König | 98a7f88 | 2017-06-30 10:41:07 +0200 | [diff] [blame] | 1059 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1060 | /* compute PTE flags relevant to this BO memory */ |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1061 | flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1062 | |
| 1063 | /* bind pages into GART page tables */ |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1064 | gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1065 | r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1066 | ttm->pages, gtt->ttm.dma_address, flags); |
| 1067 | |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1068 | if (r) |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1069 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", |
| 1070 | ttm->num_pages, gtt->offset); |
Christian König | 98a7f88 | 2017-06-30 10:41:07 +0200 | [diff] [blame] | 1071 | return r; |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1072 | } |
| 1073 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1074 | /** |
| 1075 | * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object |
| 1076 | */ |
Christian König | c5835bb | 2017-10-27 15:43:14 +0200 | [diff] [blame] | 1077 | int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1078 | { |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1079 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
Christian König | c13c55d | 2017-04-12 15:33:00 +0200 | [diff] [blame] | 1080 | struct ttm_operation_ctx ctx = { false, false }; |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1081 | struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1082 | struct ttm_mem_reg tmp; |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1083 | struct ttm_placement placement; |
| 1084 | struct ttm_place placements; |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1085 | uint64_t flags; |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1086 | int r; |
| 1087 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1088 | if (bo->mem.mem_type != TTM_PL_TT || |
| 1089 | amdgpu_gtt_mgr_has_gart_addr(&bo->mem)) |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1090 | return 0; |
| 1091 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1092 | /* allocate GTT space */ |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1093 | tmp = bo->mem; |
| 1094 | tmp.mm_node = NULL; |
| 1095 | placement.num_placement = 1; |
| 1096 | placement.placement = &placements; |
| 1097 | placement.num_busy_placement = 1; |
| 1098 | placement.busy_placement = &placements; |
| 1099 | placements.fpfn = 0; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1100 | placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; |
Christian König | ec8c9f8 | 2017-10-16 13:47:15 +0200 | [diff] [blame] | 1101 | placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | |
| 1102 | TTM_PL_FLAG_TT; |
Christian König | bb990bb | 2016-09-09 16:32:33 +0200 | [diff] [blame] | 1103 | |
Christian König | c13c55d | 2017-04-12 15:33:00 +0200 | [diff] [blame] | 1104 | r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1105 | if (unlikely(r)) |
| 1106 | return r; |
| 1107 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1108 | /* compute PTE flags for this buffer object */ |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1109 | flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1110 | |
| 1111 | /* Bind pages */ |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1112 | gtt->offset = (u64)tmp.start << PAGE_SHIFT; |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 1113 | r = amdgpu_ttm_gart_bind(adev, bo, flags); |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1114 | if (unlikely(r)) { |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1115 | ttm_bo_mem_put(bo, &tmp); |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1116 | return r; |
| 1117 | } |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1118 | |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1119 | ttm_bo_mem_put(bo, &bo->mem); |
| 1120 | bo->mem = tmp; |
| 1121 | bo->offset = (bo->mem.start << PAGE_SHIFT) + |
| 1122 | bo->bdev->man[bo->mem.mem_type].gpu_offset; |
| 1123 | |
| 1124 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1125 | } |
| 1126 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1127 | /** |
| 1128 | * amdgpu_ttm_recover_gart - Rebind GTT pages |
| 1129 | * |
| 1130 | * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to |
| 1131 | * rebind GTT pages during a GPU reset. |
| 1132 | */ |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1133 | int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) |
Chunming Zhou | 2c0d731 | 2016-08-30 16:36:25 +0800 | [diff] [blame] | 1134 | { |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1135 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
Monk Liu | 1d1a2cd | 2017-04-27 17:14:57 +0800 | [diff] [blame] | 1136 | uint64_t flags; |
Chunming Zhou | 2c0d731 | 2016-08-30 16:36:25 +0800 | [diff] [blame] | 1137 | int r; |
| 1138 | |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 1139 | if (!tbo->ttm) |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1140 | return 0; |
| 1141 | |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 1142 | flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); |
| 1143 | r = amdgpu_ttm_gart_bind(adev, tbo, flags); |
| 1144 | |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1145 | return r; |
Chunming Zhou | 2c0d731 | 2016-08-30 16:36:25 +0800 | [diff] [blame] | 1146 | } |
| 1147 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1148 | /** |
| 1149 | * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages |
| 1150 | * |
| 1151 | * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and |
| 1152 | * ttm_tt_destroy(). |
| 1153 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1154 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) |
| 1155 | { |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1156 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1157 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 1158 | int r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1159 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1160 | /* if the pages have userptr pinning then clear that first */ |
Christian König | 85a4b57 | 2016-09-22 14:19:50 +0200 | [diff] [blame] | 1161 | if (gtt->userptr) |
| 1162 | amdgpu_ttm_tt_unpin_userptr(ttm); |
| 1163 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1164 | if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) |
Christian König | 78ab0a3 | 2016-09-09 15:39:08 +0200 | [diff] [blame] | 1165 | return 0; |
| 1166 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1167 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1168 | r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1169 | if (r) |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 1170 | DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", |
| 1171 | gtt->ttm.ttm.num_pages, gtt->offset); |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 1172 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1173 | } |
| 1174 | |
| 1175 | static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) |
| 1176 | { |
| 1177 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1178 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 1179 | if (gtt->usertask) |
| 1180 | put_task_struct(gtt->usertask); |
| 1181 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1182 | ttm_dma_tt_fini(&gtt->ttm); |
| 1183 | kfree(gtt); |
| 1184 | } |
| 1185 | |
| 1186 | static struct ttm_backend_func amdgpu_backend_func = { |
| 1187 | .bind = &amdgpu_ttm_backend_bind, |
| 1188 | .unbind = &amdgpu_ttm_backend_unbind, |
| 1189 | .destroy = &amdgpu_ttm_backend_destroy, |
| 1190 | }; |
| 1191 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1192 | /** |
| 1193 | * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO |
| 1194 | * |
| 1195 | * @bo: The buffer object to create a GTT ttm_tt object around |
| 1196 | * |
| 1197 | * Called by ttm_tt_create(). |
| 1198 | */ |
Christian König | dde5da2 | 2018-02-22 10:18:14 +0100 | [diff] [blame] | 1199 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, |
| 1200 | uint32_t page_flags) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1201 | { |
| 1202 | struct amdgpu_device *adev; |
| 1203 | struct amdgpu_ttm_tt *gtt; |
| 1204 | |
Christian König | dde5da2 | 2018-02-22 10:18:14 +0100 | [diff] [blame] | 1205 | adev = amdgpu_ttm_adev(bo->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1206 | |
| 1207 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); |
| 1208 | if (gtt == NULL) { |
| 1209 | return NULL; |
| 1210 | } |
| 1211 | gtt->ttm.ttm.func = &amdgpu_backend_func; |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1212 | |
| 1213 | /* allocate space for the uninitialized page entries */ |
Christian König | dde5da2 | 2018-02-22 10:18:14 +0100 | [diff] [blame] | 1214 | if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1215 | kfree(gtt); |
| 1216 | return NULL; |
| 1217 | } |
| 1218 | return &gtt->ttm.ttm; |
| 1219 | } |
| 1220 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1221 | /** |
| 1222 | * amdgpu_ttm_tt_populate - Map GTT pages visible to the device |
| 1223 | * |
| 1224 | * Map the pages of a ttm_tt object to an address space visible |
| 1225 | * to the underlying device. |
| 1226 | */ |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1227 | static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, |
| 1228 | struct ttm_operation_ctx *ctx) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1229 | { |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 1230 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1231 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1232 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1233 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1234 | /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1235 | if (gtt && gtt->userptr) { |
Maninder Singh | 5f0b34c | 2015-06-26 13:28:50 +0530 | [diff] [blame] | 1236 | ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1237 | if (!ttm->sg) |
| 1238 | return -ENOMEM; |
| 1239 | |
| 1240 | ttm->page_flags |= TTM_PAGE_FLAG_SG; |
| 1241 | ttm->state = tt_unbound; |
| 1242 | return 0; |
| 1243 | } |
| 1244 | |
| 1245 | if (slave && ttm->sg) { |
| 1246 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
Christian König | e89d0d3 | 2018-02-23 16:08:51 +0100 | [diff] [blame] | 1247 | gtt->ttm.dma_address, |
| 1248 | ttm->num_pages); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1249 | ttm->state = tt_unbound; |
Tom St Denis | 79ba280 | 2017-09-18 08:10:00 -0400 | [diff] [blame] | 1250 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1251 | } |
| 1252 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1253 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 1254 | if (adev->need_swiotlb && swiotlb_nr_tbl()) { |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1255 | return ttm_dma_populate(&gtt->ttm, adev->dev, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1256 | } |
| 1257 | #endif |
| 1258 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1259 | /* fall back to generic helper to populate the page array |
| 1260 | * and map them to the device */ |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1261 | return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1262 | } |
| 1263 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1264 | /** |
| 1265 | * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays |
| 1266 | * |
| 1267 | * Unmaps pages of a ttm_tt object from the device address space and |
| 1268 | * unpopulates the page array backing it. |
| 1269 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1270 | static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) |
| 1271 | { |
| 1272 | struct amdgpu_device *adev; |
| 1273 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1274 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1275 | |
| 1276 | if (gtt && gtt->userptr) { |
Christian König | a216ab0 | 2017-09-02 13:21:31 +0200 | [diff] [blame] | 1277 | amdgpu_ttm_tt_set_user_pages(ttm, NULL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1278 | kfree(ttm->sg); |
| 1279 | ttm->page_flags &= ~TTM_PAGE_FLAG_SG; |
| 1280 | return; |
| 1281 | } |
| 1282 | |
| 1283 | if (slave) |
| 1284 | return; |
| 1285 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 1286 | adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1287 | |
| 1288 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 1289 | if (adev->need_swiotlb && swiotlb_nr_tbl()) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1290 | ttm_dma_unpopulate(&gtt->ttm, adev->dev); |
| 1291 | return; |
| 1292 | } |
| 1293 | #endif |
| 1294 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1295 | /* fall back to generic helper to unmap and unpopulate array */ |
Tom St Denis | 7405e0d | 2017-08-18 10:05:48 -0400 | [diff] [blame] | 1296 | ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1297 | } |
| 1298 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1299 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1300 | * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current |
| 1301 | * task |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1302 | * |
| 1303 | * @ttm: The ttm_tt object to bind this userptr object to |
| 1304 | * @addr: The address in the current tasks VM space to use |
| 1305 | * @flags: Requirements of userptr object. |
| 1306 | * |
| 1307 | * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages |
| 1308 | * to current task |
| 1309 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1310 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
| 1311 | uint32_t flags) |
| 1312 | { |
| 1313 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1314 | |
| 1315 | if (gtt == NULL) |
| 1316 | return -EINVAL; |
| 1317 | |
| 1318 | gtt->userptr = addr; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1319 | gtt->userflags = flags; |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 1320 | |
| 1321 | if (gtt->usertask) |
| 1322 | put_task_struct(gtt->usertask); |
| 1323 | gtt->usertask = current->group_leader; |
| 1324 | get_task_struct(gtt->usertask); |
| 1325 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1326 | spin_lock_init(&gtt->guptasklock); |
| 1327 | INIT_LIST_HEAD(&gtt->guptasks); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1328 | atomic_set(&gtt->mmu_invalidations, 0); |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 1329 | gtt->last_set_pages = 0; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1330 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1331 | return 0; |
| 1332 | } |
| 1333 | |
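/*
 * Illustrative call sequence (a sketch, not a verbatim copy of the
 * ioctl): amdgpu_gem_userptr_ioctl() first records the address and
 * flags, and the pages are looked up and attached separately:
 *
 *	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 *	...
 *	r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, pages);
 *	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, pages);
 *
 * Binding into the GART then happens via amdgpu_ttm_backend_bind() ->
 * amdgpu_ttm_tt_pin_userptr().
 */
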
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1334 | /** |
| 1335 | * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object |
| 1336 | */ |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1337 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1338 | { |
| 1339 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1340 | |
| 1341 | if (gtt == NULL) |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1342 | return NULL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1343 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 1344 | if (gtt->usertask == NULL) |
| 1345 | return NULL; |
| 1346 | |
| 1347 | return gtt->usertask->mm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1348 | } |
| 1349 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1350 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1351 | * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an |
| 1352 | * address range for the current task. |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1353 | * |
| 1354 | */ |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1355 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, |
| 1356 | unsigned long end) |
| 1357 | { |
| 1358 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1359 | struct amdgpu_ttm_gup_task_list *entry; |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1360 | unsigned long size; |
| 1361 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1362 | if (gtt == NULL || !gtt->userptr) |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1363 | return false; |
| 1364 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1365 | /* Return false if no part of the ttm_tt object lies within |
| 1366 | * the range |
| 1367 | */ |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1368 | size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; |
| 1369 | if (gtt->userptr > end || gtt->userptr + size <= start) |
| 1370 | return false; |
| 1371 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1372 | /* Search the lists of tasks that hold this mapping and see |
| 1373 | * if current is one of them. If it is return false. |
| 1374 | */ |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1375 | spin_lock(&gtt->guptasklock); |
| 1376 | list_for_each_entry(entry, &gtt->guptasks, list) { |
| 1377 | if (entry->task == current) { |
| 1378 | spin_unlock(&gtt->guptasklock); |
| 1379 | return false; |
| 1380 | } |
| 1381 | } |
| 1382 | spin_unlock(&gtt->guptasklock); |
| 1383 | |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1384 | atomic_inc(&gtt->mmu_invalidations); |
| 1385 | |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1386 | return true; |
| 1387 | } |
| 1388 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1389 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1390 | * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated? |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1391 | */ |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1392 | bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, |
| 1393 | int *last_invalidated) |
| 1394 | { |
| 1395 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1396 | int prev_invalidated = *last_invalidated; |
| 1397 | |
| 1398 | *last_invalidated = atomic_read(&gtt->mmu_invalidations); |
| 1399 | return prev_invalidated != *last_invalidated; |
| 1400 | } |
| 1401 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1402 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1403 | * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object |
| 1404 | * been invalidated since the last time they've been set? |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1405 | */ |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 1406 | bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) |
| 1407 | { |
| 1408 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1409 | |
| 1410 | if (gtt == NULL || !gtt->userptr) |
| 1411 | return false; |
| 1412 | |
| 1413 | return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages; |
| 1414 | } |
| 1415 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1416 | /** |
| 1417 | * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? |
| 1418 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1419 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) |
| 1420 | { |
| 1421 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1422 | |
| 1423 | if (gtt == NULL) |
| 1424 | return false; |
| 1425 | |
| 1426 | return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 1427 | } |
| 1428 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1429 | /** |
| 1430 | * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object |
| 1431 | * |
| 1432 | * @ttm: The ttm_tt object to compute the flags for |
| 1433 | * @mem: The memory registry backing this ttm_tt object |
| 1434 | */ |
Chunming Zhou | 6b77760 | 2016-09-21 16:19:19 +0800 | [diff] [blame] | 1435 | uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1436 | struct ttm_mem_reg *mem) |
| 1437 | { |
Chunming Zhou | 6b77760 | 2016-09-21 16:19:19 +0800 | [diff] [blame] | 1438 | uint64_t flags = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1439 | |
| 1440 | if (mem && mem->mem_type != TTM_PL_SYSTEM) |
| 1441 | flags |= AMDGPU_PTE_VALID; |
| 1442 | |
Christian König | 6d99905 | 2015-12-04 13:32:55 +0100 | [diff] [blame] | 1443 | if (mem && mem->mem_type == TTM_PL_TT) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1444 | flags |= AMDGPU_PTE_SYSTEM; |
| 1445 | |
Christian König | 6d99905 | 2015-12-04 13:32:55 +0100 | [diff] [blame] | 1446 | if (ttm->caching_state == tt_cached) |
| 1447 | flags |= AMDGPU_PTE_SNOOPED; |
| 1448 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1449 | |
Alex Xie | 4b98e0c | 2017-02-14 12:31:36 -0500 | [diff] [blame] | 1450 | flags |= adev->gart.gart_pte_flags; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1451 | flags |= AMDGPU_PTE_READABLE; |
| 1452 | |
| 1453 | if (!amdgpu_ttm_tt_is_readonly(ttm)) |
| 1454 | flags |= AMDGPU_PTE_WRITEABLE; |
| 1455 | |
| 1456 | return flags; |
| 1457 | } |
| 1458 | |
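/*
 * Example (illustrative): a cacheable, writable GTT mapping yields
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * plus the ASIC specific bits from adev->gart.gart_pte_flags.
 */
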
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1459 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1460 | * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer |
| 1461 | * object. |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1462 | * |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1463 | * Return true if eviction is sensible. Called by ttm_mem_evict_first() on |
| 1464 | * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until |
| 1465 | * it can find space for a new object and by ttm_bo_force_list_clean() which is |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1466 | * used to clean out a memory space. |
| 1467 | */ |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1468 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, |
| 1469 | const struct ttm_place *place) |
| 1470 | { |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1471 | unsigned long num_pages = bo->mem.num_pages; |
| 1472 | struct drm_mm_node *node = bo->mem.mm_node; |
Felix Kuehling | d8d019c | 2018-02-06 20:32:35 -0500 | [diff] [blame] | 1473 | struct reservation_object_list *flist; |
| 1474 | struct dma_fence *f; |
| 1475 | int i; |
| 1476 | |
| 1477 | /* If bo is a KFD BO, check if the bo belongs to the current process. |
| 1478 | * If true, then return false as any KFD process needs all its BOs to |
| 1479 | * be resident to run successfully |
| 1480 | */ |
| 1481 | flist = reservation_object_get_list(bo->resv); |
| 1482 | if (flist) { |
| 1483 | for (i = 0; i < flist->shared_count; ++i) { |
| 1484 | f = rcu_dereference_protected(flist->shared[i], |
| 1485 | reservation_object_held(bo->resv)); |
| 1486 | if (amdkfd_fence_check_mm(f, current->mm)) |
| 1487 | return false; |
| 1488 | } |
| 1489 | } |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1490 | |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1491 | switch (bo->mem.mem_type) { |
| 1492 | case TTM_PL_TT: |
| 1493 | return true; |
| 1494 | |
| 1495 | case TTM_PL_VRAM: |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1496 | /* Check each drm MM node individually */ |
| 1497 | while (num_pages) { |
| 1498 | if (place->fpfn < (node->start + node->size) && |
| 1499 | !(place->lpfn && place->lpfn <= node->start)) |
| 1500 | return true; |
| 1501 | |
| 1502 | num_pages -= node->size; |
| 1503 | ++node; |
| 1504 | } |
Roger He | 7da2e3e | 2017-11-02 13:14:27 +0800 | [diff] [blame] | 1505 | return false; |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1506 | |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1507 | default: |
| 1508 | break; |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1509 | } |
| 1510 | |
| 1511 | return ttm_bo_eviction_valuable(bo, place); |
| 1512 | } |
| 1513 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1514 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1515 | * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1516 | * |
| 1517 | * @bo: The buffer object to read/write |
| 1518 | * @offset: Offset into buffer object |
| 1519 | * @buf: Secondary buffer to write/read from |
| 1520 | * @len: Length in bytes of access |
| 1521 | * @write: true if writing |
| 1522 | * |
| 1523 | * This is used to access VRAM that backs a buffer object via MMIO |
| 1524 | * access for debugging purposes. |
| 1525 | */ |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1526 | static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, |
| 1527 | unsigned long offset, |
| 1528 | void *buf, int len, int write) |
| 1529 | { |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 1530 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1531 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 1532 | struct drm_mm_node *nodes; |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1533 | uint32_t value = 0; |
| 1534 | int ret = 0; |
| 1535 | uint64_t pos; |
| 1536 | unsigned long flags; |
| 1537 | |
| 1538 | if (bo->mem.mem_type != TTM_PL_VRAM) |
| 1539 | return -EIO; |
| 1540 | |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 1541 | nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1542 | pos = (nodes->start << PAGE_SHIFT) + offset; |
| 1543 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1544 | while (len && pos < adev->gmc.mc_vram_size) { |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1545 | uint64_t aligned_pos = pos & ~(uint64_t)3; |
| 1546 | uint32_t bytes = 4 - (pos & 3); |
| 1547 | uint32_t shift = (pos & 3) * 8; |
| 1548 | uint32_t mask = 0xffffffff << shift; |
| 1549 | |
| 1550 | if (len < bytes) { |
| 1551 | mask &= 0xffffffff >> (bytes - len) * 8; |
| 1552 | bytes = len; |
| 1553 | } |
| 1554 | |
| 1555 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1556 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); |
| 1557 | WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1558 | if (!write || mask != 0xffffffff) |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1559 | value = RREG32_NO_KIQ(mmMM_DATA); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1560 | if (write) { |
| 1561 | value &= ~mask; |
| 1562 | value |= (*(uint32_t *)buf << shift) & mask; |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1563 | WREG32_NO_KIQ(mmMM_DATA, value); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1564 | } |
| 1565 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 1566 | if (!write) { |
| 1567 | value = (value & mask) >> shift; |
| 1568 | memcpy(buf, &value, bytes); |
| 1569 | } |
| 1570 | |
| 1571 | ret += bytes; |
| 1572 | buf = (uint8_t *)buf + bytes; |
| 1573 | pos += bytes; |
| 1574 | len -= bytes; |
| 1575 | if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { |
| 1576 | ++nodes; |
| 1577 | pos = (nodes->start << PAGE_SHIFT); |
| 1578 | } |
| 1579 | } |
| 1580 | |
| 1581 | return ret; |
| 1582 | } |
| 1583 | |
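/*
 * Worked example (illustrative): a 2 byte read at offset 0x1003 starts
 * in the dword at aligned_pos 0x1000:
 *
 *	pos & 3 = 3  ->  bytes = 1, shift = 24, mask = 0xff000000
 *
 * so one byte is extracted from MM_DATA, then the loop advances to
 * pos 0x1004, where the second byte is read with shift = 0 and
 * mask = 0x000000ff.
 */
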
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1584 | static struct ttm_bo_driver amdgpu_bo_driver = { |
| 1585 | .ttm_tt_create = &amdgpu_ttm_tt_create, |
| 1586 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, |
| 1587 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, |
| 1588 | .invalidate_caches = &amdgpu_invalidate_caches, |
| 1589 | .init_mem_type = &amdgpu_init_mem_type, |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1590 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1591 | .evict_flags = &amdgpu_evict_flags, |
| 1592 | .move = &amdgpu_bo_move, |
| 1593 | .verify_access = &amdgpu_verify_access, |
| 1594 | .move_notify = &amdgpu_bo_move_notify, |
| 1595 | .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, |
| 1596 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, |
| 1597 | .io_mem_free = &amdgpu_ttm_io_mem_free, |
Christian König | 9bbdcc0 | 2017-03-29 11:16:05 +0200 | [diff] [blame] | 1598 | .io_mem_pfn = amdgpu_ttm_io_mem_pfn, |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1599 | .access_memory = &amdgpu_ttm_access_memory |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1600 | }; |
| 1601 | |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1602 | /* |
| 1603 | * Firmware Reservation functions |
| 1604 | */ |
| 1605 | /** |
| 1606 | * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram |
| 1607 | * |
| 1608 | * @adev: amdgpu_device pointer |
| 1609 | * |
| 1610 | * free fw reserved vram if it has been reserved. |
| 1611 | */ |
| 1612 | static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) |
| 1613 | { |
| 1614 | amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, |
| 1615 | NULL, &adev->fw_vram_usage.va); |
| 1616 | } |
| 1617 | |
| 1618 | /** |
| 1619 | * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw |
| 1620 | * |
| 1621 | * @adev: amdgpu_device pointer |
| 1622 | * |
| 1623 | * create bo vram reservation from fw. |
| 1624 | */ |
| 1625 | static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) |
| 1626 | { |
| 1627 | struct ttm_operation_ctx ctx = { false, false }; |
Chunming Zhou | 3216c6b | 2018-04-16 18:27:50 +0800 | [diff] [blame] | 1628 | struct amdgpu_bo_param bp; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1629 | int r = 0; |
| 1630 | int i; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1631 | u64 vram_size = adev->gmc.visible_vram_size; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1632 | u64 offset = adev->fw_vram_usage.start_offset; |
| 1633 | u64 size = adev->fw_vram_usage.size; |
| 1634 | struct amdgpu_bo *bo; |
| 1635 | |
Chunming Zhou | 3216c6b | 2018-04-16 18:27:50 +0800 | [diff] [blame] | 1636 | memset(&bp, 0, sizeof(bp)); |
| 1637 | bp.size = adev->fw_vram_usage.size; |
| 1638 | bp.byte_align = PAGE_SIZE; |
| 1639 | bp.domain = AMDGPU_GEM_DOMAIN_VRAM; |
| 1640 | bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
| 1641 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; |
| 1642 | bp.type = ttm_bo_type_kernel; |
| 1643 | bp.resv = NULL; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1644 | adev->fw_vram_usage.va = NULL; |
| 1645 | adev->fw_vram_usage.reserved_bo = NULL; |
| 1646 | |
| 1647 | if (adev->fw_vram_usage.size > 0 && |
| 1648 | adev->fw_vram_usage.size <= vram_size) { |
| 1649 | |
Chunming Zhou | 3216c6b | 2018-04-16 18:27:50 +0800 | [diff] [blame] | 1650 | r = amdgpu_bo_create(adev, &bp, |
Christian König | eab3de2 | 2018-03-14 14:48:17 -0500 | [diff] [blame] | 1651 | &adev->fw_vram_usage.reserved_bo); |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1652 | if (r) |
| 1653 | goto error_create; |
| 1654 | |
| 1655 | r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); |
| 1656 | if (r) |
| 1657 | goto error_reserve; |
| 1658 | |
| 1659 | /* remove the original mem node and create a new one at the |
| 1660 | * requested position |
| 1661 | */ |
| 1662 | bo = adev->fw_vram_usage.reserved_bo; |
| 1663 | offset = ALIGN(offset, PAGE_SIZE); |
| 1664 | for (i = 0; i < bo->placement.num_placement; ++i) { |
| 1665 | bo->placements[i].fpfn = offset >> PAGE_SHIFT; |
| 1666 | bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
| 1667 | } |
| 1668 | |
| 1669 | ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); |
| 1670 | r = ttm_bo_mem_space(&bo->tbo, &bo->placement, |
| 1671 | &bo->tbo.mem, &ctx); |
| 1672 | if (r) |
| 1673 | goto error_pin; |
| 1674 | |
| 1675 | r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, |
| 1676 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1677 | adev->fw_vram_usage.start_offset, |
| 1678 | (adev->fw_vram_usage.start_offset + |
Junwei Zhang | 7b7c6c8 | 2018-06-25 12:51:14 +0800 | [diff] [blame] | 1679 | adev->fw_vram_usage.size)); |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1680 | if (r) |
| 1681 | goto error_pin; |
| 1682 | r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, |
| 1683 | &adev->fw_vram_usage.va); |
| 1684 | if (r) |
| 1685 | goto error_kmap; |
| 1686 | |
| 1687 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
| 1688 | } |
| 1689 | return r; |
| 1690 | |
| 1691 | error_kmap: |
| 1692 | amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); |
| 1693 | error_pin: |
| 1694 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
| 1695 | error_reserve: |
| 1696 | amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); |
| 1697 | error_create: |
| 1698 | adev->fw_vram_usage.va = NULL; |
| 1699 | adev->fw_vram_usage.reserved_bo = NULL; |
| 1700 | return r; |
| 1701 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1702 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1703 | * amdgpu_ttm_init - Init the memory management (ttm) as well as various |
| 1704 | * gtt/vram related fields. |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1705 | * |
| 1706 | * This initializes all of the memory space pools that the TTM layer |
| 1707 | * will need such as the GTT space (system memory mapped to the device), |
| 1708 | * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which |
| 1709 | * can be mapped per VMID. |
| 1710 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1711 | int amdgpu_ttm_init(struct amdgpu_device *adev) |
| 1712 | { |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1713 | uint64_t gtt_size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1714 | int r; |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1715 | u64 vis_vram_limit; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1716 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1717 | /* initialize global references for vram/gtt */ |
Alex Deucher | 70b5c5a | 2016-11-15 16:55:53 -0500 | [diff] [blame] | 1718 | r = amdgpu_ttm_global_init(adev); |
| 1719 | if (r) { |
| 1720 | return r; |
| 1721 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1722 | /* No other users of the address space, so set it to 0 */ |
| 1723 | r = ttm_bo_device_init(&adev->mman.bdev, |
| 1724 | adev->mman.bo_global_ref.ref.object, |
| 1725 | &amdgpu_bo_driver, |
| 1726 | adev->ddev->anon_inode->i_mapping, |
| 1727 | DRM_FILE_PAGE_OFFSET, |
| 1728 | adev->need_dma32); |
| 1729 | if (r) { |
| 1730 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
| 1731 | return r; |
| 1732 | } |
| 1733 | adev->mman.initialized = true; |
Andrey Grodzovsky | 7cce958 | 2018-01-16 10:06:36 -0500 | [diff] [blame] | 1734 | |
| 1735 | /* We opt to avoid OOM on system page allocations */ |
| 1736 | adev->mman.bdev.no_retry = true; |
| 1737 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1738 | /* Initialize VRAM pool with all of VRAM divided into pages */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1739 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1740 | adev->gmc.real_vram_size >> PAGE_SHIFT); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1741 | if (r) { |
| 1742 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
| 1743 | return r; |
| 1744 | } |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1745 | |
| 1746 | /* Reduce size of CPU-visible VRAM if requested */ |
| 1747 | vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; |
| 1748 | if (amdgpu_vis_vram_limit > 0 && |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1749 | vis_vram_limit <= adev->gmc.visible_vram_size) |
| 1750 | adev->gmc.visible_vram_size = vis_vram_limit; |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1751 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1752 | /* Change the size here instead of the init above so only lpfn is affected */ |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1753 | amdgpu_ttm_set_buffer_funcs_status(adev, false); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 1754 | #ifdef CONFIG_64BIT |
| 1755 | adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, |
| 1756 | adev->gmc.visible_vram_size); |
| 1757 | #endif |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1758 | |
Horace Chen | a05502e | 2017-09-29 14:41:57 +0800 | [diff] [blame] | 1759 | /* |
| 1760 | * The reserved VRAM for firmware must be pinned to the specified |
| 1761 | * place in VRAM, so reserve it early. |
| 1762 | */ |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1763 | r = amdgpu_ttm_fw_reserve_vram_init(adev); |
Horace Chen | a05502e | 2017-09-29 14:41:57 +0800 | [diff] [blame] | 1764 | if (r) { |
| 1765 | return r; |
| 1766 | } |
| 1767 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1768 | /* allocate memory as required for VGA |
| 1769 | * This is used for VGA emulation and pre-OS scanout buffers to |
| 1770 | * avoid display artifacts while transitioning between pre-OS |
| 1771 | * and driver. */ |
Alex Deucher | ebdef28 | 2018-04-06 14:54:09 -0500 | [diff] [blame] | 1772 | if (adev->gmc.stolen_size) { |
| 1773 | r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, |
| 1774 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1775 | &adev->stolen_vga_memory, |
| 1776 | NULL, NULL); |
| 1777 | if (r) |
| 1778 | return r; |
| 1779 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1780 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1781 | (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1782 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1783 | /* Compute GTT size, either based on 3/4 of total system RAM |
| 1784 | * or on whatever the user passed on module init */ |
Roger He | 424e2c8 | 2017-11-10 19:05:13 +0800 | [diff] [blame] | 1785 | if (amdgpu_gtt_size == -1) { |
| 1786 | struct sysinfo si; |
| 1787 | |
| 1788 | si_meminfo(&si); |
Andrey Grodzovsky | 2456252 | 2017-12-15 12:09:16 -0500 | [diff] [blame] | 1789 | gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1790 | adev->gmc.mc_vram_size), |
Andrey Grodzovsky | 2456252 | 2017-12-15 12:09:16 -0500 | [diff] [blame] | 1791 | ((uint64_t)si.totalram * si.mem_unit * 3/4)); |
| 1792 | } else |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1794 | gtt_size = (uint64_t)amdgpu_gtt_size << 20; |
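
/*
 * Example (illustrative): with 16 GiB of system RAM and 8 GiB of VRAM,
 * and assuming AMDGPU_DEFAULT_GTT_SIZE_MB is 3072, the default computes
 *
 *	gtt_size = min(max(3 GiB, 8 GiB), 12 GiB) = 8 GiB
 */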
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1795 | |
| 1796 | /* Initialize GTT memory pool */ |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1797 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1798 | if (r) { |
| 1799 | DRM_ERROR("Failed initializing GTT heap.\n"); |
| 1800 | return r; |
| 1801 | } |
| 1802 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n", |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1803 | (unsigned)(gtt_size / (1024 * 1024))); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1804 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1805 | /* Initialize various on-chip memory pools */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1806 | adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; |
| 1807 | adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; |
| 1808 | adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; |
| 1809 | adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT; |
| 1810 | adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT; |
| 1811 | adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT; |
| 1812 | adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT; |
| 1813 | adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT; |
| 1814 | adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT; |
| 1815 | /* GDS Memory */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1816 | if (adev->gds.mem.total_size) { |
| 1817 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, |
| 1818 | adev->gds.mem.total_size >> PAGE_SHIFT); |
| 1819 | if (r) { |
| 1820 | DRM_ERROR("Failed initializing GDS heap.\n"); |
| 1821 | return r; |
| 1822 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1823 | } |
| 1824 | |
| 1825 | /* GWS */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1826 | if (adev->gds.gws.total_size) { |
| 1827 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, |
| 1828 | adev->gds.gws.total_size >> PAGE_SHIFT); |
| 1829 | if (r) { |
| 1830 | DRM_ERROR("Failed initializing gws heap.\n"); |
| 1831 | return r; |
| 1832 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1833 | } |
| 1834 | |
| 1835 | /* OA */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1836 | if (adev->gds.oa.total_size) { |
| 1837 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, |
| 1838 | adev->gds.oa.total_size >> PAGE_SHIFT); |
| 1839 | if (r) { |
| 1840 | DRM_ERROR("Failed initializing oa heap.\n"); |
| 1841 | return r; |
| 1842 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1843 | } |
| 1844 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1845 | /* Register debugfs entries for amdgpu_ttm */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1846 | r = amdgpu_ttm_debugfs_init(adev); |
| 1847 | if (r) { |
| 1848 | DRM_ERROR("Failed to init debugfs\n"); |
| 1849 | return r; |
| 1850 | } |
| 1851 | return 0; |
| 1852 | } |
| 1853 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1854 | /** |
Huang Rui | 2e603d0 | 2018-07-26 14:08:03 +0800 | [diff] [blame^] | 1855 | * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1856 | */ |
Andrey Grodzovsky | 6f752ec | 2018-04-06 14:54:10 -0500 | [diff] [blame] | 1857 | void amdgpu_ttm_late_init(struct amdgpu_device *adev) |
| 1858 | { |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1859 | /* return the VGA stolen memory (if any) back to VRAM */ |
Andrey Grodzovsky | 6f752ec | 2018-04-06 14:54:10 -0500 | [diff] [blame] | 1860 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
| 1861 | } |
| 1862 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1863 | /** |
| 1864 | * amdgpu_ttm_fini - De-initialize the TTM memory pools |
| 1865 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1866 | void amdgpu_ttm_fini(struct amdgpu_device *adev) |
| 1867 | { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1868 | if (!adev->mman.initialized) |
| 1869 | return; |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1870 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1871 | amdgpu_ttm_debugfs_fini(adev); |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1872 | amdgpu_ttm_fw_reserve_vram_fini(adev); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 1873 | if (adev->mman.aper_base_kaddr) |
| 1874 | iounmap(adev->mman.aper_base_kaddr); |
| 1875 | adev->mman.aper_base_kaddr = NULL; |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1876 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1877 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); |
| 1878 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1879 | if (adev->gds.mem.total_size) |
| 1880 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); |
| 1881 | if (adev->gds.gws.total_size) |
| 1882 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); |
| 1883 | if (adev->gds.oa.total_size) |
| 1884 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1885 | ttm_bo_device_release(&adev->mman.bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1886 | amdgpu_ttm_global_fini(adev); |
| 1887 | adev->mman.initialized = false; |
| 1888 | DRM_INFO("amdgpu: ttm finalized\n"); |
| 1889 | } |
| 1890 | |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1891 | /** |
| 1892 | * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions |
| 1893 | * |
| 1894 | * @adev: amdgpu_device pointer |
| 1895 | * @enable: true when we can use buffer functions. |
| 1896 | * |
| 1897 | * Enable/disable use of buffer functions during suspend/resume. This should |
| 1898 | * only be called at bootup or when userspace isn't running. |
| 1899 | */ |
| 1900 | void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1901 | { |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1902 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM]; |
| 1903 | uint64_t size; |
Christian König | b7d85e1 | 2018-07-12 14:31:25 +0200 | [diff] [blame] | 1904 | int r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1905 | |
Christian König | b7d85e1 | 2018-07-12 14:31:25 +0200 | [diff] [blame] | 1906 | if (!adev->mman.initialized || adev->in_gpu_reset || |
| 1907 | adev->mman.buffer_funcs_enabled == enable) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1908 | return; |
| 1909 | |
Christian König | b7d85e1 | 2018-07-12 14:31:25 +0200 | [diff] [blame] | 1910 | if (enable) { |
| 1911 | struct amdgpu_ring *ring; |
| 1912 | struct drm_sched_rq *rq; |
| 1913 | |
| 1914 | ring = adev->mman.buffer_funcs_ring; |
| 1915 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; |
Nayan Deshmukh | aa16b6c | 2018-07-13 15:21:14 +0530 | [diff] [blame] | 1916 | r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL); |
Christian König | b7d85e1 | 2018-07-12 14:31:25 +0200 | [diff] [blame] | 1917 | if (r) { |
| 1918 | DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", |
| 1919 | r); |
| 1920 | return; |
| 1921 | } |
| 1922 | } else { |
Nayan Deshmukh | cdc5017 | 2018-07-20 17:51:05 +0530 | [diff] [blame] | 1923 | drm_sched_entity_destroy(&adev->mman.entity); |
Andrey Grodzovsky | 7766484 | 2018-07-20 11:42:24 -0400 | [diff] [blame] | 1924 | dma_fence_put(man->move); |
| 1925 | man->move = NULL; |
Christian König | b7d85e1 | 2018-07-12 14:31:25 +0200 | [diff] [blame] | 1926 | } |
| 1927 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1928 | /* this just adjusts TTM's idea of the VRAM size, so that lpfn is set to the correct value */ |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1929 | if (enable) |
| 1930 | size = adev->gmc.real_vram_size; |
| 1931 | else |
| 1932 | size = adev->gmc.visible_vram_size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1933 | man->size = size >> PAGE_SHIFT; |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 1934 | adev->mman.buffer_funcs_enabled = enable; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1935 | } |
| 1936 | |
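| | /** |
| | * amdgpu_mmap - mmap callback for the amdgpu DRM device |
| | * |
| | * @filp: DRM file pointer |
| | * @vma: vm area struct describing the requested mapping |
| | * |
| | * Rejects offsets below DRM_FILE_PAGE_OFFSET and hands everything else |
| | * to ttm_bo_mmap() on the device's TTM BO device. |
| | */ |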
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1937 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) |
| 1938 | { |
| 1939 | struct drm_file *file_priv; |
| 1940 | struct amdgpu_device *adev; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1941 | |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1942 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1943 | return -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1944 | |
| 1945 | file_priv = filp->private_data; |
| 1946 | adev = file_priv->minor->dev->dev_private; |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1947 | if (adev == NULL) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1948 | return -EINVAL; |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1949 | |
| 1950 | return ttm_bo_mmap(filp, vma, &adev->mman.bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1951 | } |
| 1952 | |
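| | /** |
| | * amdgpu_map_buffer - map pages of a BO into a GART transfer window |
| | * |
| | * Writes the PTEs for @num_pages of @bo (starting at byte @offset) into |
| | * GART window @window so the copy engine can reach memory that is not |
| | * bound to the GART. The PTEs are carried in the job's IB and copied |
| | * into the GART table on @ring; the GPU address of the window is |
| | * returned in @addr. |
| | */ |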
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 1953 | static int amdgpu_map_buffer(struct ttm_buffer_object *bo, |
| 1954 | struct ttm_mem_reg *mem, unsigned num_pages, |
| 1955 | uint64_t offset, unsigned window, |
| 1956 | struct amdgpu_ring *ring, |
| 1957 | uint64_t *addr) |
| 1958 | { |
| 1959 | struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; |
| 1960 | struct amdgpu_device *adev = ring->adev; |
| 1961 | struct ttm_tt *ttm = bo->ttm; |
| 1962 | struct amdgpu_job *job; |
| 1963 | unsigned num_dw, num_bytes; |
| 1964 | dma_addr_t *dma_address; |
| 1965 | struct dma_fence *fence; |
| 1966 | uint64_t src_addr, dst_addr; |
| 1967 | uint64_t flags; |
| 1968 | int r; |
| 1969 | |
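| | /* a full window of PTEs (8 bytes per GPU page) must fit in one copy */ |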
| 1970 | BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < |
| 1971 | AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); |
| 1972 | |
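| | /* GPU address of the selected transfer window inside the GART */ |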
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1973 | *addr = adev->gmc.gart_start; |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 1974 | *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * |
| 1975 | AMDGPU_GPU_PAGE_SIZE; |
| 1976 | |
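| | /* round up to a multiple of 8 dwords to match the IB padding below */ |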
| 1977 | num_dw = adev->mman.buffer_funcs->copy_num_dw; |
| 1978 | while (num_dw & 0x7) |
| 1979 | num_dw++; |
| 1980 | |
| 1981 | num_bytes = num_pages * 8; |
| 1982 | |
| 1983 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); |
| 1984 | if (r) |
| 1985 | return r; |
| 1986 | |
| 1987 | src_addr = num_dw * 4; |
| 1988 | src_addr += job->ibs[0].gpu_addr; |
| 1989 | |
| 1990 | dst_addr = adev->gart.table_addr; |
| 1991 | dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; |
| 1992 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, |
| 1993 | dst_addr, num_bytes); |
| 1994 | |
| 1995 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 1996 | WARN_ON(job->ibs[0].length_dw > num_dw); |
| 1997 | |
| 1998 | dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT]; |
| 1999 | flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); |
| 2000 | r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, |
| 2001 | &job->ibs[0].ptr[num_dw]); |
| 2002 | if (r) |
| 2003 | goto error_free; |
| 2004 | |
Christian König | 0e28b10 | 2018-07-13 13:54:56 +0200 | [diff] [blame] | 2005 | r = amdgpu_job_submit(job, &adev->mman.entity, |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 2006 | AMDGPU_FENCE_OWNER_UNDEFINED, &fence); |
| 2007 | if (r) |
| 2008 | goto error_free; |
| 2009 | |
| 2010 | dma_fence_put(fence); |
| 2011 | |
| 2012 | return r; |
| 2013 | |
| 2014 | error_free: |
| 2015 | amdgpu_job_free(job); |
| 2016 | return r; |
| 2017 | } |
| 2018 | |
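| | /** |
| | * amdgpu_copy_buffer - schedule a GPU copy between two buffer addresses |
| | * |
| | * Builds a job of copy packets moving @byte_count bytes from |
| | * @src_offset to @dst_offset, optionally syncing to the fences in |
| | * @resv first. With @direct_submit the IB is pushed straight onto the |
| | * ring instead of going through the scheduler entity. |
| | */ |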
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 2019 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
| 2020 | uint64_t dst_offset, uint32_t byte_count, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2021 | struct reservation_object *resv, |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 2022 | struct dma_fence **fence, bool direct_submit, |
| 2023 | bool vm_needs_flush) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2024 | { |
| 2025 | struct amdgpu_device *adev = ring->adev; |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2026 | struct amdgpu_job *job; |
| 2027 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2028 | uint32_t max_bytes; |
| 2029 | unsigned num_loops, num_dw; |
| 2030 | unsigned i; |
| 2031 | int r; |
| 2032 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 2033 | if (direct_submit && !ring->ready) { |
| 2034 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
| 2035 | return -EINVAL; |
| 2036 | } |
| 2037 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2038 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; |
| 2039 | num_loops = DIV_ROUND_UP(byte_count, max_bytes); |
| 2040 | num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; |
| 2041 | |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2042 | /* for IB padding */ |
| 2043 | while (num_dw & 0x7) |
| 2044 | num_dw++; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2045 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2046 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); |
| 2047 | if (r) |
Chunming Zhou | 9066b0c | 2015-08-25 15:12:26 +0800 | [diff] [blame] | 2048 | return r; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2049 | |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 2050 | job->vm_needs_flush = vm_needs_flush; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2051 | if (resv) { |
Christian König | e86f9ce | 2016-02-08 12:13:05 +0100 | [diff] [blame] | 2052 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
Andres Rodriguez | 177ae09 | 2017-09-15 20:44:06 -0400 | [diff] [blame] | 2053 | AMDGPU_FENCE_OWNER_UNDEFINED, |
| 2054 | false); |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2055 | if (r) { |
| 2056 | DRM_ERROR("sync failed (%d).\n", r); |
| 2057 | goto error_free; |
| 2058 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2059 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2060 | |
| 2061 | for (i = 0; i < num_loops; i++) { |
| 2062 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
| 2063 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2064 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, |
| 2065 | dst_offset, cur_size_in_bytes); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2066 | |
| 2067 | src_offset += cur_size_in_bytes; |
| 2068 | dst_offset += cur_size_in_bytes; |
| 2069 | byte_count -= cur_size_in_bytes; |
| 2070 | } |
| 2071 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2072 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 2073 | WARN_ON(job->ibs[0].length_dw > num_dw); |
Christian König | ee913fd | 2018-07-13 16:29:10 +0200 | [diff] [blame] | 2074 | if (direct_submit) |
| 2075 | r = amdgpu_job_submit_direct(job, ring, fence); |
| 2076 | else |
Christian König | 0e28b10 | 2018-07-13 13:54:56 +0200 | [diff] [blame] | 2077 | r = amdgpu_job_submit(job, &adev->mman.entity, |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 2078 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
Christian König | ee913fd | 2018-07-13 16:29:10 +0200 | [diff] [blame] | 2079 | if (r) |
| 2080 | goto error_free; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2081 | |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 2082 | return r; |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2083 | |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2084 | error_free: |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2085 | amdgpu_job_free(job); |
Christian König | ee913fd | 2018-07-13 16:29:10 +0200 | [diff] [blame] | 2086 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2087 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2088 | } |
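| | |
| | /* |
| | * Illustrative sketch only (not part of the driver): a typical caller |
| | * schedules the copy on the buffer-functions ring and waits for the |
| | * returned fence; src, dst, size and resv below are placeholders: |
| | * |
| | * struct dma_fence *fence = NULL; |
| | * r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src, dst, |
| | * size, resv, &fence, false, false); |
| | * if (!r && fence) { |
| | * dma_fence_wait(fence, false); |
| | * dma_fence_put(fence); |
| | * } |
| | */ |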
| 2089 | |
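| | /** |
| | * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern |
| | * |
| | * Clears or fills @bo with @src_data using the ring's fill packets. A |
| | * BO in the TT domain is bound to the GART first; the backing memory |
| | * is then walked node by node, since it may not be contiguous. |
| | */ |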
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2090 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2091 | uint32_t src_data, |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2092 | struct reservation_object *resv, |
| 2093 | struct dma_fence **fence) |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2094 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 2095 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2096 | uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2097 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
| 2098 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2099 | struct drm_mm_node *mm_node; |
| 2100 | unsigned long num_pages; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2101 | unsigned int num_loops, num_dw; |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2102 | |
| 2103 | struct amdgpu_job *job; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2104 | int r; |
| 2105 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 2106 | if (!adev->mman.buffer_funcs_enabled) { |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2107 | DRM_ERROR("Trying to clear memory with ring turned off.\n"); |
| 2108 | return -EINVAL; |
| 2109 | } |
| 2110 | |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 2111 | if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
Christian König | c5835bb | 2017-10-27 15:43:14 +0200 | [diff] [blame] | 2112 | r = amdgpu_ttm_alloc_gart(&bo->tbo); |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 2113 | if (r) |
| 2114 | return r; |
| 2115 | } |
| 2116 | |
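| | /* an allocation may span several drm_mm nodes; count the fills needed */ |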
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2117 | num_pages = bo->tbo.num_pages; |
| 2118 | mm_node = bo->tbo.mem.mm_node; |
| 2119 | num_loops = 0; |
| 2120 | while (num_pages) { |
| 2121 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
| 2122 | |
| 2123 | num_loops += DIV_ROUND_UP(byte_count, max_bytes); |
| 2124 | num_pages -= mm_node->size; |
| 2125 | ++mm_node; |
| 2126 | } |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2127 | num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2128 | |
| 2129 | /* for IB padding */ |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2130 | num_dw += 64; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2131 | |
| 2132 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); |
| 2133 | if (r) |
| 2134 | return r; |
| 2135 | |
| 2136 | if (resv) { |
| 2137 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
Andres Rodriguez | 177ae09 | 2017-09-15 20:44:06 -0400 | [diff] [blame] | 2138 | AMDGPU_FENCE_OWNER_UNDEFINED, false); |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2139 | if (r) { |
| 2140 | DRM_ERROR("sync failed (%d).\n", r); |
| 2141 | goto error_free; |
| 2142 | } |
| 2143 | } |
| 2144 | |
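| | /* walk the nodes again, emitting one fill per max_bytes-sized chunk */ |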
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2145 | num_pages = bo->tbo.num_pages; |
| 2146 | mm_node = bo->tbo.mem.mm_node; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2147 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2148 | while (num_pages) { |
| 2149 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
| 2150 | uint64_t dst_addr; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2151 | |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 2152 | dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2153 | while (byte_count) { |
| 2154 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
| 2155 | |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2156 | amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, |
| 2157 | dst_addr, cur_size_in_bytes); |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2158 | |
| 2159 | dst_addr += cur_size_in_bytes; |
| 2160 | byte_count -= cur_size_in_bytes; |
| 2161 | } |
| 2162 | |
| 2163 | num_pages -= mm_node->size; |
| 2164 | ++mm_node; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2165 | } |
| 2166 | |
| 2167 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 2168 | WARN_ON(job->ibs[0].length_dw > num_dw); |
Christian König | 0e28b10 | 2018-07-13 13:54:56 +0200 | [diff] [blame] | 2169 | r = amdgpu_job_submit(job, &adev->mman.entity, |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2170 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2171 | if (r) |
| 2172 | goto error_free; |
| 2173 | |
| 2174 | return 0; |
| 2175 | |
| 2176 | error_free: |
| 2177 | amdgpu_job_free(job); |
| 2178 | return r; |
| 2179 | } |
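| | |
| | /* |
| | * Illustrative sketch only: clearing a BO to zero, assuming the caller |
| | * already holds the BO's reservation: |
| | * |
| | * r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); |
| | */ |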
| 2180 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2181 | #if defined(CONFIG_DEBUG_FS) |
| 2182 | |
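| | /* debugfs callback: dump the state of one TTM memory manager (VRAM or GTT) */ |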
| 2183 | static int amdgpu_mm_dump_table(struct seq_file *m, void *data) |
| 2184 | { |
| 2185 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
| 2186 | unsigned ttm_pl = *(int *)node->info_ent->data; |
| 2187 | struct drm_device *dev = node->minor->dev; |
| 2188 | struct amdgpu_device *adev = dev->dev_private; |
Christian König | 12d4ac5 | 2017-08-07 14:07:43 +0200 | [diff] [blame] | 2189 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; |
Daniel Vetter | b5c3714 | 2016-12-29 12:09:24 +0100 | [diff] [blame] | 2190 | struct drm_printer p = drm_seq_file_printer(m); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2191 | |
Christian König | 12d4ac5 | 2017-08-07 14:07:43 +0200 | [diff] [blame] | 2192 | man->func->debug(man, &p); |
Daniel Vetter | b5c3714 | 2016-12-29 12:09:24 +0100 | [diff] [blame] | 2193 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2194 | } |
| 2195 | |
| 2196 | static int ttm_pl_vram = TTM_PL_VRAM; |
| 2197 | static int ttm_pl_tt = TTM_PL_TT; |
| 2198 | |
Nils Wallménius | 06ab683 | 2016-05-02 12:46:15 -0400 | [diff] [blame] | 2199 | static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2200 | {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, |
| 2201 | {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, |
| 2202 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, |
| 2203 | #ifdef CONFIG_SWIOTLB |
| 2204 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} |
| 2205 | #endif |
| 2206 | }; |
| 2207 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2208 | /** |
| 2209 | * amdgpu_ttm_vram_read - Linear read access to VRAM |
| 2210 | * |
| 2211 | * Accesses VRAM via MMIO for debugging purposes. |
| 2212 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2213 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, |
| 2214 | size_t size, loff_t *pos) |
| 2215 | { |
Al Viro | 4506309 | 2016-12-04 18:24:56 -0500 | [diff] [blame] | 2216 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2217 | ssize_t result = 0; |
| 2218 | int r; |
| 2219 | |
| 2220 | if (size & 0x3 || *pos & 0x3) |
| 2221 | return -EINVAL; |
| 2222 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2223 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 9156e72 | 2017-05-23 11:35:22 -0400 | [diff] [blame] | 2224 | return -ENXIO; |
| 2225 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2226 | while (size) { |
| 2227 | unsigned long flags; |
| 2228 | uint32_t value; |
| 2229 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2230 | if (*pos >= adev->gmc.mc_vram_size) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2231 | return result; |
| 2232 | |
| 2233 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | c3057281 | 2017-09-13 12:35:15 -0400 | [diff] [blame] | 2234 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
| 2235 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); |
| 2236 | value = RREG32_NO_KIQ(mmMM_DATA); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2237 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 2238 | |
| 2239 | r = put_user(value, (uint32_t *)buf); |
| 2240 | if (r) |
| 2241 | return r; |
| 2242 | |
| 2243 | result += 4; |
| 2244 | buf += 4; |
| 2245 | *pos += 4; |
| 2246 | size -= 4; |
| 2247 | } |
| 2248 | |
| 2249 | return result; |
| 2250 | } |
| 2251 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2252 | /** |
| 2253 | * amdgpu_ttm_vram_write - Linear write access to VRAM |
| 2254 | * |
| 2255 | * Accesses VRAM via MMIO for debugging purposes. |
| 2256 | */ |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2257 | static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, |
| 2258 | size_t size, loff_t *pos) |
| 2259 | { |
| 2260 | struct amdgpu_device *adev = file_inode(f)->i_private; |
| 2261 | ssize_t result = 0; |
| 2262 | int r; |
| 2263 | |
| 2264 | if (size & 0x3 || *pos & 0x3) |
| 2265 | return -EINVAL; |
| 2266 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2267 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2268 | return -ENXIO; |
| 2269 | |
| 2270 | while (size) { |
| 2271 | unsigned long flags; |
| 2272 | uint32_t value; |
| 2273 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2274 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2275 | return result; |
| 2276 | |
| 2277 | r = get_user(value, (uint32_t *)buf); |
| 2278 | if (r) |
| 2279 | return r; |
| 2280 | |
| 2281 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | c3057281 | 2017-09-13 12:35:15 -0400 | [diff] [blame] | 2282 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
| 2283 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); |
| 2284 | WREG32_NO_KIQ(mmMM_DATA, value); |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2285 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 2286 | |
| 2287 | result += 4; |
| 2288 | buf += 4; |
| 2289 | *pos += 4; |
| 2290 | size -= 4; |
| 2291 | } |
| 2292 | |
| 2293 | return result; |
| 2294 | } |
| 2295 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2296 | static const struct file_operations amdgpu_ttm_vram_fops = { |
| 2297 | .owner = THIS_MODULE, |
| 2298 | .read = amdgpu_ttm_vram_read, |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2299 | .write = amdgpu_ttm_vram_write, |
| 2300 | .llseek = default_llseek, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2301 | }; |
| 2302 | |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 2303 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
| 2304 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2305 | /** |
| 2306 | * amdgpu_ttm_gtt_read - Linear read access to GTT memory |
| 2307 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2308 | static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, |
| 2309 | size_t size, loff_t *pos) |
| 2310 | { |
Al Viro | 4506309 | 2016-12-04 18:24:56 -0500 | [diff] [blame] | 2311 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2312 | ssize_t result = 0; |
| 2313 | int r; |
| 2314 | |
| 2315 | while (size) { |
| 2316 | loff_t p = *pos / PAGE_SIZE; |
| 2317 | unsigned off = *pos & ~PAGE_MASK; |
| 2318 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); |
| 2319 | struct page *page; |
| 2320 | void *ptr; |
| 2321 | |
| 2322 | if (p >= adev->gart.num_cpu_pages) |
| 2323 | return result; |
| 2324 | |
| 2325 | page = adev->gart.pages[p]; |
| 2326 | if (page) { |
| 2327 | ptr = kmap(page); |
| 2328 | ptr += off; |
| 2329 | |
| 2330 | r = copy_to_user(buf, ptr, cur_size); |
| 2331 | kunmap(adev->gart.pages[p]); |
| 2332 | } else |
| 2333 | r = clear_user(buf, cur_size); |
| 2334 | |
| 2335 | if (r) |
| 2336 | return -EFAULT; |
| 2337 | |
| 2338 | result += cur_size; |
| 2339 | buf += cur_size; |
| 2340 | *pos += cur_size; |
| 2341 | size -= cur_size; |
| 2342 | } |
| 2343 | |
| 2344 | return result; |
| 2345 | } |
| 2346 | |
| 2347 | static const struct file_operations amdgpu_ttm_gtt_fops = { |
| 2348 | .owner = THIS_MODULE, |
| 2349 | .read = amdgpu_ttm_gtt_read, |
| 2350 | .llseek = default_llseek |
| 2351 | }; |
| 2352 | |
| 2353 | #endif |
| 2354 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2355 | /** |
| 2356 | * amdgpu_iomem_read - Virtual read access to GPU mapped memory |
| 2357 | * |
| 2358 | * This function is used to read memory that has been mapped to the |
| 2359 | * GPU, where the known addresses are bus addresses rather than |
| 2360 | * physical addresses (e.g., what you'd put in an IB or ring buffer). |
| 2361 | */ |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2362 | static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, |
| 2363 | size_t size, loff_t *pos) |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2364 | { |
| 2365 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2366 | struct iommu_domain *dom; |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2367 | ssize_t result = 0; |
| 2368 | int r; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2369 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2370 | /* retrieve the IOMMU domain if any for this device */ |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2371 | dom = iommu_get_domain_for_dev(adev->dev); |
Tom St Denis | 10cfafd | 2017-09-19 11:29:04 -0400 | [diff] [blame] | 2372 | |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2373 | while (size) { |
| 2374 | phys_addr_t addr = *pos & PAGE_MASK; |
| 2375 | loff_t off = *pos & ~PAGE_MASK; |
| 2376 | size_t bytes = PAGE_SIZE - off; |
| 2377 | unsigned long pfn; |
| 2378 | struct page *p; |
| 2379 | void *ptr; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2380 | |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2381 | bytes = bytes < size ? bytes : size; |
| 2382 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2383 | /* Translate the bus address to a physical address. If |
| 2384 | * the domain is NULL it means there is no IOMMU active |
| 2385 | * and the address translation is the identity mapping |
| 2386 | */ |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2387 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
| 2388 | |
| 2389 | pfn = addr >> PAGE_SHIFT; |
| 2390 | if (!pfn_valid(pfn)) |
| 2391 | return -EPERM; |
| 2392 | |
| 2393 | p = pfn_to_page(pfn); |
| 2394 | if (p->mapping != adev->mman.bdev.dev_mapping) |
| 2395 | return -EPERM; |
| 2396 | |
| 2397 | ptr = kmap(p); |
Tom St Denis | 864917a | 2018-03-20 09:13:08 -0400 | [diff] [blame] | 2398 | r = copy_to_user(buf, ptr + off, bytes); |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2399 | kunmap(p); |
| 2400 | if (r) |
| 2401 | return -EFAULT; |
| 2402 | |
| 2403 | size -= bytes; |
| 2404 | *pos += bytes; |
| 2405 | result += bytes; |
| 2406 | } |
| 2407 | |
| 2408 | return result; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2409 | } |
| 2410 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2411 | /** |
| 2412 | * amdgpu_iomem_write - Virtual write access to GPU mapped memory |
| 2413 | * |
| 2414 | * This function is used to write memory that has been mapped to the |
| 2415 | * GPU, where the known addresses are bus addresses rather than |
| 2416 | * physical addresses (e.g., what you'd put in an IB or ring buffer). |
| 2417 | */ |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2418 | static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, |
| 2419 | size_t size, loff_t *pos) |
| 2420 | { |
| 2421 | struct amdgpu_device *adev = file_inode(f)->i_private; |
| 2422 | struct iommu_domain *dom; |
| 2423 | ssize_t result = 0; |
| 2424 | int r; |
| 2425 | |
| 2426 | dom = iommu_get_domain_for_dev(adev->dev); |
| 2427 | |
| 2428 | while (size) { |
| 2429 | phys_addr_t addr = *pos & PAGE_MASK; |
| 2430 | loff_t off = *pos & ~PAGE_MASK; |
| 2431 | size_t bytes = PAGE_SIZE - off; |
| 2432 | unsigned long pfn; |
| 2433 | struct page *p; |
| 2434 | void *ptr; |
| 2435 | |
| 2436 | bytes = bytes < size ? bytes : size; |
| 2437 | |
| 2438 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
| 2439 | |
| 2440 | pfn = addr >> PAGE_SHIFT; |
| 2441 | if (!pfn_valid(pfn)) |
| 2442 | return -EPERM; |
| 2443 | |
| 2444 | p = pfn_to_page(pfn); |
| 2445 | if (p->mapping != adev->mman.bdev.dev_mapping) |
| 2446 | return -EPERM; |
| 2447 | |
| 2448 | ptr = kmap(p); |
Tom St Denis | 864917a | 2018-03-20 09:13:08 -0400 | [diff] [blame] | 2449 | r = copy_from_user(ptr + off, buf, bytes); |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2450 | kunmap(p); |
| 2451 | if (r) |
| 2452 | return -EFAULT; |
| 2453 | |
| 2454 | size -= bytes; |
| 2455 | *pos += bytes; |
| 2456 | result += bytes; |
| 2457 | } |
| 2458 | |
| 2459 | return result; |
| 2460 | } |
| 2461 | |
| 2462 | static const struct file_operations amdgpu_ttm_iomem_fops = { |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2463 | .owner = THIS_MODULE, |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2464 | .read = amdgpu_iomem_read, |
| 2465 | .write = amdgpu_iomem_write, |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2466 | .llseek = default_llseek |
| 2467 | }; |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2468 | |
| 2469 | static const struct { |
| 2470 | char *name; |
| 2471 | const struct file_operations *fops; |
| 2472 | int domain; |
| 2473 | } ttm_debugfs_entries[] = { |
| 2474 | { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM }, |
| 2475 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
| 2476 | { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, |
| 2477 | #endif |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2478 | { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM }, |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2479 | }; |
| 2480 | |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 2481 | #endif |
| 2482 | |
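| | /** |
| | * amdgpu_ttm_debugfs_init - register the TTM debugfs entries |
| | * |
| | * Creates the raw VRAM/GTT/IOMEM access files (sized to the matching |
| | * heap where applicable) plus the memory manager and page pool tables. |
| | */ |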
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2483 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) |
| 2484 | { |
| 2485 | #if defined(CONFIG_DEBUG_FS) |
| 2486 | unsigned count; |
| 2487 | |
| 2488 | struct drm_minor *minor = adev->ddev->primary; |
| 2489 | struct dentry *ent, *root = minor->debugfs_root; |
| 2490 | |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2491 | for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { |
| 2492 | ent = debugfs_create_file( |
| 2493 | ttm_debugfs_entries[count].name, |
| 2494 | S_IFREG | S_IRUGO, root, |
| 2495 | adev, |
| 2496 | ttm_debugfs_entries[count].fops); |
| 2497 | if (IS_ERR(ent)) |
| 2498 | return PTR_ERR(ent); |
| 2499 | if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2500 | i_size_write(ent->d_inode, adev->gmc.mc_vram_size); |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2501 | else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2502 | i_size_write(ent->d_inode, adev->gmc.gart_size); |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2503 | adev->mman.debugfs_entries[count] = ent; |
| 2504 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2505 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2506 | count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); |
| 2507 | |
| 2508 | #ifdef CONFIG_SWIOTLB |
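| | /* the last entry is the DMA page pool, which is only used with swiotlb */ |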
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 2509 | if (!(adev->need_swiotlb && swiotlb_nr_tbl())) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2510 | --count; |
| 2511 | #endif |
| 2512 | |
| 2513 | return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); |
| 2514 | #else |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2515 | return 0; |
| 2516 | #endif |
| 2517 | } |
| 2518 | |
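| | /* remove the debugfs files created in amdgpu_ttm_debugfs_init() */ |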
| 2519 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) |
| 2520 | { |
| 2521 | #if defined(CONFIG_DEBUG_FS) |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2522 | unsigned i; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2523 | |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2524 | for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) |
| 2525 | debugfs_remove(adev->mman.debugfs_entries[i]); |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 2526 | #endif |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2527 | } |