Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2009 Jerome Glisse. |
| 3 | * All Rights Reserved. |
| 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | * copy of this software and associated documentation files (the |
| 7 | * "Software"), to deal in the Software without restriction, including |
| 8 | * without limitation the rights to use, copy, modify, merge, publish, |
| 9 | * distribute, sub license, and/or sell copies of the Software, and to |
| 10 | * permit persons to whom the Software is furnished to do so, subject to |
| 11 | * the following conditions: |
| 12 | * |
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 20 | * |
| 21 | * The above copyright notice and this permission notice (including the |
| 22 | * next paragraph) shall be included in all copies or substantial portions |
| 23 | * of the Software. |
| 24 | * |
| 25 | */ |
| 26 | /* |
| 27 | * Authors: |
| 28 | * Jerome Glisse <glisse@freedesktop.org> |
| 29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
| 30 | * Dave Airlie |
| 31 | */ |
Masahiro Yamada | 248a1d6 | 2017-04-24 13:50:21 +0900 | [diff] [blame] | 32 | #include <drm/ttm/ttm_bo_api.h> |
| 33 | #include <drm/ttm/ttm_bo_driver.h> |
| 34 | #include <drm/ttm/ttm_placement.h> |
| 35 | #include <drm/ttm/ttm_module.h> |
| 36 | #include <drm/ttm/ttm_page_alloc.h> |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 37 | #include <drm/drmP.h> |
| 38 | #include <drm/amdgpu_drm.h> |
| 39 | #include <linux/seq_file.h> |
| 40 | #include <linux/slab.h> |
| 41 | #include <linux/swiotlb.h> |
| 42 | #include <linux/swap.h> |
| 43 | #include <linux/pagemap.h> |
| 44 | #include <linux/debugfs.h> |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 45 | #include <linux/iommu.h> |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 46 | #include "amdgpu.h" |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 47 | #include "amdgpu_object.h" |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 48 | #include "amdgpu_trace.h" |
Felix Kuehling | d8d019c | 2018-02-06 20:32:35 -0500 | [diff] [blame] | 49 | #include "amdgpu_amdkfd.h" |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 50 | #include "bif/bif_4_1_d.h" |
| 51 | |
| 52 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
| 53 | |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 54 | static int amdgpu_map_buffer(struct ttm_buffer_object *bo, |
| 55 | struct ttm_mem_reg *mem, unsigned num_pages, |
| 56 | uint64_t offset, unsigned window, |
| 57 | struct amdgpu_ring *ring, |
| 58 | uint64_t *addr); |
| 59 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 60 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); |
| 61 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); |
| 62 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 63 | /* |
| 64 | * Global memory. |
| 65 | */ |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 66 | |
| 67 | /** |
| 68 | * amdgpu_ttm_mem_global_init - Initialize and acquire reference to |
| 69 | * memory object |
| 70 | * |
| 71 | * @ref: Object for initialization. |
| 72 | * |
| 73 | * This is called by drm_global_item_ref() when an object is being |
| 74 | * initialized. |
| 75 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 76 | static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) |
| 77 | { |
| 78 | return ttm_mem_global_init(ref->object); |
| 79 | } |
| 80 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 81 | /** |
| 82 | * amdgpu_ttm_mem_global_release - Drop reference to a memory object |
| 83 | * |
| 84 | * @ref: Object being removed |
| 85 | * |
| 86 | * This is called by drm_global_item_unref() when an object is being |
| 87 | * released. |
| 88 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 89 | static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) |
| 90 | { |
| 91 | ttm_mem_global_release(ref->object); |
| 92 | } |
| 93 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 94 | /** |
| 95 | * amdgpu_ttm_global_init - Initialize global TTM memory reference |
| 96 | * structures. |
| 97 | * |
| 98 | * @adev: AMDGPU device for which the global structures need to be |
| 99 | * registered. |
| 100 | * |
| 101 | * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init() |
| 102 | * during bring up. |
| 103 | */ |
Alex Deucher | 70b5c5a | 2016-11-15 16:55:53 -0500 | [diff] [blame] | 104 | static int amdgpu_ttm_global_init(struct amdgpu_device *adev) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 105 | { |
| 106 | struct drm_global_reference *global_ref; |
Christian König | 703297c | 2016-02-10 14:20:50 +0100 | [diff] [blame] | 107 | struct amdgpu_ring *ring; |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 108 | struct drm_sched_rq *rq; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 109 | int r; |
| 110 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 111 | /* ensure reference is false in case init fails */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 112 | adev->mman.mem_global_referenced = false; |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 113 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 114 | global_ref = &adev->mman.mem_global_ref; |
| 115 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; |
| 116 | global_ref->size = sizeof(struct ttm_mem_global); |
| 117 | global_ref->init = &amdgpu_ttm_mem_global_init; |
| 118 | global_ref->release = &amdgpu_ttm_mem_global_release; |
| 119 | r = drm_global_item_ref(global_ref); |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 120 | if (r) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 121 | DRM_ERROR("Failed setting up TTM memory accounting " |
| 122 | "subsystem.\n"); |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 123 | goto error_mem; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 124 | } |
| 125 | |
| 126 | adev->mman.bo_global_ref.mem_glob = |
| 127 | adev->mman.mem_global_ref.object; |
| 128 | global_ref = &adev->mman.bo_global_ref.ref; |
| 129 | global_ref->global_type = DRM_GLOBAL_TTM_BO; |
| 130 | global_ref->size = sizeof(struct ttm_bo_global); |
| 131 | global_ref->init = &ttm_bo_global_init; |
| 132 | global_ref->release = &ttm_bo_global_release; |
| 133 | r = drm_global_item_ref(global_ref); |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 134 | if (r) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 135 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 136 | goto error_bo; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 137 | } |
| 138 | |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 139 | mutex_init(&adev->mman.gtt_window_lock); |
| 140 | |
Christian König | 703297c | 2016-02-10 14:20:50 +0100 | [diff] [blame] | 141 | ring = adev->mman.buffer_funcs_ring; |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 142 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; |
| 143 | r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, |
Nayan Deshmukh | 8344c53 | 2018-03-29 22:36:32 +0530 | [diff] [blame] | 144 | rq, NULL); |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 145 | if (r) { |
Christian König | 703297c | 2016-02-10 14:20:50 +0100 | [diff] [blame] | 146 | DRM_ERROR("Failed setting up TTM BO move run queue.\n"); |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 147 | goto error_entity; |
Christian König | 703297c | 2016-02-10 14:20:50 +0100 | [diff] [blame] | 148 | } |
| 149 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 150 | adev->mman.mem_global_referenced = true; |
Christian König | 703297c | 2016-02-10 14:20:50 +0100 | [diff] [blame] | 151 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 152 | return 0; |
Huang Rui | e9d035e | 2016-09-07 20:55:42 +0800 | [diff] [blame] | 153 | |
| 154 | error_entity: |
| 155 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); |
| 156 | error_bo: |
| 157 | drm_global_item_unref(&adev->mman.mem_global_ref); |
| 158 | error_mem: |
| 159 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 160 | } |
| 161 | |
| 162 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) |
| 163 | { |
| 164 | if (adev->mman.mem_global_referenced) { |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 165 | drm_sched_entity_fini(adev->mman.entity.sched, |
Christian König | 703297c | 2016-02-10 14:20:50 +0100 | [diff] [blame] | 166 | &adev->mman.entity); |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 167 | mutex_destroy(&adev->mman.gtt_window_lock); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 168 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); |
| 169 | drm_global_item_unref(&adev->mman.mem_global_ref); |
| 170 | adev->mman.mem_global_referenced = false; |
| 171 | } |
| 172 | } |
| 173 | |
| 174 | static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
| 175 | { |
| 176 | return 0; |
| 177 | } |
| 178 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 179 | /** |
| 180 | * amdgpu_init_mem_type - Initialize a memory manager for a specific |
| 181 | * type of memory request. |
| 182 | * |
| 183 | * @bdev: The TTM BO device object (contains a reference to |
| 184 | * amdgpu_device) |
| 185 | * @type: The type of memory requested |
| 186 | * @man: |
| 187 | * |
| 188 | * This is called by ttm_bo_init_mm() when a buffer object is being |
| 189 | * initialized. |
| 190 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 191 | static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
| 192 | struct ttm_mem_type_manager *man) |
| 193 | { |
| 194 | struct amdgpu_device *adev; |
| 195 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 196 | adev = amdgpu_ttm_adev(bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 197 | |
| 198 | switch (type) { |
| 199 | case TTM_PL_SYSTEM: |
| 200 | /* System memory */ |
| 201 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
| 202 | man->available_caching = TTM_PL_MASK_CACHING; |
| 203 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 204 | break; |
| 205 | case TTM_PL_TT: |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 206 | /* GTT memory */ |
Christian König | bb990bb | 2016-09-09 16:32:33 +0200 | [diff] [blame] | 207 | man->func = &amdgpu_gtt_mgr_func; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 208 | man->gpu_offset = adev->gmc.gart_start; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 209 | man->available_caching = TTM_PL_MASK_CACHING; |
| 210 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 211 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
| 212 | break; |
| 213 | case TTM_PL_VRAM: |
| 214 | /* "On-card" video ram */ |
Christian König | 6a7f76e | 2016-08-24 15:51:49 +0200 | [diff] [blame] | 215 | man->func = &amdgpu_vram_mgr_func; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 216 | man->gpu_offset = adev->gmc.vram_start; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 217 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
| 218 | TTM_MEMTYPE_FLAG_MAPPABLE; |
| 219 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; |
| 220 | man->default_caching = TTM_PL_FLAG_WC; |
| 221 | break; |
| 222 | case AMDGPU_PL_GDS: |
| 223 | case AMDGPU_PL_GWS: |
| 224 | case AMDGPU_PL_OA: |
| 225 | /* On-chip GDS memory*/ |
| 226 | man->func = &ttm_bo_manager_func; |
| 227 | man->gpu_offset = 0; |
| 228 | man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA; |
| 229 | man->available_caching = TTM_PL_FLAG_UNCACHED; |
| 230 | man->default_caching = TTM_PL_FLAG_UNCACHED; |
| 231 | break; |
| 232 | default: |
| 233 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); |
| 234 | return -EINVAL; |
| 235 | } |
| 236 | return 0; |
| 237 | } |
| 238 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 239 | /** |
| 240 | * amdgpu_evict_flags - Compute placement flags |
| 241 | * |
| 242 | * @bo: The buffer object to evict |
| 243 | * @placement: Possible destination(s) for evicted BO |
| 244 | * |
| 245 | * Fill in placement data when ttm_bo_evict() is called |
| 246 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 247 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, |
| 248 | struct ttm_placement *placement) |
| 249 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 250 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
Christian König | 765e7fb | 2016-09-15 15:06:50 +0200 | [diff] [blame] | 251 | struct amdgpu_bo *abo; |
Arvind Yadav | 1aaa560 | 2017-07-02 14:43:58 +0530 | [diff] [blame] | 252 | static const struct ttm_place placements = { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 253 | .fpfn = 0, |
| 254 | .lpfn = 0, |
| 255 | .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM |
| 256 | }; |
| 257 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 258 | /* Don't handle scatter gather BOs */ |
Christian König | 82dee24 | 2018-02-20 19:09:56 +0100 | [diff] [blame] | 259 | if (bo->type == ttm_bo_type_sg) { |
| 260 | placement->num_placement = 0; |
| 261 | placement->num_busy_placement = 0; |
| 262 | return; |
| 263 | } |
| 264 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 265 | /* Object isn't an AMDGPU object so ignore */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 266 | if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { |
| 267 | placement->placement = &placements; |
| 268 | placement->busy_placement = &placements; |
| 269 | placement->num_placement = 1; |
| 270 | placement->num_busy_placement = 1; |
| 271 | return; |
| 272 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 273 | |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 274 | abo = ttm_to_amdgpu_bo(bo); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 275 | switch (bo->mem.mem_type) { |
| 276 | case TTM_PL_VRAM: |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 277 | if (!adev->mman.buffer_funcs_enabled) { |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 278 | /* Move to system memory */ |
Christian König | 765e7fb | 2016-09-15 15:06:50 +0200 | [diff] [blame] | 279 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); |
Andrey Grodzovsky | c8c5e56 | 2018-06-12 14:28:20 -0400 | [diff] [blame^] | 280 | } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && |
Christian König | 5422a28 | 2018-04-05 16:42:03 +0200 | [diff] [blame] | 281 | !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && |
| 282 | amdgpu_bo_in_cpu_visible_vram(abo)) { |
Michel Dänzer | cb2dd1a | 2017-07-04 17:16:42 +0900 | [diff] [blame] | 283 | |
| 284 | /* Try evicting to the CPU inaccessible part of VRAM |
| 285 | * first, but only set GTT as busy placement, so this |
| 286 | * BO will be evicted to GTT rather than causing other |
| 287 | * BOs to be evicted from VRAM |
| 288 | */ |
| 289 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | |
| 290 | AMDGPU_GEM_DOMAIN_GTT); |
Christian König | 5422a28 | 2018-04-05 16:42:03 +0200 | [diff] [blame] | 291 | abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; |
Michel Dänzer | cb2dd1a | 2017-07-04 17:16:42 +0900 | [diff] [blame] | 292 | abo->placements[0].lpfn = 0; |
| 293 | abo->placement.busy_placement = &abo->placements[1]; |
| 294 | abo->placement.num_busy_placement = 1; |
Christian König | 08291c5 | 2016-09-12 16:06:18 +0200 | [diff] [blame] | 295 | } else { |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 296 | /* Move to GTT memory */ |
Christian König | 765e7fb | 2016-09-15 15:06:50 +0200 | [diff] [blame] | 297 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); |
Christian König | 08291c5 | 2016-09-12 16:06:18 +0200 | [diff] [blame] | 298 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 299 | break; |
| 300 | case TTM_PL_TT: |
| 301 | default: |
Christian König | 765e7fb | 2016-09-15 15:06:50 +0200 | [diff] [blame] | 302 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 303 | } |
Christian König | 765e7fb | 2016-09-15 15:06:50 +0200 | [diff] [blame] | 304 | *placement = abo->placement; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 305 | } |
| 306 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 307 | /** |
| 308 | * amdgpu_verify_access - Verify access for a mmap call |
| 309 | * |
| 310 | * @bo: The buffer object to map |
| 311 | * @filp: The file pointer from the process performing the mmap |
| 312 | * |
| 313 | * This is called by ttm_bo_mmap() to verify whether a process |
| 314 | * has the right to mmap a BO to their process space. |
| 315 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 316 | static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) |
| 317 | { |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 318 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 319 | |
Felix Kuehling | a46a2cd | 2018-02-06 20:32:38 -0500 | [diff] [blame] | 320 | /* |
| 321 | * Don't verify access for KFD BOs. They don't have a GEM |
| 322 | * object associated with them. |
| 323 | */ |
| 324 | if (abo->kfd_bo) |
| 325 | return 0; |
| 326 | |
Jérôme Glisse | 054892e | 2016-04-19 09:07:51 -0400 | [diff] [blame] | 327 | if (amdgpu_ttm_tt_get_usermm(bo->ttm)) |
| 328 | return -EPERM; |
Dave Airlie | 28a3965 | 2016-09-30 13:18:26 +1000 | [diff] [blame] | 329 | return drm_vma_node_verify_access(&abo->gem_base.vma_node, |
David Herrmann | d9a1f0b | 2016-09-01 14:48:33 +0200 | [diff] [blame] | 330 | filp->private_data); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 331 | } |
| 332 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 333 | /** |
| 334 | * amdgpu_move_null - Register memory for a buffer object |
| 335 | * |
| 336 | * @bo: The bo to assign the memory to |
| 337 | * @new_mem: The memory to be assigned. |
| 338 | * |
| 339 | * Assign the memory from new_mem to the memory of the buffer object |
| 340 | * bo. |
| 341 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 342 | static void amdgpu_move_null(struct ttm_buffer_object *bo, |
| 343 | struct ttm_mem_reg *new_mem) |
| 344 | { |
| 345 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 346 | |
| 347 | BUG_ON(old_mem->mm_node != NULL); |
| 348 | *old_mem = *new_mem; |
| 349 | new_mem->mm_node = NULL; |
| 350 | } |
| 351 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 352 | /** |
| 353 | * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT |
| 354 | * buffer. |
| 355 | */ |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 356 | static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, |
| 357 | struct drm_mm_node *mm_node, |
| 358 | struct ttm_mem_reg *mem) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 359 | { |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 360 | uint64_t addr = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 361 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 362 | if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) { |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 363 | addr = mm_node->start << PAGE_SHIFT; |
| 364 | addr += bo->bdev->man[mem->mem_type].gpu_offset; |
| 365 | } |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 366 | return addr; |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 367 | } |
| 368 | |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 369 | /** |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 370 | * amdgpu_find_mm_node - Helper function finds the drm_mm_node |
| 371 | * corresponding to @offset. It also modifies |
| 372 | * the offset to be within the drm_mm_node |
| 373 | * returned |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 374 | */ |
| 375 | static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, |
| 376 | unsigned long *offset) |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 377 | { |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 378 | struct drm_mm_node *mm_node = mem->mm_node; |
| 379 | |
| 380 | while (*offset >= (mm_node->size << PAGE_SHIFT)) { |
| 381 | *offset -= (mm_node->size << PAGE_SHIFT); |
| 382 | ++mm_node; |
| 383 | } |
| 384 | return mm_node; |
| 385 | } |
| 386 | |
| 387 | /** |
| 388 | * amdgpu_copy_ttm_mem_to_mem - Helper function for copy |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 389 | * |
| 390 | * The function copies @size bytes from {src->mem + src->offset} to |
| 391 | * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a |
| 392 | * move and different for a BO to BO copy. |
| 393 | * |
| 394 | * @f: Returns the last fence if multiple jobs are submitted. |
| 395 | */ |
| 396 | int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, |
| 397 | struct amdgpu_copy_mem *src, |
| 398 | struct amdgpu_copy_mem *dst, |
| 399 | uint64_t size, |
| 400 | struct reservation_object *resv, |
| 401 | struct dma_fence **f) |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 402 | { |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 403 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 404 | struct drm_mm_node *src_mm, *dst_mm; |
| 405 | uint64_t src_node_start, dst_node_start, src_node_size, |
| 406 | dst_node_size, src_page_offset, dst_page_offset; |
Dave Airlie | 220196b | 2016-10-28 11:33:52 +1000 | [diff] [blame] | 407 | struct dma_fence *fence = NULL; |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 408 | int r = 0; |
| 409 | const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * |
| 410 | AMDGPU_GPU_PAGE_SIZE); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 411 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 412 | if (!adev->mman.buffer_funcs_enabled) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 413 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
| 414 | return -EINVAL; |
| 415 | } |
| 416 | |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 417 | src_mm = amdgpu_find_mm_node(src->mem, &src->offset); |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 418 | src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) + |
| 419 | src->offset; |
| 420 | src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset; |
| 421 | src_page_offset = src_node_start & (PAGE_SIZE - 1); |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 422 | |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 423 | dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset); |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 424 | dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) + |
| 425 | dst->offset; |
| 426 | dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset; |
| 427 | dst_page_offset = dst_node_start & (PAGE_SIZE - 1); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 428 | |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 429 | mutex_lock(&adev->mman.gtt_window_lock); |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 430 | |
| 431 | while (size) { |
| 432 | unsigned long cur_size; |
| 433 | uint64_t from = src_node_start, to = dst_node_start; |
Dave Airlie | 220196b | 2016-10-28 11:33:52 +1000 | [diff] [blame] | 434 | struct dma_fence *next; |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 435 | |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 436 | /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst |
| 437 | * begins at an offset, then adjust the size accordingly |
| 438 | */ |
| 439 | cur_size = min3(min(src_node_size, dst_node_size), size, |
| 440 | GTT_MAX_BYTES); |
| 441 | if (cur_size + src_page_offset > GTT_MAX_BYTES || |
| 442 | cur_size + dst_page_offset > GTT_MAX_BYTES) |
| 443 | cur_size -= max(src_page_offset, dst_page_offset); |
| 444 | |
| 445 | /* Map only what needs to be accessed. Map src to window 0 and |
| 446 | * dst to window 1 |
| 447 | */ |
| 448 | if (src->mem->mem_type == TTM_PL_TT && |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 449 | !amdgpu_gtt_mgr_has_gart_addr(src->mem)) { |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 450 | r = amdgpu_map_buffer(src->bo, src->mem, |
| 451 | PFN_UP(cur_size + src_page_offset), |
| 452 | src_node_start, 0, ring, |
| 453 | &from); |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 454 | if (r) |
| 455 | goto error; |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 456 | /* Adjust the offset because amdgpu_map_buffer returns |
| 457 | * start of mapped page |
| 458 | */ |
| 459 | from += src_page_offset; |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 460 | } |
| 461 | |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 462 | if (dst->mem->mem_type == TTM_PL_TT && |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 463 | !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) { |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 464 | r = amdgpu_map_buffer(dst->bo, dst->mem, |
| 465 | PFN_UP(cur_size + dst_page_offset), |
| 466 | dst_node_start, 1, ring, |
| 467 | &to); |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 468 | if (r) |
| 469 | goto error; |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 470 | to += dst_page_offset; |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 471 | } |
| 472 | |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 473 | r = amdgpu_copy_buffer(ring, from, to, cur_size, |
| 474 | resv, &next, false, true); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 475 | if (r) |
| 476 | goto error; |
| 477 | |
Dave Airlie | 220196b | 2016-10-28 11:33:52 +1000 | [diff] [blame] | 478 | dma_fence_put(fence); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 479 | fence = next; |
| 480 | |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 481 | size -= cur_size; |
| 482 | if (!size) |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 483 | break; |
| 484 | |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 485 | src_node_size -= cur_size; |
| 486 | if (!src_node_size) { |
| 487 | src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm, |
| 488 | src->mem); |
| 489 | src_node_size = (src_mm->size << PAGE_SHIFT); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 490 | } else { |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 491 | src_node_start += cur_size; |
| 492 | src_page_offset = src_node_start & (PAGE_SIZE - 1); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 493 | } |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 494 | dst_node_size -= cur_size; |
| 495 | if (!dst_node_size) { |
| 496 | dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm, |
| 497 | dst->mem); |
| 498 | dst_node_size = (dst_mm->size << PAGE_SHIFT); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 499 | } else { |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 500 | dst_node_start += cur_size; |
| 501 | dst_page_offset = dst_node_start & (PAGE_SIZE - 1); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 502 | } |
| 503 | } |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 504 | error: |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 505 | mutex_unlock(&adev->mman.gtt_window_lock); |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 506 | if (f) |
| 507 | *f = dma_fence_get(fence); |
| 508 | dma_fence_put(fence); |
| 509 | return r; |
| 510 | } |
| 511 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 512 | /** |
| 513 | * amdgpu_move_blit - Copy an entire buffer to another buffer |
| 514 | * |
| 515 | * This is a helper called by amdgpu_bo_move() and |
| 516 | * amdgpu_move_vram_ram() to help move buffers to and from VRAM. |
| 517 | */ |
Harish Kasiviswanathan | 1eca5a5 | 2017-10-03 15:41:56 -0400 | [diff] [blame] | 518 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, |
| 519 | bool evict, bool no_wait_gpu, |
| 520 | struct ttm_mem_reg *new_mem, |
| 521 | struct ttm_mem_reg *old_mem) |
| 522 | { |
| 523 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
| 524 | struct amdgpu_copy_mem src, dst; |
| 525 | struct dma_fence *fence = NULL; |
| 526 | int r; |
| 527 | |
| 528 | src.bo = bo; |
| 529 | dst.bo = bo; |
| 530 | src.mem = old_mem; |
| 531 | dst.mem = new_mem; |
| 532 | src.offset = 0; |
| 533 | dst.offset = 0; |
| 534 | |
| 535 | r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, |
| 536 | new_mem->num_pages << PAGE_SHIFT, |
| 537 | bo->resv, &fence); |
| 538 | if (r) |
| 539 | goto error; |
Christian König | ce64bc2 | 2016-06-15 13:44:05 +0200 | [diff] [blame] | 540 | |
| 541 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); |
Chris Wilson | f54d186 | 2016-10-25 13:00:45 +0100 | [diff] [blame] | 542 | dma_fence_put(fence); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 543 | return r; |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 544 | |
| 545 | error: |
| 546 | if (fence) |
Dave Airlie | 220196b | 2016-10-28 11:33:52 +1000 | [diff] [blame] | 547 | dma_fence_wait(fence, false); |
| 548 | dma_fence_put(fence); |
Christian König | 8892f15 | 2016-08-17 10:46:52 +0200 | [diff] [blame] | 549 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 550 | } |
| 551 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 552 | /** |
| 553 | * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer |
| 554 | * |
| 555 | * Called by amdgpu_bo_move(). |
| 556 | */ |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 557 | static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, |
| 558 | struct ttm_operation_ctx *ctx, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 559 | struct ttm_mem_reg *new_mem) |
| 560 | { |
| 561 | struct amdgpu_device *adev; |
| 562 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 563 | struct ttm_mem_reg tmp_mem; |
| 564 | struct ttm_place placements; |
| 565 | struct ttm_placement placement; |
| 566 | int r; |
| 567 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 568 | adev = amdgpu_ttm_adev(bo->bdev); |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 569 | |
| 570 | /* create space/pages for new_mem in GTT space */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 571 | tmp_mem = *new_mem; |
| 572 | tmp_mem.mm_node = NULL; |
| 573 | placement.num_placement = 1; |
| 574 | placement.placement = &placements; |
| 575 | placement.num_busy_placement = 1; |
| 576 | placement.busy_placement = &placements; |
| 577 | placements.fpfn = 0; |
Christian König | 5e7e839 | 2017-06-30 12:19:42 +0200 | [diff] [blame] | 578 | placements.lpfn = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 579 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 580 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 581 | if (unlikely(r)) { |
| 582 | return r; |
| 583 | } |
| 584 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 585 | /* set caching flags */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 586 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); |
| 587 | if (unlikely(r)) { |
| 588 | goto out_cleanup; |
| 589 | } |
| 590 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 591 | /* Bind the memory to the GTT space */ |
Roger He | 993baf1 | 2017-12-21 17:42:51 +0800 | [diff] [blame] | 592 | r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 593 | if (unlikely(r)) { |
| 594 | goto out_cleanup; |
| 595 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 596 | |
| 597 | /* blit VRAM to GTT */ |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 598 | r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 599 | if (unlikely(r)) { |
| 600 | goto out_cleanup; |
| 601 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 602 | |
| 603 | /* move BO (in tmp_mem) to new_mem */ |
Roger He | 3e98d82 | 2017-12-08 20:19:32 +0800 | [diff] [blame] | 604 | r = ttm_bo_move_ttm(bo, ctx, new_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 605 | out_cleanup: |
| 606 | ttm_bo_mem_put(bo, &tmp_mem); |
| 607 | return r; |
| 608 | } |
| 609 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 610 | /** |
| 611 | * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM |
| 612 | * |
| 613 | * Called by amdgpu_bo_move(). |
| 614 | */ |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 615 | static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, |
| 616 | struct ttm_operation_ctx *ctx, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 617 | struct ttm_mem_reg *new_mem) |
| 618 | { |
| 619 | struct amdgpu_device *adev; |
| 620 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 621 | struct ttm_mem_reg tmp_mem; |
| 622 | struct ttm_placement placement; |
| 623 | struct ttm_place placements; |
| 624 | int r; |
| 625 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 626 | adev = amdgpu_ttm_adev(bo->bdev); |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 627 | |
| 628 | /* make space in GTT for old_mem buffer */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 629 | tmp_mem = *new_mem; |
| 630 | tmp_mem.mm_node = NULL; |
| 631 | placement.num_placement = 1; |
| 632 | placement.placement = &placements; |
| 633 | placement.num_busy_placement = 1; |
| 634 | placement.busy_placement = &placements; |
| 635 | placements.fpfn = 0; |
Christian König | 5e7e839 | 2017-06-30 12:19:42 +0200 | [diff] [blame] | 636 | placements.lpfn = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 637 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 638 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 639 | if (unlikely(r)) { |
| 640 | return r; |
| 641 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 642 | |
| 643 | /* move/bind old memory to GTT space */ |
Roger He | 3e98d82 | 2017-12-08 20:19:32 +0800 | [diff] [blame] | 644 | r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 645 | if (unlikely(r)) { |
| 646 | goto out_cleanup; |
| 647 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 648 | |
| 649 | /* copy to VRAM */ |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 650 | r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 651 | if (unlikely(r)) { |
| 652 | goto out_cleanup; |
| 653 | } |
| 654 | out_cleanup: |
| 655 | ttm_bo_mem_put(bo, &tmp_mem); |
| 656 | return r; |
| 657 | } |
| 658 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 659 | /** |
| 660 | * amdgpu_bo_move - Move a buffer object to a new memory location |
| 661 | * |
| 662 | * Called by ttm_bo_handle_move_mem() |
| 663 | */ |
Christian König | 2823f4f | 2017-04-26 16:31:14 +0200 | [diff] [blame] | 664 | static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, |
| 665 | struct ttm_operation_ctx *ctx, |
| 666 | struct ttm_mem_reg *new_mem) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 667 | { |
| 668 | struct amdgpu_device *adev; |
Michel Dänzer | 104ece9 | 2016-03-28 12:53:02 +0900 | [diff] [blame] | 669 | struct amdgpu_bo *abo; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 670 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 671 | int r; |
| 672 | |
Michel Dänzer | 104ece9 | 2016-03-28 12:53:02 +0900 | [diff] [blame] | 673 | /* Can't move a pinned BO */ |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 674 | abo = ttm_to_amdgpu_bo(bo); |
Michel Dänzer | 104ece9 | 2016-03-28 12:53:02 +0900 | [diff] [blame] | 675 | if (WARN_ON_ONCE(abo->pin_count > 0)) |
| 676 | return -EINVAL; |
| 677 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 678 | adev = amdgpu_ttm_adev(bo->bdev); |
Christian König | dbd5ed6 | 2016-06-21 16:28:14 +0200 | [diff] [blame] | 679 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 680 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
| 681 | amdgpu_move_null(bo, new_mem); |
| 682 | return 0; |
| 683 | } |
| 684 | if ((old_mem->mem_type == TTM_PL_TT && |
| 685 | new_mem->mem_type == TTM_PL_SYSTEM) || |
| 686 | (old_mem->mem_type == TTM_PL_SYSTEM && |
| 687 | new_mem->mem_type == TTM_PL_TT)) { |
| 688 | /* bind is enough */ |
| 689 | amdgpu_move_null(bo, new_mem); |
| 690 | return 0; |
| 691 | } |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 692 | |
| 693 | if (!adev->mman.buffer_funcs_enabled) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 694 | goto memcpy; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 695 | |
| 696 | if (old_mem->mem_type == TTM_PL_VRAM && |
| 697 | new_mem->mem_type == TTM_PL_SYSTEM) { |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 698 | r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 699 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
| 700 | new_mem->mem_type == TTM_PL_VRAM) { |
Christian König | dfb8fa9 | 2017-04-26 16:44:41 +0200 | [diff] [blame] | 701 | r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 702 | } else { |
Christian König | 2823f4f | 2017-04-26 16:31:14 +0200 | [diff] [blame] | 703 | r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, |
| 704 | new_mem, old_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 705 | } |
| 706 | |
| 707 | if (r) { |
| 708 | memcpy: |
Roger He | 3e98d82 | 2017-12-08 20:19:32 +0800 | [diff] [blame] | 709 | r = ttm_bo_move_memcpy(bo, ctx, new_mem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 710 | if (r) { |
| 711 | return r; |
| 712 | } |
| 713 | } |
| 714 | |
John Brooks | 96cf827 | 2017-06-30 11:31:08 -0400 | [diff] [blame] | 715 | if (bo->type == ttm_bo_type_device && |
| 716 | new_mem->mem_type == TTM_PL_VRAM && |
| 717 | old_mem->mem_type != TTM_PL_VRAM) { |
| 718 | /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU |
| 719 | * accesses the BO after it's moved. |
| 720 | */ |
| 721 | abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; |
| 722 | } |
| 723 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 724 | /* update statistics */ |
| 725 | atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); |
| 726 | return 0; |
| 727 | } |
| 728 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 729 | /** |
| 730 | * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault |
| 731 | * |
| 732 | * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() |
| 733 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 734 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
| 735 | { |
| 736 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 737 | struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 738 | struct drm_mm_node *mm_node = mem->mm_node; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 739 | |
| 740 | mem->bus.addr = NULL; |
| 741 | mem->bus.offset = 0; |
| 742 | mem->bus.size = mem->num_pages << PAGE_SHIFT; |
| 743 | mem->bus.base = 0; |
| 744 | mem->bus.is_iomem = false; |
| 745 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
| 746 | return -EINVAL; |
| 747 | switch (mem->mem_type) { |
| 748 | case TTM_PL_SYSTEM: |
| 749 | /* system memory */ |
| 750 | return 0; |
| 751 | case TTM_PL_TT: |
| 752 | break; |
| 753 | case TTM_PL_VRAM: |
| 754 | mem->bus.offset = mem->start << PAGE_SHIFT; |
| 755 | /* check if it's visible */ |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 756 | if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 757 | return -EINVAL; |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 758 | /* Only physically contiguous buffers apply. In a contiguous |
| 759 | * buffer, size of the first mm_node would match the number of |
| 760 | * pages in ttm_mem_reg. |
| 761 | */ |
| 762 | if (adev->mman.aper_base_kaddr && |
| 763 | (mm_node->size == mem->num_pages)) |
| 764 | mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + |
| 765 | mem->bus.offset; |
| 766 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 767 | mem->bus.base = adev->gmc.aper_base; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 768 | mem->bus.is_iomem = true; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 769 | break; |
| 770 | default: |
| 771 | return -EINVAL; |
| 772 | } |
| 773 | return 0; |
| 774 | } |
| 775 | |
| 776 | static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
| 777 | { |
| 778 | } |
| 779 | |
Christian König | 9bbdcc0 | 2017-03-29 11:16:05 +0200 | [diff] [blame] | 780 | static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, |
| 781 | unsigned long page_offset) |
| 782 | { |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 783 | struct drm_mm_node *mm; |
| 784 | unsigned long offset = (page_offset << PAGE_SHIFT); |
Christian König | 9bbdcc0 | 2017-03-29 11:16:05 +0200 | [diff] [blame] | 785 | |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 786 | mm = amdgpu_find_mm_node(&bo->mem, &offset); |
| 787 | return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + |
| 788 | (offset >> PAGE_SHIFT); |
Christian König | 9bbdcc0 | 2017-03-29 11:16:05 +0200 | [diff] [blame] | 789 | } |
| 790 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 791 | /* |
| 792 | * TTM backend functions. |
| 793 | */ |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 794 | struct amdgpu_ttm_gup_task_list { |
| 795 | struct list_head list; |
| 796 | struct task_struct *task; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 797 | }; |
| 798 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 799 | struct amdgpu_ttm_tt { |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 800 | struct ttm_dma_tt ttm; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 801 | u64 offset; |
| 802 | uint64_t userptr; |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 803 | struct task_struct *usertask; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 804 | uint32_t userflags; |
| 805 | spinlock_t guptasklock; |
| 806 | struct list_head guptasks; |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 807 | atomic_t mmu_invalidations; |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 808 | uint32_t last_set_pages; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 809 | }; |
| 810 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 811 | /** |
| 812 | * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to |
| 813 | * by a USERPTR pointer to memory |
| 814 | * |
| 815 | * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos(). |
| 816 | * This provides a wrapper around the get_user_pages() call to provide |
| 817 | * device accessible pages that back user memory. |
| 818 | */ |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 819 | int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 820 | { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 821 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 822 | struct mm_struct *mm = gtt->usertask->mm; |
Lorenzo Stoakes | 768ae30 | 2016-10-13 01:20:16 +0100 | [diff] [blame] | 823 | unsigned int flags = 0; |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 824 | unsigned pinned = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 825 | int r; |
| 826 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 827 | if (!mm) /* Happens during process shutdown */ |
| 828 | return -ESRCH; |
| 829 | |
Lorenzo Stoakes | 768ae30 | 2016-10-13 01:20:16 +0100 | [diff] [blame] | 830 | if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) |
| 831 | flags |= FOLL_WRITE; |
| 832 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 833 | down_read(&mm->mmap_sem); |
Christian König | b72cf4f | 2017-09-03 15:22:06 +0200 | [diff] [blame] | 834 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 835 | if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 836 | /* check that we only use anonymous memory |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 837 | to prevent problems with writeback */ |
| 838 | unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; |
| 839 | struct vm_area_struct *vma; |
| 840 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 841 | vma = find_vma(mm, gtt->userptr); |
Christian König | b72cf4f | 2017-09-03 15:22:06 +0200 | [diff] [blame] | 842 | if (!vma || vma->vm_file || vma->vm_end < end) { |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 843 | up_read(&mm->mmap_sem); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 844 | return -EPERM; |
Christian König | b72cf4f | 2017-09-03 15:22:06 +0200 | [diff] [blame] | 845 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 846 | } |
| 847 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 848 | /* loop enough times using contiguous pages of memory */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 849 | do { |
| 850 | unsigned num_pages = ttm->num_pages - pinned; |
| 851 | uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 852 | struct page **p = pages + pinned; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 853 | struct amdgpu_ttm_gup_task_list guptask; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 854 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 855 | guptask.task = current; |
| 856 | spin_lock(>t->guptasklock); |
| 857 | list_add(&guptask.list, >t->guptasks); |
| 858 | spin_unlock(>t->guptasklock); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 859 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 860 | if (mm == current->mm) |
| 861 | r = get_user_pages(userptr, num_pages, flags, p, NULL); |
| 862 | else |
| 863 | r = get_user_pages_remote(gtt->usertask, |
| 864 | mm, userptr, num_pages, |
| 865 | flags, p, NULL, NULL); |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 866 | |
| 867 | spin_lock(>t->guptasklock); |
| 868 | list_del(&guptask.list); |
| 869 | spin_unlock(>t->guptasklock); |
| 870 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 871 | if (r < 0) |
| 872 | goto release_pages; |
| 873 | |
| 874 | pinned += r; |
| 875 | |
| 876 | } while (pinned < ttm->num_pages); |
| 877 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 878 | up_read(&mm->mmap_sem); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 879 | return 0; |
| 880 | |
| 881 | release_pages: |
Mel Gorman | c6f92f9 | 2017-11-15 17:37:55 -0800 | [diff] [blame] | 882 | release_pages(pages, pinned); |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 883 | up_read(&mm->mmap_sem); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 884 | return r; |
| 885 | } |
| 886 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 887 | /** |
| 888 | * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages |
| 889 | * as necessary. |
| 890 | * |
| 891 | * Called by amdgpu_cs_list_validate(). This creates the page list |
| 892 | * that backs user memory and will ultimately be mapped into the device |
| 893 | * address space. |
| 894 | */ |
Christian König | a216ab0 | 2017-09-02 13:21:31 +0200 | [diff] [blame] | 895 | void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 896 | { |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 897 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 898 | unsigned i; |
| 899 | |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 900 | gtt->last_set_pages = atomic_read(>t->mmu_invalidations); |
Christian König | a216ab0 | 2017-09-02 13:21:31 +0200 | [diff] [blame] | 901 | for (i = 0; i < ttm->num_pages; ++i) { |
| 902 | if (ttm->pages[i]) |
| 903 | put_page(ttm->pages[i]); |
| 904 | |
| 905 | ttm->pages[i] = pages ? pages[i] : NULL; |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 906 | } |
| 907 | } |
| 908 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 909 | /** |
| 910 | * amdgpu_ttm_tt_mark_user_page - Mark pages as dirty |
| 911 | * |
| 912 | * Called while unpinning userptr pages |
| 913 | */ |
Christian König | 1b0c0f9 | 2017-09-05 14:36:44 +0200 | [diff] [blame] | 914 | void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 915 | { |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 916 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 917 | unsigned i; |
| 918 | |
Christian König | 1b0c0f9 | 2017-09-05 14:36:44 +0200 | [diff] [blame] | 919 | for (i = 0; i < ttm->num_pages; ++i) { |
| 920 | struct page *page = ttm->pages[i]; |
| 921 | |
| 922 | if (!page) |
| 923 | continue; |
| 924 | |
| 925 | if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) |
| 926 | set_page_dirty(page); |
| 927 | |
| 928 | mark_page_accessed(page); |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 929 | } |
| 930 | } |
| 931 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 932 | /** |
| 933 | * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the |
| 934 | * user pages |
| 935 | * |
| 936 | * Called by amdgpu_ttm_backend_bind() |
| 937 | **/ |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 938 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) |
| 939 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 940 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 941 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 942 | unsigned nents; |
| 943 | int r; |
| 944 | |
| 945 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 946 | enum dma_data_direction direction = write ? |
| 947 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
| 948 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 949 | /* Allocate an SG array and squash pages into it */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 950 | r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, |
| 951 | ttm->num_pages << PAGE_SHIFT, |
| 952 | GFP_KERNEL); |
| 953 | if (r) |
| 954 | goto release_sg; |
| 955 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 956 | /* Map SG to device */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 957 | r = -ENOMEM; |
| 958 | nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
| 959 | if (nents != ttm->sg->nents) |
| 960 | goto release_sg; |
| 961 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 962 | /* convert SG to linear array of pages and dma addresses */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 963 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
| 964 | gtt->ttm.dma_address, ttm->num_pages); |
| 965 | |
| 966 | return 0; |
| 967 | |
| 968 | release_sg: |
| 969 | kfree(ttm->sg); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 970 | return r; |
| 971 | } |
| 972 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 973 | /** |
| 974 | * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages |
| 975 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 976 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) |
| 977 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 978 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 979 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 980 | |
| 981 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 982 | enum dma_data_direction direction = write ? |
| 983 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
| 984 | |
| 985 | /* double check that we don't free the table twice */ |
| 986 | if (!ttm->sg->sgl) |
| 987 | return; |
| 988 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 989 | /* unmap the pages mapped to the device */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 990 | dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
| 991 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 992 | /* mark the pages as dirty */ |
Christian König | 1b0c0f9 | 2017-09-05 14:36:44 +0200 | [diff] [blame] | 993 | amdgpu_ttm_tt_mark_user_pages(ttm); |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 994 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 995 | sg_free_table(ttm->sg); |
| 996 | } |
| 997 | |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 998 | int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, |
| 999 | struct ttm_buffer_object *tbo, |
| 1000 | uint64_t flags) |
| 1001 | { |
| 1002 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); |
| 1003 | struct ttm_tt *ttm = tbo->ttm; |
| 1004 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1005 | int r; |
| 1006 | |
| 1007 | if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { |
| 1008 | uint64_t page_idx = 1; |
| 1009 | |
| 1010 | r = amdgpu_gart_bind(adev, gtt->offset, page_idx, |
| 1011 | ttm->pages, gtt->ttm.dma_address, flags); |
| 1012 | if (r) |
| 1013 | goto gart_bind_fail; |
| 1014 | |
| 1015 | /* Patch mtype of the second part BO */ |
| 1016 | flags &= ~AMDGPU_PTE_MTYPE_MASK; |
| 1017 | flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC); |
| 1018 | |
| 1019 | r = amdgpu_gart_bind(adev, |
| 1020 | gtt->offset + (page_idx << PAGE_SHIFT), |
| 1021 | ttm->num_pages - page_idx, |
| 1022 | &ttm->pages[page_idx], |
| 1023 | &(gtt->ttm.dma_address[page_idx]), flags); |
| 1024 | } else { |
| 1025 | r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, |
| 1026 | ttm->pages, gtt->ttm.dma_address, flags); |
| 1027 | } |
| 1028 | |
| 1029 | gart_bind_fail: |
| 1030 | if (r) |
| 1031 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", |
| 1032 | ttm->num_pages, gtt->offset); |
| 1033 | |
| 1034 | return r; |
| 1035 | } |
| 1036 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1037 | /** |
| 1038 | * amdgpu_ttm_backend_bind - Bind GTT memory |
| 1039 | * |
| 1040 | * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). |
| 1041 | * This handles binding GTT memory to the device address space. |
| 1042 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1043 | static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, |
| 1044 | struct ttm_mem_reg *bo_mem) |
| 1045 | { |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1046 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1047 | struct amdgpu_ttm_tt *gtt = (void*)ttm; |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1048 | uint64_t flags; |
Dan Carpenter | 2ce3f5dc | 2017-08-09 13:30:46 +0300 | [diff] [blame] | 1049 | int r = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1050 | |
Chunming Zhou | e2f784f | 2015-11-26 16:33:58 +0800 | [diff] [blame] | 1051 | if (gtt->userptr) { |
| 1052 | r = amdgpu_ttm_tt_pin_userptr(ttm); |
| 1053 | if (r) { |
| 1054 | DRM_ERROR("failed to pin userptr\n"); |
| 1055 | return r; |
| 1056 | } |
| 1057 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1058 | if (!ttm->num_pages) { |
| 1059 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
| 1060 | ttm->num_pages, bo_mem, ttm); |
| 1061 | } |
| 1062 | |
| 1063 | if (bo_mem->mem_type == AMDGPU_PL_GDS || |
| 1064 | bo_mem->mem_type == AMDGPU_PL_GWS || |
| 1065 | bo_mem->mem_type == AMDGPU_PL_OA) |
| 1066 | return -EINVAL; |
| 1067 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1068 | if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { |
| 1069 | gtt->offset = AMDGPU_BO_INVALID_OFFSET; |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1070 | return 0; |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1071 | } |
Christian König | 98a7f88 | 2017-06-30 10:41:07 +0200 | [diff] [blame] | 1072 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1073 | /* compute PTE flags relevant to this BO memory */ |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1074 | flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1075 | |
| 1076 | /* bind pages into GART page tables */ |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1077 | gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1078 | r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1079 | ttm->pages, gtt->ttm.dma_address, flags); |
| 1080 | |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1081 | if (r) |
Christian König | ac7afe6 | 2017-08-22 21:04:47 +0200 | [diff] [blame] | 1082 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", |
| 1083 | ttm->num_pages, gtt->offset); |
Christian König | 98a7f88 | 2017-06-30 10:41:07 +0200 | [diff] [blame] | 1084 | return r; |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1085 | } |
| 1086 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1087 | /** |
| 1088 | * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object |
| 1089 | */ |
Christian König | c5835bb | 2017-10-27 15:43:14 +0200 | [diff] [blame] | 1090 | int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1091 | { |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1092 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
Christian König | c13c55d | 2017-04-12 15:33:00 +0200 | [diff] [blame] | 1093 | struct ttm_operation_ctx ctx = { false, false }; |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1094 | struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1095 | struct ttm_mem_reg tmp; |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1096 | struct ttm_placement placement; |
| 1097 | struct ttm_place placements; |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1098 | uint64_t flags; |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1099 | int r; |
| 1100 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1101 | if (bo->mem.mem_type != TTM_PL_TT || |
| 1102 | amdgpu_gtt_mgr_has_gart_addr(&bo->mem)) |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1103 | return 0; |
| 1104 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1105 | /* allocate GTT space */ |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1106 | tmp = bo->mem; |
| 1107 | tmp.mm_node = NULL; |
| 1108 | placement.num_placement = 1; |
| 1109 | placement.placement = &placements; |
| 1110 | placement.num_busy_placement = 1; |
| 1111 | placement.busy_placement = &placements; |
| 1112 | placements.fpfn = 0; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1113 | placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; |
Christian König | ec8c9f8 | 2017-10-16 13:47:15 +0200 | [diff] [blame] | 1114 | placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | |
| 1115 | TTM_PL_FLAG_TT; |
Christian König | bb990bb | 2016-09-09 16:32:33 +0200 | [diff] [blame] | 1116 | |
Christian König | c13c55d | 2017-04-12 15:33:00 +0200 | [diff] [blame] | 1117 | r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1118 | if (unlikely(r)) |
| 1119 | return r; |
| 1120 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1121 | /* compute PTE flags for this buffer object */ |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1122 | flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1123 | |
| 1124 | /* Bind pages */ |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1125 | gtt->offset = (u64)tmp.start << PAGE_SHIFT; |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 1126 | r = amdgpu_ttm_gart_bind(adev, bo, flags); |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1127 | if (unlikely(r)) { |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1128 | ttm_bo_mem_put(bo, &tmp); |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1129 | return r; |
| 1130 | } |
Christian König | 1d00402 | 2017-08-22 16:58:07 +0200 | [diff] [blame] | 1131 | |
Christian König | 4057573 | 2017-10-26 17:54:12 +0200 | [diff] [blame] | 1132 | ttm_bo_mem_put(bo, &bo->mem); |
| 1133 | bo->mem = tmp; |
| 1134 | bo->offset = (bo->mem.start << PAGE_SHIFT) + |
| 1135 | bo->bdev->man[bo->mem.mem_type].gpu_offset; |
| 1136 | |
| 1137 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1138 | } |
| 1139 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1140 | /** |
| 1141 | * amdgpu_ttm_recover_gart - Rebind GTT pages |
| 1142 | * |
| 1143 | * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to |
| 1144 | * rebind GTT pages during a GPU reset. |
| 1145 | */ |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1146 | int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) |
Chunming Zhou | 2c0d731 | 2016-08-30 16:36:25 +0800 | [diff] [blame] | 1147 | { |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1148 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
Monk Liu | 1d1a2cd | 2017-04-27 17:14:57 +0800 | [diff] [blame] | 1149 | uint64_t flags; |
Chunming Zhou | 2c0d731 | 2016-08-30 16:36:25 +0800 | [diff] [blame] | 1150 | int r; |
| 1151 | |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 1152 | if (!tbo->ttm) |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1153 | return 0; |
| 1154 | |
Yong Zhao | 959a209 | 2018-05-14 12:15:27 -0400 | [diff] [blame] | 1155 | flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); |
| 1156 | r = amdgpu_ttm_gart_bind(adev, tbo, flags); |
| 1157 | |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1158 | return r; |
Chunming Zhou | 2c0d731 | 2016-08-30 16:36:25 +0800 | [diff] [blame] | 1159 | } |
| 1160 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1161 | /** |
| 1162 | * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages |
| 1163 | * |
| 1164 | * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and |
| 1165 | * ttm_tt_destroy(). |
| 1166 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1167 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) |
| 1168 | { |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1169 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1170 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 1171 | int r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1172 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1173 | /* if the pages have userptr pinning then clear that first */ |
Christian König | 85a4b57 | 2016-09-22 14:19:50 +0200 | [diff] [blame] | 1174 | if (gtt->userptr) |
| 1175 | amdgpu_ttm_tt_unpin_userptr(ttm); |
| 1176 | |
Christian König | 3da917b | 2017-10-27 14:17:09 +0200 | [diff] [blame] | 1177 | if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) |
Christian König | 78ab0a3 | 2016-09-09 15:39:08 +0200 | [diff] [blame] | 1178 | return 0; |
| 1179 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1180 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ |
Christian König | d9a1376 | 2018-02-28 09:35:39 +0100 | [diff] [blame] | 1181 | r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); |
Christian König | c1c7ce8 | 2017-10-16 16:50:32 +0200 | [diff] [blame] | 1182 | if (r) |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 1183 | DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", |
| 1184 | gtt->ttm.ttm.num_pages, gtt->offset); |
Roger.He | 738f64c | 2017-05-05 13:27:10 +0800 | [diff] [blame] | 1185 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1186 | } |
| 1187 | |
| 1188 | static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) |
| 1189 | { |
| 1190 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1191 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 1192 | if (gtt->usertask) |
| 1193 | put_task_struct(gtt->usertask); |
| 1194 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1195 | ttm_dma_tt_fini(>t->ttm); |
| 1196 | kfree(gtt); |
| 1197 | } |
| 1198 | |
| 1199 | static struct ttm_backend_func amdgpu_backend_func = { |
| 1200 | .bind = &amdgpu_ttm_backend_bind, |
| 1201 | .unbind = &amdgpu_ttm_backend_unbind, |
| 1202 | .destroy = &amdgpu_ttm_backend_destroy, |
| 1203 | }; |
| 1204 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1205 | /** |
| 1206 | * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO |
| 1207 | * |
| 1208 | * @bo: The buffer object to create a GTT ttm_tt object around |
| 1209 | * |
| 1210 | * Called by ttm_tt_create(). |
| 1211 | */ |
Christian König | dde5da2 | 2018-02-22 10:18:14 +0100 | [diff] [blame] | 1212 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, |
| 1213 | uint32_t page_flags) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1214 | { |
| 1215 | struct amdgpu_device *adev; |
| 1216 | struct amdgpu_ttm_tt *gtt; |
| 1217 | |
Christian König | dde5da2 | 2018-02-22 10:18:14 +0100 | [diff] [blame] | 1218 | adev = amdgpu_ttm_adev(bo->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1219 | |
| 1220 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); |
| 1221 | if (gtt == NULL) { |
| 1222 | return NULL; |
| 1223 | } |
| 1224 | gtt->ttm.ttm.func = &amdgpu_backend_func; |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1225 | |
| 1226 | /* allocate space for the uninitialized page entries */ |
Christian König | dde5da2 | 2018-02-22 10:18:14 +0100 | [diff] [blame] | 1227 | if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1228 | kfree(gtt); |
| 1229 | return NULL; |
| 1230 | } |
| 1231 | return >t->ttm.ttm; |
| 1232 | } |
| 1233 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1234 | /** |
| 1235 | * amdgpu_ttm_tt_populate - Map GTT pages visible to the device |
| 1236 | * |
| 1237 | * Map the pages of a ttm_tt object to an address space visible |
| 1238 | * to the underlying device. |
| 1239 | */ |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1240 | static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, |
| 1241 | struct ttm_operation_ctx *ctx) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1242 | { |
Tom St Denis | aca8171 | 2017-07-31 09:35:24 -0400 | [diff] [blame] | 1243 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1244 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1245 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1246 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1247 | /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1248 | if (gtt && gtt->userptr) { |
Maninder Singh | 5f0b34c | 2015-06-26 13:28:50 +0530 | [diff] [blame] | 1249 | ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1250 | if (!ttm->sg) |
| 1251 | return -ENOMEM; |
| 1252 | |
| 1253 | ttm->page_flags |= TTM_PAGE_FLAG_SG; |
| 1254 | ttm->state = tt_unbound; |
| 1255 | return 0; |
| 1256 | } |
| 1257 | |
| 1258 | if (slave && ttm->sg) { |
| 1259 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
Christian König | e89d0d3 | 2018-02-23 16:08:51 +0100 | [diff] [blame] | 1260 | gtt->ttm.dma_address, |
| 1261 | ttm->num_pages); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1262 | ttm->state = tt_unbound; |
Tom St Denis | 79ba280 | 2017-09-18 08:10:00 -0400 | [diff] [blame] | 1263 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1264 | } |
| 1265 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1266 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 1267 | if (adev->need_swiotlb && swiotlb_nr_tbl()) { |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1268 | return ttm_dma_populate(>t->ttm, adev->dev, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1269 | } |
| 1270 | #endif |
| 1271 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1272 | /* fall back to generic helper to populate the page array |
| 1273 | * and map them to the device */ |
Roger He | d0cef9f | 2017-12-21 17:42:50 +0800 | [diff] [blame] | 1274 | return ttm_populate_and_map_pages(adev->dev, >t->ttm, ctx); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1275 | } |
| 1276 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1277 | /** |
| 1278 | * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays |
| 1279 | * |
| 1280 | * Unmaps pages of a ttm_tt object from the device address space and |
| 1281 | * unpopulates the page array backing it. |
| 1282 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1283 | static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) |
| 1284 | { |
| 1285 | struct amdgpu_device *adev; |
| 1286 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1287 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1288 | |
| 1289 | if (gtt && gtt->userptr) { |
Christian König | a216ab0 | 2017-09-02 13:21:31 +0200 | [diff] [blame] | 1290 | amdgpu_ttm_tt_set_user_pages(ttm, NULL); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1291 | kfree(ttm->sg); |
| 1292 | ttm->page_flags &= ~TTM_PAGE_FLAG_SG; |
| 1293 | return; |
| 1294 | } |
| 1295 | |
| 1296 | if (slave) |
| 1297 | return; |
| 1298 | |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 1299 | adev = amdgpu_ttm_adev(ttm->bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1300 | |
| 1301 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 1302 | if (adev->need_swiotlb && swiotlb_nr_tbl()) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1303 | ttm_dma_unpopulate(>t->ttm, adev->dev); |
| 1304 | return; |
| 1305 | } |
| 1306 | #endif |
| 1307 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1308 | /* fall back to generic helper to unmap and unpopulate array */ |
Tom St Denis | 7405e0d | 2017-08-18 10:05:48 -0400 | [diff] [blame] | 1309 | ttm_unmap_and_unpopulate_pages(adev->dev, >t->ttm); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1310 | } |
| 1311 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1312 | /** |
| 1313 | * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt |
| 1314 | * for the current task |
| 1315 | * |
| 1316 | * @ttm: The ttm_tt object to bind this userptr object to |
| 1317 | * @addr: The address in the current tasks VM space to use |
| 1318 | * @flags: Requirements of userptr object. |
| 1319 | * |
| 1320 | * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages |
| 1321 | * to current task |
| 1322 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1323 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
| 1324 | uint32_t flags) |
| 1325 | { |
| 1326 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1327 | |
| 1328 | if (gtt == NULL) |
| 1329 | return -EINVAL; |
| 1330 | |
| 1331 | gtt->userptr = addr; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1332 | gtt->userflags = flags; |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 1333 | |
| 1334 | if (gtt->usertask) |
| 1335 | put_task_struct(gtt->usertask); |
| 1336 | gtt->usertask = current->group_leader; |
| 1337 | get_task_struct(gtt->usertask); |
| 1338 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1339 | spin_lock_init(>t->guptasklock); |
| 1340 | INIT_LIST_HEAD(>t->guptasks); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1341 | atomic_set(>t->mmu_invalidations, 0); |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 1342 | gtt->last_set_pages = 0; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1343 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1344 | return 0; |
| 1345 | } |
| 1346 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1347 | /** |
| 1348 | * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object |
| 1349 | */ |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1350 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1351 | { |
| 1352 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1353 | |
| 1354 | if (gtt == NULL) |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1355 | return NULL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1356 | |
Felix Kuehling | 0919195 | 2018-03-23 15:32:29 -0400 | [diff] [blame] | 1357 | if (gtt->usertask == NULL) |
| 1358 | return NULL; |
| 1359 | |
| 1360 | return gtt->usertask->mm; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1361 | } |
| 1362 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1363 | /** |
| 1364 | * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays |
| 1365 | * inside an address range for the |
| 1366 | * current task. |
| 1367 | * |
| 1368 | */ |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1369 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, |
| 1370 | unsigned long end) |
| 1371 | { |
| 1372 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1373 | struct amdgpu_ttm_gup_task_list *entry; |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1374 | unsigned long size; |
| 1375 | |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1376 | if (gtt == NULL || !gtt->userptr) |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1377 | return false; |
| 1378 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1379 | /* Return false if no part of the ttm_tt object lies within |
| 1380 | * the range |
| 1381 | */ |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1382 | size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; |
| 1383 | if (gtt->userptr > end || gtt->userptr + size <= start) |
| 1384 | return false; |
| 1385 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1386 | /* Search the lists of tasks that hold this mapping and see |
| 1387 | * if current is one of them. If it is return false. |
| 1388 | */ |
Christian König | 637dd3b | 2016-03-03 14:24:57 +0100 | [diff] [blame] | 1389 | spin_lock(>t->guptasklock); |
| 1390 | list_for_each_entry(entry, >t->guptasks, list) { |
| 1391 | if (entry->task == current) { |
| 1392 | spin_unlock(>t->guptasklock); |
| 1393 | return false; |
| 1394 | } |
| 1395 | } |
| 1396 | spin_unlock(>t->guptasklock); |
| 1397 | |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1398 | atomic_inc(>t->mmu_invalidations); |
| 1399 | |
Christian König | cc1de6e | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1400 | return true; |
| 1401 | } |
| 1402 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1403 | /** |
| 1404 | * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been |
| 1405 | * invalidated? |
| 1406 | */ |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1407 | bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, |
| 1408 | int *last_invalidated) |
| 1409 | { |
| 1410 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1411 | int prev_invalidated = *last_invalidated; |
| 1412 | |
| 1413 | *last_invalidated = atomic_read(>t->mmu_invalidations); |
| 1414 | return prev_invalidated != *last_invalidated; |
| 1415 | } |
| 1416 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1417 | /** |
| 1418 | * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this |
| 1419 | * ttm_tt object been invalidated |
| 1420 | * since the last time they've |
| 1421 | * been set? |
| 1422 | */ |
Christian König | ca666a3 | 2017-09-05 14:30:05 +0200 | [diff] [blame] | 1423 | bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) |
| 1424 | { |
| 1425 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1426 | |
| 1427 | if (gtt == NULL || !gtt->userptr) |
| 1428 | return false; |
| 1429 | |
| 1430 | return atomic_read(>t->mmu_invalidations) != gtt->last_set_pages; |
| 1431 | } |
| 1432 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1433 | /** |
| 1434 | * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? |
| 1435 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1436 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) |
| 1437 | { |
| 1438 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
| 1439 | |
| 1440 | if (gtt == NULL) |
| 1441 | return false; |
| 1442 | |
| 1443 | return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); |
| 1444 | } |
| 1445 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1446 | /** |
| 1447 | * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object |
| 1448 | * |
| 1449 | * @ttm: The ttm_tt object to compute the flags for |
| 1450 | * @mem: The memory registry backing this ttm_tt object |
| 1451 | */ |
Chunming Zhou | 6b77760 | 2016-09-21 16:19:19 +0800 | [diff] [blame] | 1452 | uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1453 | struct ttm_mem_reg *mem) |
| 1454 | { |
Chunming Zhou | 6b77760 | 2016-09-21 16:19:19 +0800 | [diff] [blame] | 1455 | uint64_t flags = 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1456 | |
| 1457 | if (mem && mem->mem_type != TTM_PL_SYSTEM) |
| 1458 | flags |= AMDGPU_PTE_VALID; |
| 1459 | |
Christian König | 6d99905 | 2015-12-04 13:32:55 +0100 | [diff] [blame] | 1460 | if (mem && mem->mem_type == TTM_PL_TT) { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1461 | flags |= AMDGPU_PTE_SYSTEM; |
| 1462 | |
Christian König | 6d99905 | 2015-12-04 13:32:55 +0100 | [diff] [blame] | 1463 | if (ttm->caching_state == tt_cached) |
| 1464 | flags |= AMDGPU_PTE_SNOOPED; |
| 1465 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1466 | |
Alex Xie | 4b98e0c | 2017-02-14 12:31:36 -0500 | [diff] [blame] | 1467 | flags |= adev->gart.gart_pte_flags; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1468 | flags |= AMDGPU_PTE_READABLE; |
| 1469 | |
| 1470 | if (!amdgpu_ttm_tt_is_readonly(ttm)) |
| 1471 | flags |= AMDGPU_PTE_WRITEABLE; |
| 1472 | |
| 1473 | return flags; |
| 1474 | } |
| 1475 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1476 | /** |
| 1477 | * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict |
| 1478 | * a buffer object. |
| 1479 | * |
| 1480 | * Return true if eviction is sensible. Called by |
| 1481 | * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space() |
| 1482 | * which tries to evict buffer objects until it can find space |
| 1483 | * for a new object and by ttm_bo_force_list_clean() which is |
| 1484 | * used to clean out a memory space. |
| 1485 | */ |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1486 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, |
| 1487 | const struct ttm_place *place) |
| 1488 | { |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1489 | unsigned long num_pages = bo->mem.num_pages; |
| 1490 | struct drm_mm_node *node = bo->mem.mm_node; |
Felix Kuehling | d8d019c | 2018-02-06 20:32:35 -0500 | [diff] [blame] | 1491 | struct reservation_object_list *flist; |
| 1492 | struct dma_fence *f; |
| 1493 | int i; |
| 1494 | |
| 1495 | /* If bo is a KFD BO, check if the bo belongs to the current process. |
| 1496 | * If true, then return false as any KFD process needs all its BOs to |
| 1497 | * be resident to run successfully |
| 1498 | */ |
| 1499 | flist = reservation_object_get_list(bo->resv); |
| 1500 | if (flist) { |
| 1501 | for (i = 0; i < flist->shared_count; ++i) { |
| 1502 | f = rcu_dereference_protected(flist->shared[i], |
| 1503 | reservation_object_held(bo->resv)); |
| 1504 | if (amdkfd_fence_check_mm(f, current->mm)) |
| 1505 | return false; |
| 1506 | } |
| 1507 | } |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1508 | |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1509 | switch (bo->mem.mem_type) { |
| 1510 | case TTM_PL_TT: |
| 1511 | return true; |
| 1512 | |
| 1513 | case TTM_PL_VRAM: |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1514 | /* Check each drm MM node individually */ |
| 1515 | while (num_pages) { |
| 1516 | if (place->fpfn < (node->start + node->size) && |
| 1517 | !(place->lpfn && place->lpfn <= node->start)) |
| 1518 | return true; |
| 1519 | |
| 1520 | num_pages -= node->size; |
| 1521 | ++node; |
| 1522 | } |
Roger He | 7da2e3e | 2017-11-02 13:14:27 +0800 | [diff] [blame] | 1523 | return false; |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1524 | |
Christian König | 4fcae78 | 2017-04-20 12:11:47 +0200 | [diff] [blame] | 1525 | default: |
| 1526 | break; |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1527 | } |
| 1528 | |
| 1529 | return ttm_bo_eviction_valuable(bo, place); |
| 1530 | } |
| 1531 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1532 | /** |
| 1533 | * amdgpu_ttm_access_memory - Read or Write memory that backs a |
| 1534 | * buffer object. |
| 1535 | * |
| 1536 | * @bo: The buffer object to read/write |
| 1537 | * @offset: Offset into buffer object |
| 1538 | * @buf: Secondary buffer to write/read from |
| 1539 | * @len: Length in bytes of access |
| 1540 | * @write: true if writing |
| 1541 | * |
| 1542 | * This is used to access VRAM that backs a buffer object via MMIO |
| 1543 | * access for debugging purposes. |
| 1544 | */ |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1545 | static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, |
| 1546 | unsigned long offset, |
| 1547 | void *buf, int len, int write) |
| 1548 | { |
Andres Rodriguez | b82485f | 2017-09-15 21:05:19 -0400 | [diff] [blame] | 1549 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1550 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 1551 | struct drm_mm_node *nodes; |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1552 | uint32_t value = 0; |
| 1553 | int ret = 0; |
| 1554 | uint64_t pos; |
| 1555 | unsigned long flags; |
| 1556 | |
| 1557 | if (bo->mem.mem_type != TTM_PL_VRAM) |
| 1558 | return -EIO; |
| 1559 | |
Harish Kasiviswanathan | e1d5150 | 2017-10-06 17:36:35 -0400 | [diff] [blame] | 1560 | nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1561 | pos = (nodes->start << PAGE_SHIFT) + offset; |
| 1562 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1563 | while (len && pos < adev->gmc.mc_vram_size) { |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1564 | uint64_t aligned_pos = pos & ~(uint64_t)3; |
| 1565 | uint32_t bytes = 4 - (pos & 3); |
| 1566 | uint32_t shift = (pos & 3) * 8; |
| 1567 | uint32_t mask = 0xffffffff << shift; |
| 1568 | |
| 1569 | if (len < bytes) { |
| 1570 | mask &= 0xffffffff >> (bytes - len) * 8; |
| 1571 | bytes = len; |
| 1572 | } |
| 1573 | |
| 1574 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1575 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); |
| 1576 | WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1577 | if (!write || mask != 0xffffffff) |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1578 | value = RREG32_NO_KIQ(mmMM_DATA); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1579 | if (write) { |
| 1580 | value &= ~mask; |
| 1581 | value |= (*(uint32_t *)buf << shift) & mask; |
Tom St Denis | 97bae49 | 2017-09-14 08:57:26 -0400 | [diff] [blame] | 1582 | WREG32_NO_KIQ(mmMM_DATA, value); |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1583 | } |
| 1584 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 1585 | if (!write) { |
| 1586 | value = (value & mask) >> shift; |
| 1587 | memcpy(buf, &value, bytes); |
| 1588 | } |
| 1589 | |
| 1590 | ret += bytes; |
| 1591 | buf = (uint8_t *)buf + bytes; |
| 1592 | pos += bytes; |
| 1593 | len -= bytes; |
| 1594 | if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { |
| 1595 | ++nodes; |
| 1596 | pos = (nodes->start << PAGE_SHIFT); |
| 1597 | } |
| 1598 | } |
| 1599 | |
| 1600 | return ret; |
| 1601 | } |
| 1602 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1603 | static struct ttm_bo_driver amdgpu_bo_driver = { |
| 1604 | .ttm_tt_create = &amdgpu_ttm_tt_create, |
| 1605 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, |
| 1606 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, |
| 1607 | .invalidate_caches = &amdgpu_invalidate_caches, |
| 1608 | .init_mem_type = &amdgpu_init_mem_type, |
Christian König | 9982ca6 | 2016-10-19 14:44:22 +0200 | [diff] [blame] | 1609 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1610 | .evict_flags = &amdgpu_evict_flags, |
| 1611 | .move = &amdgpu_bo_move, |
| 1612 | .verify_access = &amdgpu_verify_access, |
| 1613 | .move_notify = &amdgpu_bo_move_notify, |
| 1614 | .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, |
| 1615 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, |
| 1616 | .io_mem_free = &amdgpu_ttm_io_mem_free, |
Christian König | 9bbdcc0 | 2017-03-29 11:16:05 +0200 | [diff] [blame] | 1617 | .io_mem_pfn = amdgpu_ttm_io_mem_pfn, |
Felix Kuehling | e342610 | 2017-07-03 14:18:27 -0400 | [diff] [blame] | 1618 | .access_memory = &amdgpu_ttm_access_memory |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1619 | }; |
| 1620 | |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1621 | /* |
| 1622 | * Firmware Reservation functions |
| 1623 | */ |
| 1624 | /** |
| 1625 | * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram |
| 1626 | * |
| 1627 | * @adev: amdgpu_device pointer |
| 1628 | * |
| 1629 | * free fw reserved vram if it has been reserved. |
| 1630 | */ |
| 1631 | static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) |
| 1632 | { |
| 1633 | amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, |
| 1634 | NULL, &adev->fw_vram_usage.va); |
| 1635 | } |
| 1636 | |
| 1637 | /** |
| 1638 | * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw |
| 1639 | * |
| 1640 | * @adev: amdgpu_device pointer |
| 1641 | * |
| 1642 | * create bo vram reservation from fw. |
| 1643 | */ |
| 1644 | static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) |
| 1645 | { |
| 1646 | struct ttm_operation_ctx ctx = { false, false }; |
Chunming Zhou | 3216c6b | 2018-04-16 18:27:50 +0800 | [diff] [blame] | 1647 | struct amdgpu_bo_param bp; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1648 | int r = 0; |
| 1649 | int i; |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1650 | u64 vram_size = adev->gmc.visible_vram_size; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1651 | u64 offset = adev->fw_vram_usage.start_offset; |
| 1652 | u64 size = adev->fw_vram_usage.size; |
| 1653 | struct amdgpu_bo *bo; |
| 1654 | |
Chunming Zhou | 3216c6b | 2018-04-16 18:27:50 +0800 | [diff] [blame] | 1655 | memset(&bp, 0, sizeof(bp)); |
| 1656 | bp.size = adev->fw_vram_usage.size; |
| 1657 | bp.byte_align = PAGE_SIZE; |
| 1658 | bp.domain = AMDGPU_GEM_DOMAIN_VRAM; |
| 1659 | bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
| 1660 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; |
| 1661 | bp.type = ttm_bo_type_kernel; |
| 1662 | bp.resv = NULL; |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1663 | adev->fw_vram_usage.va = NULL; |
| 1664 | adev->fw_vram_usage.reserved_bo = NULL; |
| 1665 | |
| 1666 | if (adev->fw_vram_usage.size > 0 && |
| 1667 | adev->fw_vram_usage.size <= vram_size) { |
| 1668 | |
Chunming Zhou | 3216c6b | 2018-04-16 18:27:50 +0800 | [diff] [blame] | 1669 | r = amdgpu_bo_create(adev, &bp, |
Christian König | eab3de2 | 2018-03-14 14:48:17 -0500 | [diff] [blame] | 1670 | &adev->fw_vram_usage.reserved_bo); |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1671 | if (r) |
| 1672 | goto error_create; |
| 1673 | |
| 1674 | r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); |
| 1675 | if (r) |
| 1676 | goto error_reserve; |
| 1677 | |
| 1678 | /* remove the original mem node and create a new one at the |
| 1679 | * request position |
| 1680 | */ |
| 1681 | bo = adev->fw_vram_usage.reserved_bo; |
| 1682 | offset = ALIGN(offset, PAGE_SIZE); |
| 1683 | for (i = 0; i < bo->placement.num_placement; ++i) { |
| 1684 | bo->placements[i].fpfn = offset >> PAGE_SHIFT; |
| 1685 | bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
| 1686 | } |
| 1687 | |
| 1688 | ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); |
| 1689 | r = ttm_bo_mem_space(&bo->tbo, &bo->placement, |
| 1690 | &bo->tbo.mem, &ctx); |
| 1691 | if (r) |
| 1692 | goto error_pin; |
| 1693 | |
| 1694 | r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, |
| 1695 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1696 | adev->fw_vram_usage.start_offset, |
| 1697 | (adev->fw_vram_usage.start_offset + |
| 1698 | adev->fw_vram_usage.size), NULL); |
| 1699 | if (r) |
| 1700 | goto error_pin; |
| 1701 | r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, |
| 1702 | &adev->fw_vram_usage.va); |
| 1703 | if (r) |
| 1704 | goto error_kmap; |
| 1705 | |
| 1706 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
| 1707 | } |
| 1708 | return r; |
| 1709 | |
| 1710 | error_kmap: |
| 1711 | amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); |
| 1712 | error_pin: |
| 1713 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
| 1714 | error_reserve: |
| 1715 | amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); |
| 1716 | error_create: |
| 1717 | adev->fw_vram_usage.va = NULL; |
| 1718 | adev->fw_vram_usage.reserved_bo = NULL; |
| 1719 | return r; |
| 1720 | } |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1721 | /** |
| 1722 | * amdgpu_ttm_init - Init the memory management (ttm) as well as |
| 1723 | * various gtt/vram related fields. |
| 1724 | * |
| 1725 | * This initializes all of the memory space pools that the TTM layer |
| 1726 | * will need such as the GTT space (system memory mapped to the device), |
| 1727 | * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which |
| 1728 | * can be mapped per VMID. |
| 1729 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1730 | int amdgpu_ttm_init(struct amdgpu_device *adev) |
| 1731 | { |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1732 | uint64_t gtt_size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1733 | int r; |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1734 | u64 vis_vram_limit; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1735 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1736 | /* initialize global references for vram/gtt */ |
Alex Deucher | 70b5c5a | 2016-11-15 16:55:53 -0500 | [diff] [blame] | 1737 | r = amdgpu_ttm_global_init(adev); |
| 1738 | if (r) { |
| 1739 | return r; |
| 1740 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1741 | /* No others user of address space so set it to 0 */ |
| 1742 | r = ttm_bo_device_init(&adev->mman.bdev, |
| 1743 | adev->mman.bo_global_ref.ref.object, |
| 1744 | &amdgpu_bo_driver, |
| 1745 | adev->ddev->anon_inode->i_mapping, |
| 1746 | DRM_FILE_PAGE_OFFSET, |
| 1747 | adev->need_dma32); |
| 1748 | if (r) { |
| 1749 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
| 1750 | return r; |
| 1751 | } |
| 1752 | adev->mman.initialized = true; |
Andrey Grodzovsky | 7cce958 | 2018-01-16 10:06:36 -0500 | [diff] [blame] | 1753 | |
| 1754 | /* We opt to avoid OOM on system pages allocations */ |
| 1755 | adev->mman.bdev.no_retry = true; |
| 1756 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1757 | /* Initialize VRAM pool with all of VRAM divided into pages */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1758 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1759 | adev->gmc.real_vram_size >> PAGE_SHIFT); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1760 | if (r) { |
| 1761 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
| 1762 | return r; |
| 1763 | } |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1764 | |
| 1765 | /* Reduce size of CPU-visible VRAM if requested */ |
| 1766 | vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; |
| 1767 | if (amdgpu_vis_vram_limit > 0 && |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1768 | vis_vram_limit <= adev->gmc.visible_vram_size) |
| 1769 | adev->gmc.visible_vram_size = vis_vram_limit; |
John Brooks | 218b5dc | 2017-06-27 22:33:17 -0400 | [diff] [blame] | 1770 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1771 | /* Change the size here instead of the init above so only lpfn is affected */ |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1772 | amdgpu_ttm_set_buffer_funcs_status(adev, false); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 1773 | #ifdef CONFIG_64BIT |
| 1774 | adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, |
| 1775 | adev->gmc.visible_vram_size); |
| 1776 | #endif |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1777 | |
Horace Chen | a05502e | 2017-09-29 14:41:57 +0800 | [diff] [blame] | 1778 | /* |
| 1779 | *The reserved vram for firmware must be pinned to the specified |
| 1780 | *place on the VRAM, so reserve it early. |
| 1781 | */ |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1782 | r = amdgpu_ttm_fw_reserve_vram_init(adev); |
Horace Chen | a05502e | 2017-09-29 14:41:57 +0800 | [diff] [blame] | 1783 | if (r) { |
| 1784 | return r; |
| 1785 | } |
| 1786 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1787 | /* allocate memory as required for VGA |
| 1788 | * This is used for VGA emulation and pre-OS scanout buffers to |
| 1789 | * avoid display artifacts while transitioning between pre-OS |
| 1790 | * and driver. */ |
Alex Deucher | ebdef28 | 2018-04-06 14:54:09 -0500 | [diff] [blame] | 1791 | if (adev->gmc.stolen_size) { |
| 1792 | r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, |
| 1793 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1794 | &adev->stolen_vga_memory, |
| 1795 | NULL, NULL); |
| 1796 | if (r) |
| 1797 | return r; |
| 1798 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1799 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1800 | (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1801 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1802 | /* Compute GTT size, either bsaed on 3/4th the size of RAM size |
| 1803 | * or whatever the user passed on module init */ |
Roger He | 424e2c8 | 2017-11-10 19:05:13 +0800 | [diff] [blame] | 1804 | if (amdgpu_gtt_size == -1) { |
| 1805 | struct sysinfo si; |
| 1806 | |
| 1807 | si_meminfo(&si); |
Andrey Grodzovsky | 2456252 | 2017-12-15 12:09:16 -0500 | [diff] [blame] | 1808 | gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1809 | adev->gmc.mc_vram_size), |
Andrey Grodzovsky | 2456252 | 2017-12-15 12:09:16 -0500 | [diff] [blame] | 1810 | ((uint64_t)si.totalram * si.mem_unit * 3/4)); |
| 1811 | } |
| 1812 | else |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1813 | gtt_size = (uint64_t)amdgpu_gtt_size << 20; |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1814 | |
| 1815 | /* Initialize GTT memory pool */ |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1816 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1817 | if (r) { |
| 1818 | DRM_ERROR("Failed initializing GTT heap.\n"); |
| 1819 | return r; |
| 1820 | } |
| 1821 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n", |
Christian König | 36d3837 | 2017-07-07 13:17:45 +0200 | [diff] [blame] | 1822 | (unsigned)(gtt_size / (1024 * 1024))); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1823 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1824 | /* Initialize various on-chip memory pools */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1825 | adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; |
| 1826 | adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; |
| 1827 | adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; |
| 1828 | adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT; |
| 1829 | adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT; |
| 1830 | adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT; |
| 1831 | adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT; |
| 1832 | adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT; |
| 1833 | adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT; |
| 1834 | /* GDS Memory */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1835 | if (adev->gds.mem.total_size) { |
| 1836 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, |
| 1837 | adev->gds.mem.total_size >> PAGE_SHIFT); |
| 1838 | if (r) { |
| 1839 | DRM_ERROR("Failed initializing GDS heap.\n"); |
| 1840 | return r; |
| 1841 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1842 | } |
| 1843 | |
| 1844 | /* GWS */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1845 | if (adev->gds.gws.total_size) { |
| 1846 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, |
| 1847 | adev->gds.gws.total_size >> PAGE_SHIFT); |
| 1848 | if (r) { |
| 1849 | DRM_ERROR("Failed initializing gws heap.\n"); |
| 1850 | return r; |
| 1851 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1852 | } |
| 1853 | |
| 1854 | /* OA */ |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1855 | if (adev->gds.oa.total_size) { |
| 1856 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, |
| 1857 | adev->gds.oa.total_size >> PAGE_SHIFT); |
| 1858 | if (r) { |
| 1859 | DRM_ERROR("Failed initializing oa heap.\n"); |
| 1860 | return r; |
| 1861 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1862 | } |
| 1863 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1864 | /* Register debugfs entries for amdgpu_ttm */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1865 | r = amdgpu_ttm_debugfs_init(adev); |
| 1866 | if (r) { |
| 1867 | DRM_ERROR("Failed to init debugfs\n"); |
| 1868 | return r; |
| 1869 | } |
| 1870 | return 0; |
| 1871 | } |
| 1872 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1873 | /** |
| 1874 | * amdgpu_ttm_late_init - Handle any late initialization for |
| 1875 | * amdgpu_ttm |
| 1876 | */ |
Andrey Grodzovsky | 6f752ec | 2018-04-06 14:54:10 -0500 | [diff] [blame] | 1877 | void amdgpu_ttm_late_init(struct amdgpu_device *adev) |
| 1878 | { |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1879 | /* return the VGA stolen memory (if any) back to VRAM */ |
Andrey Grodzovsky | 6f752ec | 2018-04-06 14:54:10 -0500 | [diff] [blame] | 1880 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
| 1881 | } |
| 1882 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 1883 | /** |
| 1884 | * amdgpu_ttm_fini - De-initialize the TTM memory pools |
| 1885 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1886 | void amdgpu_ttm_fini(struct amdgpu_device *adev) |
| 1887 | { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1888 | if (!adev->mman.initialized) |
| 1889 | return; |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1890 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1891 | amdgpu_ttm_debugfs_fini(adev); |
Alex Deucher | f5ec697 | 2017-12-14 16:39:02 -0500 | [diff] [blame] | 1892 | amdgpu_ttm_fw_reserve_vram_fini(adev); |
Amber Lin | f8f4b9a | 2018-02-27 10:01:59 -0500 | [diff] [blame] | 1893 | if (adev->mman.aper_base_kaddr) |
| 1894 | iounmap(adev->mman.aper_base_kaddr); |
| 1895 | adev->mman.aper_base_kaddr = NULL; |
Monk Liu | 11c6b82 | 2017-11-13 20:41:56 +0800 | [diff] [blame] | 1896 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1897 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); |
| 1898 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); |
Alex Deucher | d2d51d8 | 2017-03-15 09:45:48 -0400 | [diff] [blame] | 1899 | if (adev->gds.mem.total_size) |
| 1900 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); |
| 1901 | if (adev->gds.gws.total_size) |
| 1902 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); |
| 1903 | if (adev->gds.oa.total_size) |
| 1904 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1905 | ttm_bo_device_release(&adev->mman.bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1906 | amdgpu_ttm_global_fini(adev); |
| 1907 | adev->mman.initialized = false; |
| 1908 | DRM_INFO("amdgpu: ttm finalized\n"); |
| 1909 | } |
| 1910 | |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1911 | /** |
| 1912 | * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions |
| 1913 | * |
| 1914 | * @adev: amdgpu_device pointer |
| 1915 | * @enable: true when we can use buffer functions. |
| 1916 | * |
| 1917 | * Enable/disable use of buffer functions during suspend/resume. This should |
| 1918 | * only be called at bootup or when userspace isn't running. |
| 1919 | */ |
| 1920 | void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1921 | { |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1922 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM]; |
| 1923 | uint64_t size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1924 | |
Christian König | 380383f | 2018-03-01 11:03:27 +0100 | [diff] [blame] | 1925 | if (!adev->mman.initialized || adev->in_gpu_reset) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1926 | return; |
| 1927 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1928 | /* this just adjusts TTM size idea, which sets lpfn to the correct value */ |
Christian König | 57adc4c | 2018-03-01 11:01:52 +0100 | [diff] [blame] | 1929 | if (enable) |
| 1930 | size = adev->gmc.real_vram_size; |
| 1931 | else |
| 1932 | size = adev->gmc.visible_vram_size; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1933 | man->size = size >> PAGE_SHIFT; |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 1934 | adev->mman.buffer_funcs_enabled = enable; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1935 | } |
| 1936 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1937 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) |
| 1938 | { |
| 1939 | struct drm_file *file_priv; |
| 1940 | struct amdgpu_device *adev; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1941 | |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1942 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1943 | return -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1944 | |
| 1945 | file_priv = filp->private_data; |
| 1946 | adev = file_priv->minor->dev->dev_private; |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1947 | if (adev == NULL) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1948 | return -EINVAL; |
Christian König | e176fe17 | 2015-05-27 10:22:47 +0200 | [diff] [blame] | 1949 | |
| 1950 | return ttm_bo_mmap(filp, vma, &adev->mman.bdev); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1951 | } |
| 1952 | |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 1953 | static int amdgpu_map_buffer(struct ttm_buffer_object *bo, |
| 1954 | struct ttm_mem_reg *mem, unsigned num_pages, |
| 1955 | uint64_t offset, unsigned window, |
| 1956 | struct amdgpu_ring *ring, |
| 1957 | uint64_t *addr) |
| 1958 | { |
| 1959 | struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; |
| 1960 | struct amdgpu_device *adev = ring->adev; |
| 1961 | struct ttm_tt *ttm = bo->ttm; |
| 1962 | struct amdgpu_job *job; |
| 1963 | unsigned num_dw, num_bytes; |
| 1964 | dma_addr_t *dma_address; |
| 1965 | struct dma_fence *fence; |
| 1966 | uint64_t src_addr, dst_addr; |
| 1967 | uint64_t flags; |
| 1968 | int r; |
| 1969 | |
| 1970 | BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < |
| 1971 | AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); |
| 1972 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 1973 | *addr = adev->gmc.gart_start; |
Christian König | abca90f | 2017-06-30 11:05:54 +0200 | [diff] [blame] | 1974 | *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * |
| 1975 | AMDGPU_GPU_PAGE_SIZE; |
| 1976 | |
| 1977 | num_dw = adev->mman.buffer_funcs->copy_num_dw; |
| 1978 | while (num_dw & 0x7) |
| 1979 | num_dw++; |
| 1980 | |
| 1981 | num_bytes = num_pages * 8; |
| 1982 | |
| 1983 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); |
| 1984 | if (r) |
| 1985 | return r; |
| 1986 | |
| 1987 | src_addr = num_dw * 4; |
| 1988 | src_addr += job->ibs[0].gpu_addr; |
| 1989 | |
| 1990 | dst_addr = adev->gart.table_addr; |
| 1991 | dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; |
| 1992 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, |
| 1993 | dst_addr, num_bytes); |
| 1994 | |
| 1995 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 1996 | WARN_ON(job->ibs[0].length_dw > num_dw); |
| 1997 | |
| 1998 | dma_address = >t->ttm.dma_address[offset >> PAGE_SHIFT]; |
| 1999 | flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); |
| 2000 | r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, |
| 2001 | &job->ibs[0].ptr[num_dw]); |
| 2002 | if (r) |
| 2003 | goto error_free; |
| 2004 | |
| 2005 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
| 2006 | AMDGPU_FENCE_OWNER_UNDEFINED, &fence); |
| 2007 | if (r) |
| 2008 | goto error_free; |
| 2009 | |
| 2010 | dma_fence_put(fence); |
| 2011 | |
| 2012 | return r; |
| 2013 | |
| 2014 | error_free: |
| 2015 | amdgpu_job_free(job); |
| 2016 | return r; |
| 2017 | } |
| 2018 | |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 2019 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
| 2020 | uint64_t dst_offset, uint32_t byte_count, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2021 | struct reservation_object *resv, |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 2022 | struct dma_fence **fence, bool direct_submit, |
| 2023 | bool vm_needs_flush) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2024 | { |
| 2025 | struct amdgpu_device *adev = ring->adev; |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2026 | struct amdgpu_job *job; |
| 2027 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2028 | uint32_t max_bytes; |
| 2029 | unsigned num_loops, num_dw; |
| 2030 | unsigned i; |
| 2031 | int r; |
| 2032 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 2033 | if (direct_submit && !ring->ready) { |
| 2034 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
| 2035 | return -EINVAL; |
| 2036 | } |
| 2037 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2038 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; |
| 2039 | num_loops = DIV_ROUND_UP(byte_count, max_bytes); |
| 2040 | num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; |
| 2041 | |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2042 | /* for IB padding */ |
| 2043 | while (num_dw & 0x7) |
| 2044 | num_dw++; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2045 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2046 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); |
| 2047 | if (r) |
Chunming Zhou | 9066b0c | 2015-08-25 15:12:26 +0800 | [diff] [blame] | 2048 | return r; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2049 | |
Christian König | fc9c8f5 | 2017-06-29 11:46:15 +0200 | [diff] [blame] | 2050 | job->vm_needs_flush = vm_needs_flush; |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2051 | if (resv) { |
Christian König | e86f9ce | 2016-02-08 12:13:05 +0100 | [diff] [blame] | 2052 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
Andres Rodriguez | 177ae09 | 2017-09-15 20:44:06 -0400 | [diff] [blame] | 2053 | AMDGPU_FENCE_OWNER_UNDEFINED, |
| 2054 | false); |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2055 | if (r) { |
| 2056 | DRM_ERROR("sync failed (%d).\n", r); |
| 2057 | goto error_free; |
| 2058 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2059 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2060 | |
| 2061 | for (i = 0; i < num_loops; i++) { |
| 2062 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
| 2063 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2064 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, |
| 2065 | dst_offset, cur_size_in_bytes); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2066 | |
| 2067 | src_offset += cur_size_in_bytes; |
| 2068 | dst_offset += cur_size_in_bytes; |
| 2069 | byte_count -= cur_size_in_bytes; |
| 2070 | } |
| 2071 | |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2072 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 2073 | WARN_ON(job->ibs[0].length_dw > num_dw); |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 2074 | if (direct_submit) { |
| 2075 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, |
Junwei Zhang | 50ddc75 | 2017-01-23 16:30:38 +0800 | [diff] [blame] | 2076 | NULL, fence); |
Chris Wilson | f54d186 | 2016-10-25 13:00:45 +0100 | [diff] [blame] | 2077 | job->fence = dma_fence_get(*fence); |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 2078 | if (r) |
| 2079 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
| 2080 | amdgpu_job_free(job); |
| 2081 | } else { |
| 2082 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
| 2083 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
| 2084 | if (r) |
| 2085 | goto error_free; |
| 2086 | } |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2087 | |
Chunming Zhou | e24db98 | 2016-08-15 10:46:04 +0800 | [diff] [blame] | 2088 | return r; |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2089 | |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2090 | error_free: |
Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 2091 | amdgpu_job_free(job); |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2092 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2093 | } |
| 2094 | |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2095 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2096 | uint32_t src_data, |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2097 | struct reservation_object *resv, |
| 2098 | struct dma_fence **fence) |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2099 | { |
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 2100 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2101 | uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2102 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
| 2103 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2104 | struct drm_mm_node *mm_node; |
| 2105 | unsigned long num_pages; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2106 | unsigned int num_loops, num_dw; |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2107 | |
| 2108 | struct amdgpu_job *job; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2109 | int r; |
| 2110 | |
Christian König | 81988f9 | 2018-03-01 11:09:15 +0100 | [diff] [blame] | 2111 | if (!adev->mman.buffer_funcs_enabled) { |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2112 | DRM_ERROR("Trying to clear memory with ring turned off.\n"); |
| 2113 | return -EINVAL; |
| 2114 | } |
| 2115 | |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 2116 | if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
Christian König | c5835bb | 2017-10-27 15:43:14 +0200 | [diff] [blame] | 2117 | r = amdgpu_ttm_alloc_gart(&bo->tbo); |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 2118 | if (r) |
| 2119 | return r; |
| 2120 | } |
| 2121 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2122 | num_pages = bo->tbo.num_pages; |
| 2123 | mm_node = bo->tbo.mem.mm_node; |
| 2124 | num_loops = 0; |
| 2125 | while (num_pages) { |
| 2126 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
| 2127 | |
| 2128 | num_loops += DIV_ROUND_UP(byte_count, max_bytes); |
| 2129 | num_pages -= mm_node->size; |
| 2130 | ++mm_node; |
| 2131 | } |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2132 | num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2133 | |
| 2134 | /* for IB padding */ |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2135 | num_dw += 64; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2136 | |
| 2137 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); |
| 2138 | if (r) |
| 2139 | return r; |
| 2140 | |
| 2141 | if (resv) { |
| 2142 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
Andres Rodriguez | 177ae09 | 2017-09-15 20:44:06 -0400 | [diff] [blame] | 2143 | AMDGPU_FENCE_OWNER_UNDEFINED, false); |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2144 | if (r) { |
| 2145 | DRM_ERROR("sync failed (%d).\n", r); |
| 2146 | goto error_free; |
| 2147 | } |
| 2148 | } |
| 2149 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2150 | num_pages = bo->tbo.num_pages; |
| 2151 | mm_node = bo->tbo.mem.mm_node; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2152 | |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2153 | while (num_pages) { |
| 2154 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
| 2155 | uint64_t dst_addr; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2156 | |
Christian König | 92c60d9 | 2017-06-29 10:44:39 +0200 | [diff] [blame] | 2157 | dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2158 | while (byte_count) { |
| 2159 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
| 2160 | |
Christian König | 44e1bae | 2018-01-24 19:58:45 +0100 | [diff] [blame] | 2161 | amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, |
| 2162 | dst_addr, cur_size_in_bytes); |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2163 | |
| 2164 | dst_addr += cur_size_in_bytes; |
| 2165 | byte_count -= cur_size_in_bytes; |
| 2166 | } |
| 2167 | |
| 2168 | num_pages -= mm_node->size; |
| 2169 | ++mm_node; |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2170 | } |
| 2171 | |
| 2172 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
| 2173 | WARN_ON(job->ibs[0].length_dw > num_dw); |
| 2174 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
Christian König | f29224a6 | 2016-11-17 12:06:38 +0100 | [diff] [blame] | 2175 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
Flora Cui | 59b4a97 | 2016-07-19 16:48:22 +0800 | [diff] [blame] | 2176 | if (r) |
| 2177 | goto error_free; |
| 2178 | |
| 2179 | return 0; |
| 2180 | |
| 2181 | error_free: |
| 2182 | amdgpu_job_free(job); |
| 2183 | return r; |
| 2184 | } |
| 2185 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2186 | #if defined(CONFIG_DEBUG_FS) |
| 2187 | |
| 2188 | static int amdgpu_mm_dump_table(struct seq_file *m, void *data) |
| 2189 | { |
| 2190 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
| 2191 | unsigned ttm_pl = *(int *)node->info_ent->data; |
| 2192 | struct drm_device *dev = node->minor->dev; |
| 2193 | struct amdgpu_device *adev = dev->dev_private; |
Christian König | 12d4ac5 | 2017-08-07 14:07:43 +0200 | [diff] [blame] | 2194 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; |
Daniel Vetter | b5c3714 | 2016-12-29 12:09:24 +0100 | [diff] [blame] | 2195 | struct drm_printer p = drm_seq_file_printer(m); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2196 | |
Christian König | 12d4ac5 | 2017-08-07 14:07:43 +0200 | [diff] [blame] | 2197 | man->func->debug(man, &p); |
Daniel Vetter | b5c3714 | 2016-12-29 12:09:24 +0100 | [diff] [blame] | 2198 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2199 | } |
| 2200 | |
| 2201 | static int ttm_pl_vram = TTM_PL_VRAM; |
| 2202 | static int ttm_pl_tt = TTM_PL_TT; |
| 2203 | |
Nils Wallménius | 06ab683 | 2016-05-02 12:46:15 -0400 | [diff] [blame] | 2204 | static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2205 | {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, |
| 2206 | {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, |
| 2207 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, |
| 2208 | #ifdef CONFIG_SWIOTLB |
| 2209 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} |
| 2210 | #endif |
| 2211 | }; |
| 2212 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2213 | /** |
| 2214 | * amdgpu_ttm_vram_read - Linear read access to VRAM |
| 2215 | * |
| 2216 | * Accesses VRAM via MMIO for debugging purposes. |
| 2217 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2218 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, |
| 2219 | size_t size, loff_t *pos) |
| 2220 | { |
Al Viro | 4506309 | 2016-12-04 18:24:56 -0500 | [diff] [blame] | 2221 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2222 | ssize_t result = 0; |
| 2223 | int r; |
| 2224 | |
| 2225 | if (size & 0x3 || *pos & 0x3) |
| 2226 | return -EINVAL; |
| 2227 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2228 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 9156e72 | 2017-05-23 11:35:22 -0400 | [diff] [blame] | 2229 | return -ENXIO; |
| 2230 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2231 | while (size) { |
| 2232 | unsigned long flags; |
| 2233 | uint32_t value; |
| 2234 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2235 | if (*pos >= adev->gmc.mc_vram_size) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2236 | return result; |
| 2237 | |
| 2238 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | c3057281 | 2017-09-13 12:35:15 -0400 | [diff] [blame] | 2239 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
| 2240 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); |
| 2241 | value = RREG32_NO_KIQ(mmMM_DATA); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2242 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 2243 | |
| 2244 | r = put_user(value, (uint32_t *)buf); |
| 2245 | if (r) |
| 2246 | return r; |
| 2247 | |
| 2248 | result += 4; |
| 2249 | buf += 4; |
| 2250 | *pos += 4; |
| 2251 | size -= 4; |
| 2252 | } |
| 2253 | |
| 2254 | return result; |
| 2255 | } |
| 2256 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2257 | /** |
| 2258 | * amdgpu_ttm_vram_write - Linear write access to VRAM |
| 2259 | * |
| 2260 | * Accesses VRAM via MMIO for debugging purposes. |
| 2261 | */ |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2262 | static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, |
| 2263 | size_t size, loff_t *pos) |
| 2264 | { |
| 2265 | struct amdgpu_device *adev = file_inode(f)->i_private; |
| 2266 | ssize_t result = 0; |
| 2267 | int r; |
| 2268 | |
| 2269 | if (size & 0x3 || *pos & 0x3) |
| 2270 | return -EINVAL; |
| 2271 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2272 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2273 | return -ENXIO; |
| 2274 | |
| 2275 | while (size) { |
| 2276 | unsigned long flags; |
| 2277 | uint32_t value; |
| 2278 | |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2279 | if (*pos >= adev->gmc.mc_vram_size) |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2280 | return result; |
| 2281 | |
| 2282 | r = get_user(value, (uint32_t *)buf); |
| 2283 | if (r) |
| 2284 | return r; |
| 2285 | |
| 2286 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); |
Tom St Denis | c3057281 | 2017-09-13 12:35:15 -0400 | [diff] [blame] | 2287 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
| 2288 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); |
| 2289 | WREG32_NO_KIQ(mmMM_DATA, value); |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2290 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
| 2291 | |
| 2292 | result += 4; |
| 2293 | buf += 4; |
| 2294 | *pos += 4; |
| 2295 | size -= 4; |
| 2296 | } |
| 2297 | |
| 2298 | return result; |
| 2299 | } |
| 2300 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2301 | static const struct file_operations amdgpu_ttm_vram_fops = { |
| 2302 | .owner = THIS_MODULE, |
| 2303 | .read = amdgpu_ttm_vram_read, |
Tom St Denis | 08cab98 | 2017-08-29 08:36:52 -0400 | [diff] [blame] | 2304 | .write = amdgpu_ttm_vram_write, |
| 2305 | .llseek = default_llseek, |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2306 | }; |
| 2307 | |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 2308 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
| 2309 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2310 | /** |
| 2311 | * amdgpu_ttm_gtt_read - Linear read access to GTT memory |
| 2312 | */ |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2313 | static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, |
| 2314 | size_t size, loff_t *pos) |
| 2315 | { |
Al Viro | 4506309 | 2016-12-04 18:24:56 -0500 | [diff] [blame] | 2316 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2317 | ssize_t result = 0; |
| 2318 | int r; |
| 2319 | |
| 2320 | while (size) { |
| 2321 | loff_t p = *pos / PAGE_SIZE; |
| 2322 | unsigned off = *pos & ~PAGE_MASK; |
| 2323 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); |
| 2324 | struct page *page; |
| 2325 | void *ptr; |
| 2326 | |
| 2327 | if (p >= adev->gart.num_cpu_pages) |
| 2328 | return result; |
| 2329 | |
| 2330 | page = adev->gart.pages[p]; |
| 2331 | if (page) { |
| 2332 | ptr = kmap(page); |
| 2333 | ptr += off; |
| 2334 | |
| 2335 | r = copy_to_user(buf, ptr, cur_size); |
| 2336 | kunmap(adev->gart.pages[p]); |
| 2337 | } else |
| 2338 | r = clear_user(buf, cur_size); |
| 2339 | |
| 2340 | if (r) |
| 2341 | return -EFAULT; |
| 2342 | |
| 2343 | result += cur_size; |
| 2344 | buf += cur_size; |
| 2345 | *pos += cur_size; |
| 2346 | size -= cur_size; |
| 2347 | } |
| 2348 | |
| 2349 | return result; |
| 2350 | } |
| 2351 | |
| 2352 | static const struct file_operations amdgpu_ttm_gtt_fops = { |
| 2353 | .owner = THIS_MODULE, |
| 2354 | .read = amdgpu_ttm_gtt_read, |
| 2355 | .llseek = default_llseek |
| 2356 | }; |
| 2357 | |
| 2358 | #endif |
| 2359 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2360 | /** |
| 2361 | * amdgpu_iomem_read - Virtual read access to GPU mapped memory |
| 2362 | * |
| 2363 | * This function is used to read memory that has been mapped to the |
| 2364 | * GPU and the known addresses are not physical addresses but instead |
| 2365 | * bus addresses (e.g., what you'd put in an IB or ring buffer). |
| 2366 | */ |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2367 | static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, |
| 2368 | size_t size, loff_t *pos) |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2369 | { |
| 2370 | struct amdgpu_device *adev = file_inode(f)->i_private; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2371 | struct iommu_domain *dom; |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2372 | ssize_t result = 0; |
| 2373 | int r; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2374 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2375 | /* retrieve the IOMMU domain if any for this device */ |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2376 | dom = iommu_get_domain_for_dev(adev->dev); |
Tom St Denis | 10cfafd | 2017-09-19 11:29:04 -0400 | [diff] [blame] | 2377 | |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2378 | while (size) { |
| 2379 | phys_addr_t addr = *pos & PAGE_MASK; |
| 2380 | loff_t off = *pos & ~PAGE_MASK; |
| 2381 | size_t bytes = PAGE_SIZE - off; |
| 2382 | unsigned long pfn; |
| 2383 | struct page *p; |
| 2384 | void *ptr; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2385 | |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2386 | bytes = bytes < size ? bytes : size; |
| 2387 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2388 | /* Translate the bus address to a physical address. If |
| 2389 | * the domain is NULL it means there is no IOMMU active |
| 2390 | * and the address translation is the identity |
| 2391 | */ |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2392 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
| 2393 | |
| 2394 | pfn = addr >> PAGE_SHIFT; |
| 2395 | if (!pfn_valid(pfn)) |
| 2396 | return -EPERM; |
| 2397 | |
| 2398 | p = pfn_to_page(pfn); |
| 2399 | if (p->mapping != adev->mman.bdev.dev_mapping) |
| 2400 | return -EPERM; |
| 2401 | |
| 2402 | ptr = kmap(p); |
Tom St Denis | 864917a | 2018-03-20 09:13:08 -0400 | [diff] [blame] | 2403 | r = copy_to_user(buf, ptr + off, bytes); |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2404 | kunmap(p); |
| 2405 | if (r) |
| 2406 | return -EFAULT; |
| 2407 | |
| 2408 | size -= bytes; |
| 2409 | *pos += bytes; |
| 2410 | result += bytes; |
| 2411 | } |
| 2412 | |
| 2413 | return result; |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2414 | } |
| 2415 | |
Tom St Denis | 50da517 | 2018-05-09 14:22:29 -0400 | [diff] [blame] | 2416 | /** |
| 2417 | * amdgpu_iomem_write - Virtual write access to GPU mapped memory |
| 2418 | * |
| 2419 | * This function is used to write memory that has been mapped to the |
| 2420 | * GPU and the known addresses are not physical addresses but instead |
| 2421 | * bus addresses (e.g., what you'd put in an IB or ring buffer). |
| 2422 | */ |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2423 | static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, |
| 2424 | size_t size, loff_t *pos) |
| 2425 | { |
| 2426 | struct amdgpu_device *adev = file_inode(f)->i_private; |
| 2427 | struct iommu_domain *dom; |
| 2428 | ssize_t result = 0; |
| 2429 | int r; |
| 2430 | |
| 2431 | dom = iommu_get_domain_for_dev(adev->dev); |
| 2432 | |
| 2433 | while (size) { |
| 2434 | phys_addr_t addr = *pos & PAGE_MASK; |
| 2435 | loff_t off = *pos & ~PAGE_MASK; |
| 2436 | size_t bytes = PAGE_SIZE - off; |
| 2437 | unsigned long pfn; |
| 2438 | struct page *p; |
| 2439 | void *ptr; |
| 2440 | |
| 2441 | bytes = bytes < size ? bytes : size; |
| 2442 | |
| 2443 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
| 2444 | |
| 2445 | pfn = addr >> PAGE_SHIFT; |
| 2446 | if (!pfn_valid(pfn)) |
| 2447 | return -EPERM; |
| 2448 | |
| 2449 | p = pfn_to_page(pfn); |
| 2450 | if (p->mapping != adev->mman.bdev.dev_mapping) |
| 2451 | return -EPERM; |
| 2452 | |
| 2453 | ptr = kmap(p); |
Tom St Denis | 864917a | 2018-03-20 09:13:08 -0400 | [diff] [blame] | 2454 | r = copy_from_user(ptr + off, buf, bytes); |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2455 | kunmap(p); |
| 2456 | if (r) |
| 2457 | return -EFAULT; |
| 2458 | |
| 2459 | size -= bytes; |
| 2460 | *pos += bytes; |
| 2461 | result += bytes; |
| 2462 | } |
| 2463 | |
| 2464 | return result; |
| 2465 | } |
| 2466 | |
| 2467 | static const struct file_operations amdgpu_ttm_iomem_fops = { |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2468 | .owner = THIS_MODULE, |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2469 | .read = amdgpu_iomem_read, |
| 2470 | .write = amdgpu_iomem_write, |
Tom St Denis | 38290b2 | 2017-09-18 07:28:14 -0400 | [diff] [blame] | 2471 | .llseek = default_llseek |
| 2472 | }; |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2473 | |
| 2474 | static const struct { |
| 2475 | char *name; |
| 2476 | const struct file_operations *fops; |
| 2477 | int domain; |
| 2478 | } ttm_debugfs_entries[] = { |
| 2479 | { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM }, |
| 2480 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
| 2481 | { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, |
| 2482 | #endif |
Tom St Denis | ebb043f | 2018-02-23 09:46:23 -0500 | [diff] [blame] | 2483 | { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM }, |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2484 | }; |
| 2485 | |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 2486 | #endif |
| 2487 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2488 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) |
| 2489 | { |
| 2490 | #if defined(CONFIG_DEBUG_FS) |
| 2491 | unsigned count; |
| 2492 | |
| 2493 | struct drm_minor *minor = adev->ddev->primary; |
| 2494 | struct dentry *ent, *root = minor->debugfs_root; |
| 2495 | |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2496 | for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { |
| 2497 | ent = debugfs_create_file( |
| 2498 | ttm_debugfs_entries[count].name, |
| 2499 | S_IFREG | S_IRUGO, root, |
| 2500 | adev, |
| 2501 | ttm_debugfs_entries[count].fops); |
| 2502 | if (IS_ERR(ent)) |
| 2503 | return PTR_ERR(ent); |
| 2504 | if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2505 | i_size_write(ent->d_inode, adev->gmc.mc_vram_size); |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2506 | else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) |
Christian König | 770d13b | 2018-01-12 14:52:22 +0100 | [diff] [blame] | 2507 | i_size_write(ent->d_inode, adev->gmc.gart_size); |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2508 | adev->mman.debugfs_entries[count] = ent; |
| 2509 | } |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2510 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2511 | count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); |
| 2512 | |
| 2513 | #ifdef CONFIG_SWIOTLB |
Chunming Zhou | fd5fd48 | 2018-02-09 10:44:09 +0800 | [diff] [blame] | 2514 | if (!(adev->need_swiotlb && swiotlb_nr_tbl())) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2515 | --count; |
| 2516 | #endif |
| 2517 | |
| 2518 | return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); |
| 2519 | #else |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2520 | return 0; |
| 2521 | #endif |
| 2522 | } |
| 2523 | |
| 2524 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) |
| 2525 | { |
| 2526 | #if defined(CONFIG_DEBUG_FS) |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2527 | unsigned i; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2528 | |
Tom St Denis | a40cfa0 | 2017-09-18 07:14:56 -0400 | [diff] [blame] | 2529 | for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) |
| 2530 | debugfs_remove(adev->mman.debugfs_entries[i]); |
Christian König | a1d2947 | 2016-03-30 14:42:57 +0200 | [diff] [blame] | 2531 | #endif |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 2532 | } |