/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

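/**
 * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
 *
 * Takes references on the TTM memory accounting and BO global objects, sets
 * up the GTT window lock and creates the kernel-priority scheduler entity
 * used for buffer moves.
 */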
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

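/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request (system, GTT, VRAM, GDS/GWS/OA).
 *
 * Fills in the manager functions, the GPU base offset of the domain and the
 * allowed caching and placement flags.
 */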
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

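/**
 * amdgpu_evict_flags - Compute placement flags for a buffer object that is
 * being evicted.
 *
 * For VRAM BOs, prefer the CPU inaccessible part of VRAM with GTT as the
 * only busy placement; everything else falls back to GTT or system memory.
 */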
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

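/**
 * amdgpu_verify_access - Verify access for a mmap call.
 *
 * Rejects mapping of userptr BOs and otherwise defers to the DRM VMA offset
 * manager's per-file access check.
 */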
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

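/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a drm_mm_node.
 *
 * Returns 0 for GTT allocations that do not have a GART address assigned yet.
 */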
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 * corresponding to @offset. It also modifies the offset to be
 * within the drm_mm_node returned
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

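/**
 * amdgpu_move_blit - Copy an entire buffer object to another buffer object
 * using the copy engine, then fence the destination with TTM's pipelined
 * move helper.
 */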
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

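/**
 * amdgpu_move_vram_ram - Copy a VRAM buffer to system memory.
 *
 * Stages the buffer in a temporary GTT placement, blits VRAM to GTT and
 * then lets TTM finish the GTT to system move.
 */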
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

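/**
 * amdgpu_move_ram_vram - Copy a system memory buffer into VRAM.
 *
 * The inverse of amdgpu_move_vram_ram(): TTM first moves the pages into a
 * GTT placement, then the copy engine blits GTT to VRAM.
 */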
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

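/**
 * amdgpu_bo_move - Move a buffer object to a new memory location.
 *
 * Uses a null move when only bookkeeping changes, a blit when the copy
 * engine is available and a CPU memcpy as the fallback.
 */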
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

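/**
 * amdgpu_ttm_io_mem_reserve - Reserve the IO address space needed to mmap a
 * memory region and check that VRAM accesses fall inside the CPU visible
 * aperture.
 */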
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};

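/**
 * amdgpu_ttm_tt_get_user_pages - Pin the pages backing a userptr BO.
 *
 * Records the calling task in gtt->guptasks while get_user_pages() runs so
 * that a concurrent MMU invalidation can detect the race.
 */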
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&current->mm->mmap_sem);
	return r;
}

void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}

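/**
 * amdgpu_ttm_gart_bind - Write a BO's page table entries into the GART.
 *
 * For GFX9 MQD buffers the first page keeps the default MTYPE while the
 * remaining pages are rebound with MTYPE_NC; all other BOs are bound in a
 * single call.
 */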
int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
			 struct ttm_buffer_object *tbo,
			 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				     ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* Patch mtype of the second part BO */
		flags &= ~AMDGPU_PTE_MTYPE_MASK;
		flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				     gtt->offset + (page_idx << PAGE_SHIFT),
				     ttm->num_pages - page_idx,
				     &ttm->pages[page_idx],
				     &(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}

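/**
 * amdgpu_ttm_backend_bind - Bind a ttm_tt to a memory region, pinning
 * userptr pages first if needed. BOs without a static GART address are
 * bound on demand later via amdgpu_ttm_alloc_gart().
 */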
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

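/**
 * amdgpu_ttm_alloc_gart - Make sure a buffer object is accessible through
 * the GART by allocating an address range inside the GART aperture and
 * binding the pages there.
 */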
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_ttm_gart_bind(adev, bo, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

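/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * BOs fenced by a KFD process are never evicted; VRAM BOs are only worth
 * evicting if one of their drm_mm_nodes intersects the requested placement
 * window.
 */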
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct reservation_object_list *flist;
	struct dma_fence *f;
	int i;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = reservation_object_get_list(bo->resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				reservation_object_held(bo->resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

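/**
 * amdgpu_ttm_access_memory - Read or write VRAM behind a BO word by word
 * through the MM_INDEX/MM_DATA registers (the TTM access_memory hook,
 * reached e.g. via ptrace() access to a mapped BO).
 */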
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

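/* TTM buffer object callbacks wired up for amdgpu */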
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the firmware reserved VRAM, if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * Creates a BO covering the VRAM range that the firmware has reserved,
 * pins it at the requested offset and maps it for CPU access.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_param bp;
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	memset(&bp, 0, sizeof(bp));
	bp.size = adev->fw_vram_usage.size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
	    adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, &bp,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * requested position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size), NULL);
		if (r)
			goto error_pin;
		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}

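/**
 * amdgpu_ttm_init - initialize the memory manager for the device
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up the TTM device, creates the VRAM and GTT heaps, reserves the
 * firmware and stolen-VGA portions of VRAM, initializes the GDS, GWS and
 * OA heaps and registers the TTM debugfs entries.
 */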
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	/* No other users of this address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system page allocations */
	adev->mman.bdev.no_retry = true;

	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The VRAM reserved for firmware must be pinned to the specified
	 * place in VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	if (adev->gmc.stolen_size) {
		r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM,
					    &adev->stolen_vga_memory,
					    NULL, NULL);
		if (r)
			return r;
	}
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
				   adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else {
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	}
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	/* GWS */
	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	/* OA */
	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

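/**
 * amdgpu_ttm_late_init - free the stolen VGA memory reservation
 *
 * @adev: amdgpu_device pointer
 *
 * Releases the stolen VGA memory reserved in amdgpu_ttm_init() once it
 * is no longer needed.
 */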
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
}

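/**
 * amdgpu_ttm_fini - tear down the memory manager
 *
 * @adev: amdgpu_device pointer
 *
 * Reverses amdgpu_ttm_init(): frees the firmware VRAM reservation,
 * unmaps the visible-VRAM aperture and destroys all TTM heaps.
 */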
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;

	if (!adev->mman.initialized || adev->in_gpu_reset)
		return;

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

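/*
 * File mmap entry point: validate that the offset lies in the TTM range
 * and hand the mapping off to ttm_bo_mmap().
 */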
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

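/*
 * Map @num_pages of the BO at @offset into one of the GART transfer
 * windows, so the copy engine can reach memory that has no GPU address
 * yet. The IB built here copies the freshly generated page table entries
 * into the GART table, and the window's GART address is returned
 * through @addr.
 */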
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

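/**
 * amdgpu_copy_buffer - schedule a GPU copy between two GPU addresses
 *
 * @ring: ring to schedule the copy on, usually the buffer_funcs ring
 * @src_offset: GPU address to copy from
 * @dst_offset: GPU address to copy to
 * @byte_count: number of bytes to copy
 * @resv: optional reservation object to wait on before copying
 * @fence: returned fence that signals completion
 * @direct_submit: bypass the scheduler and submit the IB immediately
 * @vm_needs_flush: request a VM flush before the copy runs
 *
 * Splits the transfer into copy_max_bytes sized chunks and emits one
 * copy packet per chunk into a single IB.
 *
 * A minimal caller sketch (hypothetical, for illustration only):
 *
 *	struct dma_fence *f = NULL;
 *
 *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src, dst,
 *			       size, NULL, &f, false, false);
 *	if (!r)
 *		dma_fence_wait(f, false);
 *	dma_fence_put(f);
 */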
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

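/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern
 *
 * @bo: the buffer object to fill
 * @src_data: 32-bit value the buffer is filled with
 * @resv: optional reservation object to wait on first
 * @fence: returned fence that signals completion
 *
 * Walks the drm_mm nodes backing the BO and emits fill packets of at
 * most fill_max_bytes each; GTT BOs are bound into the GART first so
 * that they have a valid GPU address.
 */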
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

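/* Dump the current state of one TTM memory manager (VRAM or GTT) */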
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

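/*
 * Debugfs read for the amdgpu_vram file: reads VRAM word by word
 * through the MM_INDEX/MM_DATA aperture.
 */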
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

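/* Debugfs write counterpart of amdgpu_ttm_vram_read(), same word-sized access */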
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

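/*
 * Debugfs read for the amdgpu_gtt file: dumps the GART page by page;
 * unmapped pages read back as zeros.
 */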
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

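/*
 * Debugfs access for the amdgpu_iomem file: translates the IOVA to a
 * physical page through the IOMMU, if one is present, and only allows
 * access to pages that belong to this device's TTM mapping.
 */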
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		/* advance the user buffer too, or a read spanning a page
		 * boundary overwrites the start of @buf
		 */
		buf += bytes;
		result += bytes;
	}

	return result;
}

static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		/* advance the user buffer too, see amdgpu_iomem_read() */
		buf += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

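/**
 * amdgpu_ttm_debugfs_init - register the TTM debugfs files
 *
 * @adev: amdgpu_device pointer
 *
 * Creates the files listed in ttm_debugfs_entries, sizes them to match
 * the underlying heaps, and registers the memory manager dump tables.
 */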
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}