/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
		return 0;

	return ((mem->start << PAGE_SHIFT) + mem->size) >
		adev->mc.visible_vram_size ?
		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
		mem->size;
}
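
/*
 * Worked example for the clamping above (numbers assumed purely for
 * illustration, with 4 KiB pages): if visible_vram_size is 256 MiB and a
 * 16 MiB buffer starts at byte offset 255 MiB, only its first 1 MiB falls
 * inside the CPU-visible window, so 1 MiB is returned.  A buffer lying
 * entirely below 256 MiB returns its full size; one starting at or above
 * 256 MiB returns 0.
 */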

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_bo_kunmap(bo);
	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &amdgpu_ttm_bo_destroy;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *places,
				      u32 domain, u64 flags)
{
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);

	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
				  domain, abo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.  If *bo_ptr is non-NULL, the existing BO is reused instead of
 * allocating a new one.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	bool free = false;
	int r;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, size, align, true, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, 0, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
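
/*
 * Usage sketch (illustrative only; the local names are hypothetical).
 * Passing *bo_ptr == NULL asks the function to allocate the BO; on
 * success the BO comes back pinned and still reserved, so the caller can
 * fill it safely and must unreserve it when done:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				      &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	memset(cpu_ptr, 0, PAGE_SIZE);
 *	amdgpu_bo_unreserve(bo);
 */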

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: GPU address of the pinned BO, reset to 0 if non-NULL
 * @cpu_addr: CPU address mapping, reset to NULL if non-NULL
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
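
/*
 * Usage sketch for the two helpers above (illustrative only; the
 * adev->foo.* field names are hypothetical).  A typical lifetime pairs
 * amdgpu_bo_create_kernel() at init time with amdgpu_bo_free_kernel()
 * at teardown, e.g. for a small GTT scratch buffer:
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &adev->foo.bo,
 *				    &adev->foo.gpu_addr, &adev->foo.cpu_ptr);
 *
 *	...
 *
 *	amdgpu_bo_free_kernel(&adev->foo.bo, &adev->foo.gpu_addr,
 *			      &adev->foo.cpu_ptr);
 */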

int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				uint64_t init_value,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	u64 initial_bytes_moved, bytes_moved;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */

	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, !kernel, NULL,
				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
		      initial_bytes_moved;
	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);

	if (unlikely(r != 0))
		return r;

	if (kernel)
		bo->tbo.priority = 1;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	if (bo->shadow)
		return 0;

	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, AMDGPU_GEM_DOMAIN_GTT,
				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
					AMDGPU_GEM_DOMAIN_GTT,
					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
					NULL, &placement,
					bo->tbo.resv,
					0,
					&bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/* init_value will only take effect when flags contains
 * AMDGPU_GEM_CREATE_VRAM_CLEARED.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     uint64_t init_value,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					domain, flags, sg, &placement,
					resv, init_value, bo_ptr);
	if (r)
		return r;

	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
		if (!resv) {
			r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
			WARN_ON(r != 0);
		}

		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

		if (!resv)
			ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
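
/*
 * Usage sketch (illustrative only): creating a 1 MiB, CPU-inaccessible
 * VRAM BO whose contents are cleared on allocation.  The init_value
 * argument (0 here) is only consumed because
 * AMDGPU_GEM_CREATE_VRAM_CLEARED is set, per the comment above:
 *
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, 1 << 20, PAGE_SIZE, false,
 *			     AMDGPU_GEM_DOMAIN_VRAM,
 *			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 *			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
 *			     NULL, NULL, 0, &bo);
 */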

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->prefered_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
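
/*
 * Usage sketch (illustrative only; buf and len are hypothetical).
 * amdgpu_bo_kmap() returns the existing mapping if one is already set
 * up, so it is cheap to call repeatedly; the mapping stays valid until
 * amdgpu_bo_kunmap() or until the BO is destroyed:
 *
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (r)
 *		return r;
 *	memcpy(cpu_ptr, buf, len);
 *	amdgpu_bo_kunmap(bo);
 */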

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (domain != amdgpu_mem_type_to_domain(mem_type))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;

			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
		     adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL) {
		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (unlikely(r)) {
			dev_err(adev->dev, "%p bind failed\n", bo);
			goto error;
		}
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	}
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}
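
/*
 * Usage sketch (illustrative only): pinning is reference counted, so
 * each successful pin must be balanced by an unpin, and the BO has to
 * be reserved around both calls:
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *
 * and later, again under reservation:
 *
 *	amdgpu_bo_unpin(bo);
 */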

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->mc.aper_base,
				   adev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
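
/*
 * Usage sketch (illustrative only; my_buf is hypothetical).  A two-call
 * pattern works for amdgpu_bo_get_metadata(): first query the size with
 * buffer == NULL, then fetch the payload:
 *
 *	uint32_t size;
 *	uint64_t flags;
 *	int r;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, &flags);
 *	if (!r && size) {
 *		void *my_buf = kmalloc(size, GFP_KERNEL);
 *
 *		if (my_buf)
 *			r = amdgpu_bo_get_metadata(bo, my_buf, size,
 *						   &size, &flags);
 *	}
 */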

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(adev, abo);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
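
/*
 * Usage sketch (illustrative only): after scheduling a GPU operation
 * that writes the BO, attach the resulting fence exclusively so later
 * users wait for the write; read-only users would pass shared = true
 * instead (this mirrors the pattern in amdgpu_bo_create_restricted()):
 *
 *	amdgpu_bo_fence(bo, fence, false);
 *	dma_fence_put(fence);
 */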

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: the object should be either pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}