/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

static bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
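
/*
 * Example (illustrative note, not part of the original file): callers
 * typically pair amdgpu_ttm_placement_from_domain() with ttm_bo_validate(),
 * in the same way amdgpu_bo_validate() further below does:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	amdgpu_ttm_placement_from_domain(bo, bo->preferred_domains);
 *	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 */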

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr points to NULL.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr points to NULL.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 *
 * Unmaps, unpins and frees a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
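
/*
 * Example (illustrative only, the calling context is hypothetical): a typical
 * lifetime of a small kernel-owned BO using the two helpers above:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */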

/* Validate that the requested BO size fits within the total memory of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = bp->domain & (AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT |
					      AMDGPU_GEM_DOMAIN_CPU |
					      AMDGPU_GEM_DOMAIN_GDS |
					      AMDGPU_GEM_DOMAIN_GWS |
					      AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_ttm_placement_from_domain(bo, bp->domain);

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
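
/*
 * Example (illustrative only): amdgpu_bo_create() takes its parameters
 * through struct amdgpu_bo_param, filled in the same way
 * amdgpu_bo_create_reserved() and amdgpu_bo_create_shadow() above fill it:
 *
 *	struct amdgpu_bo_param bp;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */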

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)

{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)

{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}
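
/*
 * Example (illustrative only): pinning and unpinning must be done with the
 * BO reserved, as amdgpu_bo_create_reserved() above does for kernel BOs:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *		amdgpu_bo_unreserve(bo);
 *	}
 *	...
 *	(later, again under reservation)
 *	amdgpu_bo_unpin(bo);
 */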

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
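
/*
 * Example (illustrative only): amdgpu_bo_do_create() above uses this helper
 * to attach the VRAM clear fence to a freshly created BO:
 *
 *	r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 *	if (!r)
 *		amdgpu_bo_fence(bo, fence, false);
 */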

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}