/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"


int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

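/**
 * amdgpu_get_vis_part_size - how much of a memory region lies in visible VRAM
 *
 * @adev: amdgpu device object
 * @mem: TTM memory region to check
 *
 * Returns the number of bytes of @mem that fall below
 * adev->mc.visible_vram_size, i.e. the CPU-visible part of the
 * allocation.
 */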
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;
	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			adev->mc.visible_vram_size ?
			adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			mem->size;
	}
	return ret;
}

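/**
 * amdgpu_update_memory_usage - track per-domain memory usage
 *
 * @adev: amdgpu device object
 * @old_mem: memory region the BO is leaving, or NULL
 * @new_mem: memory region the BO is entering, or NULL
 *
 * Adds @new_mem to and subtracts @old_mem from the GTT/VRAM usage
 * counters, including the CPU-visible VRAM portion.
 */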
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

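/**
 * amdgpu_ttm_bo_destroy - final cleanup when the last TTM reference is gone
 *
 * @tbo: TTM buffer object to destroy
 *
 * Updates the usage statistics, releases the GEM object, drops the
 * reference to the parent BO and frees the metadata and the BO itself.
 */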
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	kfree(bo->metadata);
	kfree(bo);
}

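/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a TTM BO belongs to this driver
 *
 * @bo: TTM buffer object to check
 *
 * Returns true if @bo uses amdgpu_ttm_bo_destroy as its destroy callback
 * and can therefore safely be cast to an amdgpu_bo.
 */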
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

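/**
 * amdgpu_ttm_placement_init - fill a TTM placement list for a domain mask
 *
 * @adev: amdgpu device object
 * @placement: placement to initialize
 * @placements: array that receives the individual placement entries
 * @domain: mask of AMDGPU_GEM_DOMAIN_* flags
 * @flags: AMDGPU_GEM_CREATE_* flags that tweak caching and CPU visibility
 *
 * Translates the domain mask and creation flags into TTM placement
 * entries, falling back to system memory if no domain is given. When
 * CPU access is required, VRAM entries are clamped to the CPU-visible
 * range.
 */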
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

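/**
 * amdgpu_ttm_placement_from_domain - reset a BO's placement to a domain mask
 *
 * @rbo: BO to update
 * @domain: mask of AMDGPU_GEM_DOMAIN_* flags
 */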
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

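/**
 * amdgpu_fill_placement_to_bo - copy an external placement into a BO
 *
 * @bo: BO to update
 * @placement: placement to copy from
 */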
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	amdgpu_bo_unref(bo_ptr);

	return r;
}

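/*
 * A minimal usage sketch for amdgpu_bo_create_kernel(); illustrative
 * only, the name "vram_scratch" and the 4 KiB size are hypothetical and
 * not taken from this file:
 *
 *	struct amdgpu_bo *vram_scratch;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &vram_scratch,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	// the BO is now pinned; use cpu_addr / gpu_addr
 */

/**
 * amdgpu_bo_create_restricted - create a BO with a caller-supplied placement
 *
 * @adev: amdgpu device object
 * @size: size of the new BO in bytes
 * @byte_align: alignment in bytes
 * @kernel: true for an uninterruptible kernel-internal allocation
 * @domain: mask of AMDGPU_GEM_DOMAIN_* flags
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @placement: placement to use instead of deriving one from @domain
 * @resv: optional reservation object to share with other BOs
 * @bo_ptr: resulting BO
 *
 * Returns 0 on success, negative error code otherwise.
 */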
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		return r;
	}

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct fence *fence;

		if (adev->mman.buffer_funcs_ring == NULL ||
		    !adev->mman.buffer_funcs_ring->ready) {
			r = -EBUSY;
			goto fail_free;
		}

		r = amdgpu_bo_reserve(bo, false);
		if (unlikely(r != 0))
			goto fail_free;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (unlikely(r != 0))
			goto fail_unreserve;

		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		amdgpu_bo_fence(bo, fence, false);
		amdgpu_bo_unreserve(bo);
		fence_put(bo->tbo.moving);
		bo->tbo.moving = fence_get(fence);
		fence_put(fence);
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;

fail_unreserve:
	amdgpu_bo_unreserve(bo);
fail_free:
	amdgpu_bo_unref(&bo);
	return r;
}

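/**
 * amdgpu_bo_create - create a BO from a domain mask
 *
 * @adev: amdgpu device object
 * @size: size of the new BO in bytes
 * @byte_align: alignment in bytes
 * @kernel: true for an uninterruptible kernel-internal allocation
 * @domain: mask of AMDGPU_GEM_DOMAIN_* flags
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @resv: optional reservation object to share with other BOs
 * @bo_ptr: resulting BO
 *
 * Convenience wrapper around amdgpu_bo_create_restricted() that derives
 * the TTM placement from @domain and @flags.
 */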
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

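/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map
 * @ptr: optional location that receives the kernel virtual address
 *
 * Waits for the BO's exclusive fence, then maps the BO with
 * ttm_bo_kmap(). Fails with -EPERM for BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS.
 */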
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

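/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin
 * @domain: domain to pin the BO into
 * @min_offset: lowest acceptable GPU offset of the BO
 * @max_offset: highest acceptable GPU offset, or 0 for no upper limit
 * @gpu_addr: optional location that receives the pinned GPU address
 *
 * Pinned BOs are excluded from eviction; pinning an already pinned BO
 * only increases the pin count. Userptr BOs cannot be pinned. CPU
 * accessible VRAM pins are forced into the visible part of VRAM unless
 * @max_offset demands otherwise.
 */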
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;
			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

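/**
 * amdgpu_bo_pin - pin a BO anywhere inside a domain
 *
 * @bo: BO to pin
 * @domain: domain to pin the BO into
 * @gpu_addr: optional location that receives the pinned GPU address
 *
 * Shorthand for amdgpu_bo_pin_restricted() with no offset restrictions.
 */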
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

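/**
 * amdgpu_bo_unpin - drop one pin reference of a BO
 *
 * @bo: BO to unpin
 *
 * Once the pin count reaches zero the BO becomes evictable again and
 * the pinned-size accounting is updated.
 */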
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

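/**
 * amdgpu_bo_evict_vram - evict all BOs from VRAM
 *
 * @adev: amdgpu device object
 *
 * Migrates VRAM contents to system memory, e.g. so they survive the
 * device powering down across suspend/hibernate.
 */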
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

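/**
 * amdgpu_bo_init - initialize memory manager
 *
 * @adev: amdgpu device object
 *
 * Sets up a write-combining MTRR for the VRAM aperture, reports the
 * detected VRAM configuration and initializes TTM.
 */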
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

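/**
 * amdgpu_bo_move_notify - notification about a BO being moved
 *
 * @bo: TTM BO that is moving
 * @new_mem: new placement, or NULL
 *
 * Called by TTM before the move happens; invalidates the VM mappings
 * of the BO and updates the usage statistics.
 */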
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}

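/**
 * amdgpu_bo_fault_reserve_notify - fault handler for CPU access to VRAM BOs
 *
 * @bo: TTM BO that the CPU is about to fault on
 *
 * If the BO lies outside the CPU-visible part of VRAM, try to move it
 * into visible VRAM, falling back to GTT when that fails. Returns
 * -EINVAL for pinned BOs, which cannot be moved.
 */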
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah, the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);

	return bo->tbo.offset;
}