/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
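
/*
 * Example (illustrative, not from this header): the current placement of
 * a BO maps back to a GEM domain as
 *
 *	amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) == AMDGPU_GEM_DOMAIN_VRAM
 *
 * for a BO resident in VRAM; unknown placements map to 0.
 */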

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
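
/*
 * Usage sketch (illustrative only; the kmap step and error handling are
 * assumptions, not taken from this header): callers bracket accesses to a
 * BO's placement or mapping with a reserve/unreserve pair:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	amdgpu_bo_unreserve(bo);
 */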

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
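
/*
 * Usage sketch (userspace side, shown for context; drm_fd and size are
 * assumed to come from the caller): the returned fake offset is what a
 * process passes to mmap() on the DRM file descriptor to map the BO:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, offset);
 */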

/**
 * amdgpu_bo_gpu_accessible - check whether the bo is in GPU-accessible memory
 * @bo: amdgpu object to check
 *
 * Returns true if the bo is currently placed in memory that is accessible
 * to the GPU (i.e. not in the unbound TTM_PL_SYSTEM placement).
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
}
125
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400126int amdgpu_bo_create(struct amdgpu_device *adev,
127 unsigned long size, int byte_align,
128 bool kernel, u32 domain, u64 flags,
129 struct sg_table *sg,
Christian König72d76682015-09-03 17:34:59 +0200130 struct reservation_object *resv,
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400131 struct amdgpu_bo **bo_ptr);
Chunming Zhou7e5a5472015-04-24 17:37:30 +0800132int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
133 unsigned long size, int byte_align,
134 bool kernel, u32 domain, u64 flags,
135 struct sg_table *sg,
136 struct ttm_placement *placement,
Christian König72d76682015-09-03 17:34:59 +0200137 struct reservation_object *resv,
Chunming Zhou7e5a5472015-04-24 17:37:30 +0800138 struct amdgpu_bo **bo_ptr);
Christian König7c204882015-12-14 13:18:01 +0100139int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
140 unsigned long size, int align,
141 u32 domain, struct amdgpu_bo **bo_ptr,
142 u64 *gpu_addr, void **cpu_addr);
Junwei Zhangaa1d5622016-09-08 10:13:32 +0800143void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
144 void **cpu_addr);
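
/*
 * Usage sketch (illustrative; the 4K GTT scratch buffer is an assumption,
 * not taken from this header): amdgpu_bo_create_kernel() allocates, pins
 * and maps a kernel-owned BO in one call, and amdgpu_bo_free_kernel()
 * undoes all three:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */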
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
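
/*
 * Pinning sketch (illustrative; assumes the caller holds the reservation,
 * which amdgpu_bo_pin() requires):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 */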
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
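
/*
 * Sub-allocation sketch (illustrative; the manager size/alignment values
 * are assumptions): a manager carves small, fence-tracked allocations out
 * of a single backing BO:
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &mgr, 64 * 1024, 256,
 *				      AMDGPU_GEM_DOMAIN_GTT);
 *	r = amdgpu_sa_bo_new(&mgr, &sa_bo, 256, 256);
 *	... use amdgpu_sa_bo_gpu_addr(sa_bo) / amdgpu_sa_bo_cpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */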

#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif

#endif