/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns the GEM domain corresponding to the TTM @mem_type, or 0 if the
 * memory type has no matching domain.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
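
/*
 * Illustrative sketch only (not part of the API): mapping a buffer's current
 * TTM placement back to a GEM domain, e.g. when reporting where a buffer
 * ended up.  Assumes a hypothetical, already reserved 'bo':
 *
 *	u32 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	bool in_vram = (domain == AMDGPU_GEM_DOMAIN_VRAM);
 */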

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
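
/*
 * Illustrative usage sketch only (assumes a hypothetical, already created
 * 'bo'; error handling trimmed).  The buffer must be reserved around any
 * placement manipulation, and the return value is propagated so that
 * -ERESTARTSYS can unwind back to user-space:
 *
 *	int r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... pin, kmap or otherwise touch the buffer's placement ...
 *	amdgpu_bo_unreserve(bo);
 */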

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory
 * that is accessible to the GPU
 * @bo: amdgpu object to check
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
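
/*
 * Illustrative sketch only of the kernel BO helpers above (hypothetical
 * 'adev'; error handling trimmed).  amdgpu_bo_create_kernel() allocates,
 * pins and (if @cpu_addr is non-NULL) maps a buffer in one call;
 * amdgpu_bo_free_kernel() undoes all of it:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &bo, &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	... use gpu_addr / cpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
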
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
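
/*
 * Illustrative pin/unpin sketch only (hypothetical, already created 'bo';
 * error handling trimmed).  Pinning and unpinning must be done with the
 * buffer reserved:
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *	... the buffer stays at gpu_addr until amdgpu_bo_unpin() ...
 */
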
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
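
/*
 * Illustrative sub-allocation sketch only (hypothetical 'sa_manager',
 * 'adev' and 'fence'; error handling trimmed).  Small, short-lived buffers
 * are carved out of one backing BO, and the sub-allocation is recycled once
 * the given fence signals:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 256);
 *	if (r)
 *		return r;
 *	... fill amdgpu_sa_bo_cpu_addr(sa_bo), submit using
 *	    amdgpu_sa_bo_gpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
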
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif


#endif