/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

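/* Implemented in amdgpu_ttm.c; forward-declared here for
 * amdgpu_bo_init()/amdgpu_bo_fini() below.
 */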
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

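/**
 * amdgpu_get_vis_part_size - CPU-visible portion of a memory region
 *
 * @adev: amdgpu device
 * @mem: TTM memory region to check
 *
 * Returns the number of bytes of @mem that fall inside the CPU-visible
 * part of VRAM, or 0 if the region starts beyond it.
 */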
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;
	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			adev->mc.visible_vram_size ?
			adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			mem->size;
	}
	return ret;
}

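/**
 * amdgpu_update_memory_usage - track GTT/VRAM usage counters
 *
 * @adev: amdgpu device
 * @old_mem: region the BO is leaving (may be NULL)
 * @new_mem: region the BO is entering (may be NULL)
 *
 * Adds @new_mem to and subtracts @old_mem from the per-device usage
 * counters, including the CPU-visible VRAM counter.
 */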
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

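/**
 * amdgpu_ttm_bo_destroy - TTM destroy callback for amdgpu BOs
 *
 * Drops the memory accounting for the BO and frees it together with
 * its metadata. Installed as the &ttm_buffer_object destroy callback,
 * which also makes it the identity check used by
 * amdgpu_ttm_bo_is_amdgpu_bo().
 */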
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	kfree(bo->metadata);
	kfree(bo);
}

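/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a TTM BO belongs to this driver
 *
 * Identifies amdgpu BOs by their destroy callback, so it is safe to
 * call on any &ttm_buffer_object.
 */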
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &amdgpu_ttm_bo_destroy;
}

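/**
 * amdgpu_ttm_placement_init - build a TTM placement list
 *
 * @adev: amdgpu device
 * @placement: placement descriptor to fill in
 * @placements: backing array for the individual entries
 * @domain: mask of AMDGPU_GEM_DOMAIN_* bits to allow
 * @flags: AMDGPU_GEM_CREATE_* flags of the BO
 *
 * Translates an amdgpu domain mask and creation flags into the TTM
 * placement entries used for allocation and validation. Falls back to
 * cached system memory when no domain bit is set.
 */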
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
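 *
 * A minimal usage sketch (illustrative only; the chosen size, domain
 * and surrounding error handling are assumptions, not taken from this
 * file):
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;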
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	amdgpu_bo_unref(bo_ptr);

	return r;
}

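/**
 * amdgpu_bo_create_restricted - create a BO with a caller-supplied placement
 *
 * @adev: amdgpu device
 * @size: size in bytes, rounded up to page size
 * @byte_align: alignment in bytes
 * @kernel: true for a kernel-internal allocation (uninterruptible validation)
 * @domain: mask of AMDGPU_GEM_DOMAIN_* bits recorded on the BO
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table backing the BO
 * @placement: TTM placement to use instead of deriving one from @domain
 * @resv: optional reservation object to share with another BO
 * @bo_ptr: resulting BO
 *
 * Lower-level variant of amdgpu_bo_create() that takes an explicit
 * placement. Returns 0 on success, negative error code otherwise.
 */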
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct fence *fence;

		if (adev->mman.buffer_funcs_ring == NULL ||
		    !adev->mman.buffer_funcs_ring->ready) {
			r = -EBUSY;
			goto fail_free;
		}

		r = amdgpu_bo_reserve(bo, false);
		if (unlikely(r != 0))
			goto fail_free;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (unlikely(r != 0))
			goto fail_unreserve;

		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		amdgpu_bo_fence(bo, fence, false);
		amdgpu_bo_unreserve(bo);
		fence_put(bo->tbo.moving);
		bo->tbo.moving = fence_get(fence);
		fence_put(fence);
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;

fail_unreserve:
	amdgpu_bo_unreserve(bo);
fail_free:
	amdgpu_bo_unref(&bo);
	return r;
}

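/**
 * amdgpu_bo_create - create an amdgpu BO
 *
 * Convenience wrapper around amdgpu_bo_create_restricted() that derives
 * the TTM placement from @domain and @flags via
 * amdgpu_ttm_placement_init().
 */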
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

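/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map; must be reserved by the caller
 * @ptr: returns the kernel virtual address, may be NULL
 *
 * Waits for pending work on the BO, then maps it through TTM. The
 * mapping is cached in @bo->kptr, so repeated calls are cheap.
 * Returns 0 on success, negative error code otherwise.
 */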
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

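/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin; must be reserved by the caller
 * @domain: domain to pin the BO into
 * @min_offset: lowest acceptable offset within the domain
 * @max_offset: highest acceptable offset, or 0 for no upper limit
 * @gpu_addr: optional return of the resulting GPU address
 *
 * Pinning is reference counted: pinning an already pinned BO only
 * increments the count. Userptr BOs cannot be pinned. Returns 0 on
 * success, negative error code otherwise.
 */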
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;
			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force pinning into visible VRAM */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

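/**
 * amdgpu_bo_unpin - decrement the pin count of a BO
 *
 * @bo: BO to unpin; must be reserved by the caller
 *
 * Once the count reaches zero the NO_EVICT flag is dropped, the BO is
 * revalidated and the pinned-size accounting is updated. Returns 0 on
 * success, negative error code otherwise.
 */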
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

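/**
 * amdgpu_bo_move_notify - TTM notification that a BO is about to move
 *
 * @bo: the buffer object moving
 * @new_mem: the region it is moving into, or NULL on destruction
 *
 * Invalidates any VM mappings of the BO and updates the memory usage
 * statistics. Called by TTM before the move actually happens.
 */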
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}

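/**
 * amdgpu_bo_fault_reserve_notify - handle a CPU fault on invisible VRAM
 *
 * @bo: the faulting buffer object
 *
 * If the faulting page lies outside the CPU-visible part of VRAM, try
 * to revalidate the BO into visible VRAM, falling back to GTT when
 * visible VRAM is exhausted. Pinned BOs cannot be moved and fail with
 * -EINVAL.
 */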
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah, the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);

	return bo->tbo.offset;
}