/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

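/**
 * amdgpu_get_vis_part_size - CPU-visible portion of a VRAM placement
 *
 * @adev: amdgpu device the placement belongs to
 * @mem: TTM memory placement to inspect
 *
 * Returns how many bytes of @mem fall inside the CPU-visible part of
 * VRAM, or zero if the placement starts beyond it.
 */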
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;

	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			adev->mc.visible_vram_size ?
			adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			mem->size;
	}
	return ret;
}

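/**
 * amdgpu_update_memory_usage - track per-device GTT/VRAM usage counters
 *
 * @adev: amdgpu device
 * @old_mem: placement being vacated, or NULL
 * @new_mem: placement being occupied, or NULL
 *
 * Adds @new_mem to and subtracts @old_mem from the atomic usage
 * counters, including the CPU-visible VRAM counter.
 */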
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

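/**
 * amdgpu_ttm_bo_destroy - TTM destroy callback for amdgpu BOs
 *
 * @tbo: TTM buffer object being destroyed
 *
 * Drops the usage accounting, unregisters any MMU notifier, removes the
 * BO from the device's GEM object list and frees the backing storage.
 */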
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
	amdgpu_mn_unregister(bo);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo->metadata);
	kfree(bo);
}

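/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a TTM BO was created by this driver
 *
 * @bo: TTM buffer object to check
 *
 * Identified via the destroy callback, since only amdgpu-created BOs use
 * amdgpu_ttm_bo_destroy.
 */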
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &amdgpu_ttm_bo_destroy;
}

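/**
 * amdgpu_ttm_placement_from_domain - fill a BO's TTM placement list
 *
 * @rbo: amdgpu BO to set up
 * @domain: mask of AMDGPU_GEM_DOMAIN_* flags
 *
 * Translates the requested domain mask and the BO's creation flags into
 * the TTM placement array, falling back to system memory when no domain
 * bit is set, and clamping CPU-accessible VRAM placements to the visible
 * part of the aperture.
 */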
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (rbo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    rbo->adev->mc.visible_vram_size < rbo->adev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->adev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	/* initialize fpfn here too, so stale values from an earlier call
	 * are never reused */
	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; i++) {
		/* clamp CPU-accessible VRAM placements to the visible aperture */
		if ((rbo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

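/**
 * amdgpu_bo_create - create an amdgpu buffer object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @byte_align: requested alignment in bytes
 * @kernel: true for kernel-internal allocations (uninterruptible)
 * @domain: initial AMDGPU_GEM_DOMAIN_* placement mask
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table for imported buffers
 * @bo_ptr: where to return the created BO
 *
 * Returns 0 on success, negative error code on failure.
 */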
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
	 * Do this as a temporary workaround.
	 */
	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		if (adev->asic_type >= CHIP_TOPAZ) {
			if (byte_align & 0x7fff)
				byte_align = ALIGN(byte_align, 0x8000);
			if (size & 0x7fff)
				size = ALIGN(size, 0x8000);
		}
	}

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT |
				       AMDGPU_GEM_DOMAIN_CPU |
				       AMDGPU_GEM_DOMAIN_GDS |
				       AMDGPU_GEM_DOMAIN_GWS |
				       AMDGPU_GEM_DOMAIN_OA);

	bo->flags = flags;
	amdgpu_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&adev->pm.mclk_lock);
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
	up_read(&adev->pm.mclk_lock);
	if (unlikely(r != 0))
		return r;
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

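/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: amdgpu BO to map
 * @ptr: optional return of the kernel virtual address
 *
 * Reuses an existing mapping if one is present. Returns 0 on success,
 * negative error code on failure.
 */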
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

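/**
 * amdgpu_bo_kunmap - drop the kernel mapping of a BO
 *
 * @bo: amdgpu BO to unmap; a no-op if the BO is not currently mapped
 */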
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

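/**
 * amdgpu_bo_ref - take a reference on a BO
 *
 * @bo: amdgpu BO, may be NULL
 *
 * Returns @bo with its TTM reference count incremented, or NULL.
 */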
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

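/**
 * amdgpu_bo_unref - drop a reference on a BO
 *
 * @bo: pointer to the amdgpu BO pointer; cleared once the
 * reference is dropped
 */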
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

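/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: amdgpu BO to pin
 * @domain: AMDGPU_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset, 0 for no restriction
 * @gpu_addr: optional return of the pinned GPU address
 *
 * Userptr BOs cannot be pinned. Pinning an already pinned BO just bumps
 * the pin count. Returns 0 on success, negative error code on failure.
 */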
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

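/**
 * amdgpu_bo_pin - pin a BO without an address restriction
 *
 * @bo: amdgpu BO to pin
 * @domain: AMDGPU_GEM_DOMAIN_* to pin into
 * @gpu_addr: optional return of the pinned GPU address
 */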
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

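/**
 * amdgpu_bo_unpin - drop a pin reference and make the BO evictable again
 *
 * @bo: amdgpu BO to unpin
 *
 * Only revalidates the BO once the pin count reaches zero. Returns 0 on
 * success, negative error code on failure.
 */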
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

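/**
 * amdgpu_bo_evict_vram - evict all buffers out of VRAM
 *
 * @adev: amdgpu device
 *
 * Asks TTM to empty the VRAM memory manager, e.g. around suspend.
 */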
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMDGPU_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

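/**
 * amdgpu_bo_force_delete - tear down GEM objects userspace left behind
 *
 * @adev: amdgpu device
 *
 * Complains about and force-frees any BOs still on the device's GEM
 * object list.
 */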
void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo, *n;

	if (list_empty(&adev->gem.objects))
		return;

	dev_err(adev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
		mutex_lock(&adev->ddev->struct_mutex);
		dev_err(adev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->adev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->adev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&adev->ddev->struct_mutex);
	}
}

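/**
 * amdgpu_bo_init - initialize buffer object handling
 *
 * @adev: amdgpu device
 *
 * Sets up a write-combining MTRR for the VRAM aperture and initializes
 * the TTM memory manager.
 */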
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n",
		 adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

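/**
 * amdgpu_bo_fini - tear down buffer object handling
 *
 * @adev: amdgpu device
 */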
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

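/**
 * amdgpu_bo_fbdev_mmap - map the fbdev framebuffer BO into userspace
 *
 * @bo: amdgpu BO backing the fbdev framebuffer
 * @vma: vma from the fbdev mmap call
 */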
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

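/**
 * amdgpu_bo_set_tiling_flags - validate and store tiling flags
 *
 * @bo: amdgpu BO
 * @tiling_flags: packed AMDGPU_TILING_* fields
 *
 * Bank width/height and macro tile aspect must be 0, 1, 2, 4 or 8, and
 * the (stencil) tile split must not exceed 6; returns -EINVAL otherwise.
 */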
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

	bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
	bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
	mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
	tilesplit = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
	stilesplit = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK;
	switch (bankw) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}
	switch (bankh) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}
	switch (mtaspect) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}
	if (tilesplit > 6)
		return -EINVAL;
	if (stilesplit > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

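/**
 * amdgpu_bo_get_tiling_flags - read back the stored tiling flags
 *
 * @bo: amdgpu BO; its reservation lock must be held
 * @tiling_flags: optional return of the packed AMDGPU_TILING_* fields
 */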
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

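/**
 * amdgpu_bo_set_metadata - attach opaque userspace metadata to a BO
 *
 * @bo: amdgpu BO
 * @metadata: metadata blob to copy; ignored when @metadata_size is 0
 * @metadata_size: size of @metadata; 0 frees any existing metadata
 * @flags: opaque metadata flags stored alongside the blob
 *
 * Returns 0 on success, negative error code on failure.
 */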
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			/* clear the stale pointer so the destroy path
			 * cannot double-free it */
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kzalloc(metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	memcpy(buffer, metadata, metadata_size);

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

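/**
 * amdgpu_bo_get_metadata - read back a BO's metadata blob and/or its size
 *
 * @bo: amdgpu BO
 * @buffer: optional destination for the metadata blob
 * @buffer_size: size of @buffer; must fit the stored metadata
 * @metadata_size: optional return of the stored metadata size
 * @flags: optional return of the stored metadata flags
 *
 * Returns 0 on success, negative error code on failure.
 */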
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

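/**
 * amdgpu_bo_move_notify - TTM notification that a BO is about to move
 *
 * @bo: TTM buffer object being moved
 * @new_mem: placement the BO is moving to, or NULL
 *
 * Invalidates any VM mappings of the BO and updates the usage counters;
 * called before the move actually happens.
 */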
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

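/**
 * amdgpu_bo_fault_reserve_notify - handle a CPU fault on an amdgpu BO
 *
 * @bo: TTM buffer object that faulted
 *
 * If the faulting page lies outside the CPU-visible part of VRAM, the
 * BO is revalidated into the visible window before the fault is served.
 */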
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;
	rbo = container_of(bo, struct amdgpu_bo, tbo);
	adev = rbo->adev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > adev->mc.visible_vram_size) {
			/* hurrah, the memory is not visible! */
			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_VRAM);
			rbo->placements[0].lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should never happen */
			if ((offset + size) > adev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}