/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions operating on a BO call it first.
 */
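/*
 * Illustrative sketch (assumed caller code, not part of this file):
 * callers bracket any BO state change with a reserve/unreserve pair,
 * e.g.:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	radeon_bo_unreserve(bo);
 *
 * radeon_bo_reserve()/radeon_bo_unreserve() are the inline helpers from
 * radeon_object.h wrapping ttm_bo_reserve(); error handling is
 * abbreviated here.
 */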

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}
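/*
 * For illustration: a BO created with
 * domain = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT and no special
 * flags on a non-AGP board ends up with two placements in priority order,
 * roughly:
 *
 *	placements[0].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 *			      TTM_PL_FLAG_VRAM;	 (preferred)
 *	placements[1].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;  (fallback)
 *
 * so TTM tries VRAM first and falls back to cacheable GTT when VRAM is
 * full. (Sketch based on the function above; on AGP systems the GTT
 * placement is write-combined instead.)
 */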

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
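/*
 * Illustrative sketch (assumed caller code, not part of this file): a
 * typical kernel-internal allocation creates, pins and maps a BO like
 * this; error unwinding is abbreviated:
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r == 0)
 *		r = radeon_bo_kmap(bo, &cpu_ptr);
 *	radeon_bo_unreserve(bo);
 *
 * This mirrors how e.g. ring buffers and GART tables are set up
 * elsewhere in the driver.
 */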

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
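/*
 * Illustrative sketch (assumed caller code): teardown is the mirror
 * image of the allocation sequence shown after radeon_bo_create() above;
 * error handling is abbreviated:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		radeon_bo_kunmap(bo);
 *		radeon_bo_unpin(bo);
 *		radeon_bo_unreserve(bo);
 *	}
 *	radeon_bo_unref(&bo);
 */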

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *          __________________
	 * 1/4 of -|\                 |
	 * VRAM    | \                |
	 *         |  \               |
	 *         |   \              |
	 *         |    \             |
	 *         |     \            |
	 *         |      \           |
	 *         |       \__________|1 MB
	 *         |------------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
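/*
 * Worked example (illustrative numbers only): with real_vram_size = 1 GiB,
 * half_vram is 512 MiB, so:
 *
 *	vram_usage =   0 MiB -> threshold = 512 MiB / 2 = 256 MiB (1/4 VRAM)
 *	vram_usage = 256 MiB -> threshold = 256 MiB / 2 = 128 MiB
 *	vram_usage = 512 MiB -> threshold = max(0, 1 MiB) =   1 MiB
 *
 * matching the curve drawn in the comment above.
 */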

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}