/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

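/* Shared VA manager: one instance for all devices, set up on first use. */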
static struct amdgpu_bo_va_mgr vamgr = {{0}};

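/*
 * Report the usable virtual address range for a device. Only the
 * general-purpose range is supported; any other type yields -EINVAL.
 */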
int amdgpu_va_range_query(amdgpu_device_handle dev,
                          enum amdgpu_gpu_va_range type,
                          uint64_t *start, uint64_t *end)
{
        if (type == amdgpu_gpu_va_range_general) {
                *start = dev->dev_info.virtual_address_offset;
                *end = dev->dev_info.virtual_address_max;
                return 0;
        }
        return -EINVAL;
}

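/* Seed the manager from the device's VA limits, with an empty hole list. */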
static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr,
                              struct amdgpu_device *dev)
{
        mgr->va_offset = dev->dev_info.virtual_address_offset;
        mgr->va_max = dev->dev_info.virtual_address_max;
        mgr->va_alignment = dev->dev_info.virtual_address_alignment;

        list_inithead(&mgr->va_holes);
        pthread_mutex_init(&mgr->bo_va_mutex, NULL);
}

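/* Tear down a manager, releasing any remaining hole records. */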
static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
        struct amdgpu_bo_va_hole *hole, *tmp;
        /* Use the safe iterator: each hole is freed as we walk the list. */
        LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
                list_del(&hole->list);
                free(hole);
        }
        pthread_mutex_destroy(&mgr->bo_va_mutex);
}

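/*
 * Take a reference on the shared manager, initializing it from the given
 * device's limits on the first call.
 */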
struct amdgpu_bo_va_mgr *amdgpu_vamgr_get_global(struct amdgpu_device *dev)
{
        int ref;
        ref = atomic_inc_return(&vamgr.refcount);

        if (ref == 1)
                amdgpu_vamgr_init(&vamgr, dev);
        return &vamgr;
}

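/*
 * Retarget a manager pointer: drop the reference held through *dst (tearing
 * the manager down on the last release) and point it at src instead.
 */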
void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
                            struct amdgpu_bo_va_mgr *src)
{
        if (update_references(&(*dst)->refcount, NULL))
                amdgpu_vamgr_deinit(*dst);
        *dst = src;
}

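/*
 * First-fit allocator over the hole list, taking the manager mutex
 * internally. Returns the aligned start of the allocated block, or
 * AMDGPU_INVALID_VA_ADDRESS when base_required is misaligned or no space
 * is left. Alignment padding ("waste") below the returned offset is kept
 * as a hole so it can be reused later.
 */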
uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
                              uint64_t alignment, uint64_t base_required)
{
        struct amdgpu_bo_va_hole *hole, *n;
        uint64_t offset = 0, waste = 0;

        alignment = MAX2(alignment, mgr->va_alignment);
        size = ALIGN(size, mgr->va_alignment);

        if (base_required % alignment)
                return AMDGPU_INVALID_VA_ADDRESS;

        pthread_mutex_lock(&mgr->bo_va_mutex);
        /* TODO: use a more appropriate way to track the holes */
        /* first look for a hole */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
                if (base_required) {
                        if (hole->offset > base_required ||
                            (hole->offset + hole->size) < (base_required + size))
                                continue;
                        waste = base_required - hole->offset;
                        offset = base_required;
                } else {
                        offset = hole->offset;
                        waste = offset % alignment;
                        waste = waste ? alignment - waste : 0;
                        offset += waste;
                        if (offset >= (hole->offset + hole->size))
                                continue;
                }
                if (!waste && hole->size == size) {
                        offset = hole->offset;
                        list_del(&hole->list);
                        free(hole);
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
                if ((hole->size - waste) > size) {
                        if (waste) {
                                n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                                n->size = waste;
                                n->offset = hole->offset;
                                list_add(&n->list, &hole->list);
                        }
                        hole->size -= (size + waste);
                        hole->offset += size + waste;
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
                if ((hole->size - waste) == size) {
                        hole->size = waste;
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
        }

        if (base_required) {
                if (base_required < mgr->va_offset) {
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return AMDGPU_INVALID_VA_ADDRESS;
                }
                offset = mgr->va_offset;
                waste = base_required - mgr->va_offset;
        } else {
                offset = mgr->va_offset;
                waste = offset % alignment;
                waste = waste ? alignment - waste : 0;
        }

        if (offset + waste + size > mgr->va_max) {
                pthread_mutex_unlock(&mgr->bo_va_mutex);
                return AMDGPU_INVALID_VA_ADDRESS;
        }

        if (waste) {
                n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                n->size = waste;
                n->offset = offset;
                list_add(&n->list, &mgr->va_holes);
        }

        offset += waste;
        mgr->va_offset += size + waste;
        pthread_mutex_unlock(&mgr->bo_va_mutex);
        return offset;
}

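/*
 * Return a block to the manager. The block either lowers the top of the
 * allocated area or becomes a hole, merging with adjacent holes where
 * possible. The hole list is kept sorted by offset, highest first.
 */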
void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr,
                          uint64_t va, uint64_t size)
{
        struct amdgpu_bo_va_hole *hole;

        if (va == AMDGPU_INVALID_VA_ADDRESS)
                return;

        size = ALIGN(size, mgr->va_alignment);

        pthread_mutex_lock(&mgr->bo_va_mutex);
        if ((va + size) == mgr->va_offset) {
                mgr->va_offset = va;
                /* Delete uppermost hole if it reaches the new top */
                if (!LIST_IS_EMPTY(&mgr->va_holes)) {
                        hole = container_of(mgr->va_holes.next, hole, list);
                        if ((hole->offset + hole->size) == va) {
                                mgr->va_offset = hole->offset;
                                list_del(&hole->list);
                                free(hole);
                        }
                }
        } else {
                struct amdgpu_bo_va_hole *next;

                hole = container_of(&mgr->va_holes, hole, list);
                LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
                        if (next->offset < va)
                                break;
                        hole = next;
                }

                if (&hole->list != &mgr->va_holes) {
                        /* Grow upper hole if it's adjacent */
                        if (hole->offset == (va + size)) {
                                hole->offset = va;
                                hole->size += size;
                                /* Merge lower hole if it's adjacent */
                                if (next != hole &&
                                    &next->list != &mgr->va_holes &&
                                    (next->offset + next->size) == va) {
                                        next->size += hole->size;
                                        list_del(&hole->list);
                                        free(hole);
                                }
                                goto out;
                        }
                }

                /* Grow lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                        next->size += size;
                        goto out;
                }

                /* FIXME: on allocation failure we just lose virtual address
                 * space; maybe print a warning.
                 */
                next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                if (next) {
                        next->size = size;
                        next->offset = va;
                        list_add(&next->list, &hole->list);
                }
        }
out:
        pthread_mutex_unlock(&mgr->bo_va_mutex);
}

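/*
 * Public wrapper around amdgpu_vamgr_find_va(): carves a range out of the
 * device's VA manager and wraps it in a handle for later release. Returns
 * -EINVAL when no address could be found and -ENOMEM when the handle
 * allocation fails (the range itself is given back in that case).
 */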
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
                          enum amdgpu_gpu_va_range va_range_type,
                          uint64_t size,
                          uint64_t va_base_alignment,
                          uint64_t va_base_required,
                          uint64_t *va_base_allocated,
                          amdgpu_va_handle *va_range_handle,
                          uint64_t flags)
{
        va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
        size = ALIGN(size, dev->vamgr->va_alignment);

        *va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
                                                  va_base_alignment,
                                                  va_base_required);

        if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
                struct amdgpu_va *va;
                va = calloc(1, sizeof(struct amdgpu_va));
                if (!va) {
                        amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated,
                                             size);
                        return -ENOMEM;
                }
                va->dev = dev;
                va->address = *va_base_allocated;
                va->size = size;
                va->range = va_range_type;
                *va_range_handle = va;
        } else {
                return -EINVAL;
        }

        return 0;
}

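/* Release a range obtained from amdgpu_va_range_alloc(). NULL is a no-op. */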
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
        if (!va_range_handle || !va_range_handle->address)
                return 0;
        amdgpu_vamgr_free_va(va_range_handle->dev->vamgr,
                             va_range_handle->address,
                             va_range_handle->size);
        free(va_range_handle);
        return 0;
}
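
/*
 * Example: a minimal usage sketch of the public entry points above. It is
 * illustrative only (not compiled into this file) and assumes `fd` is an
 * open render node; error handling is trimmed to the essentials.
 *
 *      uint32_t major, minor;
 *      uint64_t start, end, va;
 *      amdgpu_device_handle dev;
 *      amdgpu_va_handle handle;
 *
 *      if (amdgpu_device_initialize(fd, &major, &minor, &dev))
 *              return -1;
 *
 *      amdgpu_va_range_query(dev, amdgpu_gpu_va_range_general, &start, &end);
 *
 *      if (amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *                                4096, 4096, 0, &va, &handle, 0) == 0) {
 *              ... map buffers at `va` ...
 *              amdgpu_va_range_free(handle);
 *      }
 *
 *      amdgpu_device_deinitialize(dev);
 */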