/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

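/*
 * Report the range of GPU virtual addresses available to userspace.
 * Only the general range is supported; the bounds come straight from
 * the device info queried at initialization.
 */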
int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type,
			  uint64_t *start, uint64_t *end)
{
	if (type != amdgpu_gpu_va_range_general)
		return -EINVAL;

	*start = dev->dev_info.virtual_address_offset;
	*end = dev->dev_info.virtual_address_max;
	return 0;
}

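/*
 * Initialize a VA manager. The free list starts as one hole of size max
 * at offset start, i.e. the whole managed range is initially free.
 */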
drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment)
{
	struct amdgpu_bo_va_hole *n;

	mgr->va_max = max;
	mgr->va_alignment = alignment;

	list_inithead(&mgr->va_holes);
	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
	pthread_mutex_lock(&mgr->bo_va_mutex);
	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	n->size = mgr->va_max;
	n->offset = start;
	list_add(&n->list, &mgr->va_holes);
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

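/*
 * Tear down a VA manager: drop every hole still on the free list and
 * destroy the mutex that protected it.
 */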
drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
	struct amdgpu_bo_va_hole *hole, *tmp;
	LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
		list_del(&hole->list);
		free(hole);
	}
	pthread_mutex_destroy(&mgr->bo_va_mutex);
}

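/*
 * First-fit allocator over the hole list. The list is kept sorted by
 * decreasing offset, so the reverse walk below tries the lowest
 * addresses first. When base_required is non-zero, only a hole that
 * contains [base_required, base_required + size) is acceptable; any
 * alignment padding ("waste") in front of the returned block stays on
 * the free list. Returns the allocated address, or
 * AMDGPU_INVALID_VA_ADDRESS if nothing fits.
 */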
static drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required)
{
	struct amdgpu_bo_va_hole *hole, *n;
	uint64_t offset = 0, waste = 0;

	alignment = MAX2(alignment, mgr->va_alignment);
	size = ALIGN(size, mgr->va_alignment);

	if (base_required % alignment)
		return AMDGPU_INVALID_VA_ADDRESS;

	pthread_mutex_lock(&mgr->bo_va_mutex);
	LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
		if (base_required) {
			if (hole->offset > base_required ||
			    (hole->offset + hole->size) < (base_required + size))
				continue;
			waste = base_required - hole->offset;
			offset = base_required;
		} else {
			offset = hole->offset;
			waste = offset % alignment;
			waste = waste ? alignment - waste : 0;
			offset += waste;
			if (offset >= (hole->offset + hole->size)) {
				continue;
			}
		}
		if (!waste && hole->size == size) {
			offset = hole->offset;
			list_del(&hole->list);
			free(hole);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		if ((hole->size - waste) > size) {
			if (waste) {
				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
				n->size = waste;
				n->offset = hole->offset;
				list_add(&n->list, &hole->list);
			}
			hole->size -= (size + waste);
			hole->offset += size + waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		if ((hole->size - waste) == size) {
			hole->size = waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
	}

	pthread_mutex_unlock(&mgr->bo_va_mutex);
	return AMDGPU_INVALID_VA_ADDRESS;
}

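/*
 * Give a block back to the free list. The forward walk below leaves
 * 'hole' at the last entry whose offset is above va (the list is
 * sorted by decreasing offset) and 'next' at the entry below it; the
 * freed block is then merged with the neighbouring hole(s) where they
 * are adjacent, or inserted as a new hole otherwise.
 */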
static drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
	struct amdgpu_bo_va_hole *hole, *next;

	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	size = ALIGN(size, mgr->va_alignment);

	pthread_mutex_lock(&mgr->bo_va_mutex);
	hole = container_of(&mgr->va_holes, hole, list);
	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
		if (next->offset < va)
			break;
		hole = next;
	}

	if (&hole->list != &mgr->va_holes) {
		/* Grow upper hole if it's adjacent */
		if (hole->offset == (va + size)) {
			hole->offset = va;
			hole->size += size;
			/* Merge lower hole if it's adjacent */
			if (next != hole &&
			    &next->list != &mgr->va_holes &&
			    (next->offset + next->size) == va) {
				next->size += hole->size;
				list_del(&hole->list);
				free(hole);
			}
			goto out;
		}
	}

	/* Grow lower hole if it's adjacent */
	if (next != hole && &next->list != &mgr->va_holes &&
	    (next->offset + next->size) == va) {
		next->size += size;
		goto out;
	}

	/* FIXME: on allocation failure we just lose the freed virtual
	 * address space; maybe print a warning.
	 */
	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (next) {
		next->size = size;
		next->offset = va;
		list_add(&next->list, &hole->list);
	}

out:
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

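/*
 * Public entry point for carving a VA range out of one of the device's
 * managers. AMDGPU_VA_RANGE_32_BIT selects the dedicated 32-bit manager;
 * otherwise the general manager is tried first, with the 32-bit one as
 * a fallback once the general range is exhausted. On success the range
 * is wrapped in a heap-allocated amdgpu_va handle for a later
 * amdgpu_va_range_free().
 *
 * A minimal usage sketch (assumes an initialized device handle 'dev';
 * error handling omitted):
 *
 *	uint64_t va;
 *	amdgpu_va_handle handle;
 *
 *	if (!amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				   4096, 0, 0, &va, &handle, 0))
 *		amdgpu_va_range_free(handle);
 */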
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range va_range_type,
			  uint64_t size,
			  uint64_t va_base_alignment,
			  uint64_t va_base_required,
			  uint64_t *va_base_allocated,
			  amdgpu_va_handle *va_range_handle,
			  uint64_t flags)
{
	struct amdgpu_bo_va_mgr *vamgr;

	if (flags & AMDGPU_VA_RANGE_32_BIT)
		vamgr = &dev->vamgr_32;
	else
		vamgr = &dev->vamgr;

	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
	size = ALIGN(size, vamgr->va_alignment);

	*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
					va_base_alignment, va_base_required);

	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
		/* fallback to 32bit address */
		vamgr = &dev->vamgr_32;
		*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
					va_base_alignment, va_base_required);
	}

	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
		struct amdgpu_va *va;
		va = calloc(1, sizeof(struct amdgpu_va));
		if (!va) {
			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
			return -ENOMEM;
		}
		va->dev = dev;
		va->address = *va_base_allocated;
		va->size = size;
		va->range = va_range_type;
		va->vamgr = vamgr;
		*va_range_handle = va;
	} else {
		return -EINVAL;
	}

	return 0;
}

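/*
 * Release a range obtained from amdgpu_va_range_alloc() and free its
 * handle. A NULL handle, or one without an address, is a no-op.
 */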
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
	if (!va_range_handle || !va_range_handle->address)
		return 0;

	amdgpu_vamgr_free_va(va_range_handle->vamgr,
			     va_range_handle->address,
			     va_range_handle->size);
	free(va_range_handle);
	return 0;
}