/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

#ifdef SK_DEBUG
// for simple tracking of how much we're using in each heap
// last counter is for non-subheap allocations
VkDeviceSize gHeapUsage[VK_MAX_MEMORY_HEAPS+1] = { 0 };
#endif

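// Scans the physical device's memory types for one that is allowed by typeBits and supports all
// of the requested property flags, returning its memory type index and heap index.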
static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex,
                                        uint32_t* heapIndex) {
    for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) {
        if (typeBits & (1 << i)) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                *heapIndex = physDevMemProps.memoryTypes[i].heapIndex;
                return true;
            }
        }
    }
    return false;
}

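// Maps a GrVkBuffer type to the GrVkGpu heap that backs allocations of that type.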
static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kTexelBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kTexel_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(5 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

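// Picks a memory type for the buffer (host-visible, preferably cached, for dynamic buffers;
// device-local for static ones), allocates from the matching GrVkHeap, and binds the memory
// to the buffer.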
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;
    const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
    const VkPhysicalDeviceProperties& phDevProps = gpu->physicalDeviceProperties();
    if (dynamic) {
        // try to get cached and ideally non-coherent memory first
        if (!get_valid_memory_type_index(phDevMemProps,
                                         memReqs.memoryTypeBits,
                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                         VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
                                         &typeIndex,
                                         &heapIndex)) {
            // some sort of host-visible memory type should always be available for dynamic buffers
            SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                         &typeIndex,
                                                         &heapIndex));
        }

        VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
        alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0
                                                                   : GrVkAlloc::kNoncoherent_Flag;
        if (SkToBool(alloc->fFlags & GrVkAlloc::kNoncoherent_Flag)) {
            SkASSERT(SkIsPow2(memReqs.alignment));
            SkASSERT(SkIsPow2(phDevProps.limits.nonCoherentAtomSize));
            memReqs.alignment = SkTMax(memReqs.alignment, phDevProps.limits.nonCoherentAtomSize);
        }
    } else {
        // device-local memory should always be available for static buffers
        SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex,
                                                     &heapIndex));
        alloc->fFlags = 0x0;
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
        // if static, try to allocate from non-host-visible non-device-local memory instead
        if (dynamic ||
            !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
                                         0, &typeIndex, &heapIndex) ||
            !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
            SkDebugf("Failed to alloc buffer\n");
            return false;
        }
    }

    // Bind buffer
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

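// Returns a buffer allocation to the heap it was sub-allocated from.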
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

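// Rounds size up to the nearest multiple of alignment; alignment must be a power of two.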
static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

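// Chooses a memory type for the image (host-visible, preferably cached, for linear tiling;
// device-local for optimal tiling), allocates from the linear, small-optimal, or optimal image
// heap based on tiling and size, and binds the memory to the image.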
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;
    GrVkHeap* heap;
    const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
    const VkPhysicalDeviceProperties& phDevProps = gpu->physicalDeviceProperties();
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(phDevMemProps,
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex,
                                         &heapIndex)) {
            // some sort of host-visible memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                         &typeIndex,
                                                         &heapIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
        VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
        alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0
                                                                   : GrVkAlloc::kNoncoherent_Flag;
        if (SkToBool(alloc->fFlags & GrVkAlloc::kNoncoherent_Flag)) {
            SkASSERT(SkIsPow2(memReqs.alignment));
            SkASSERT(SkIsPow2(phDevProps.limits.nonCoherentAtomSize));
            memReqs.alignment = SkTMax(memReqs.alignment, phDevProps.limits.nonCoherentAtomSize);
        }
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex,
                                                     &heapIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
        alloc->fFlags = 0x0;
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
        // if optimal, try to allocate from non-host-visible non-device-local memory instead
        if (linearTiling ||
            !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
                                         0, &typeIndex, &heapIndex) ||
            !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
            SkDebugf("Failed to alloc image\n");
            return false;
        }
    }

    // Bind image
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

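// Returns an image allocation to the heap it came from, or frees the memory directly when the
// allocation was adopted from outside and no heap owns it.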
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

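// For non-coherent allocations, flushes the given mapped range so host writes become visible to
// the device; coherent allocations need no explicit flush.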
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
#ifdef SK_DEBUG
        SkASSERT(offset >= alloc.fOffset);
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (offset & (alignment-1)));
        if (size != VK_WHOLE_SIZE) {
            SkASSERT(size > 0);
            SkASSERT(0 == (size & (alignment-1)) ||
                     (offset + size) == (alloc.fOffset + alloc.fSize));
            SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
        }
#endif

        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = alloc.fMemory;
        mappedMemoryRange.offset = offset;
        mappedMemoryRange.size = size;
        GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(),
                                                               1, &mappedMemoryRange));
    }
}

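// The read-side counterpart of FlushMappedAlloc: for non-coherent allocations, invalidates the
// mapped range so device writes become visible to the host.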
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
#ifdef SK_DEBUG
        SkASSERT(offset >= alloc.fOffset);
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (offset & (alignment-1)));
        if (size != VK_WHOLE_SIZE) {
            SkASSERT(size > 0);
            SkASSERT(0 == (size & (alignment-1)) ||
                     (offset + size) == (alloc.fOffset + alloc.fSize));
            SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
        }
#endif

        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = alloc.fMemory;
        mappedMemoryRange.offset = offset;
        mappedMemoryRange.size = size;
        GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(),
                                                                    1, &mappedMemoryRange));
    }
}

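// Best-fit free-list allocation: walks the free list for the smallest block that can hold the
// aligned request, carves the allocation out of it, and keeps the cached largest-block size and
// offset up to date.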
bool GrVkFreeListAlloc::alloc(VkDeviceSize requestedSize,
                              VkDeviceSize* allocOffset, VkDeviceSize* allocSize) {
    VkDeviceSize alignedSize = align_size(requestedSize, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        *allocOffset = bestFit->fOffset;
        *allocSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;
        SkASSERT(*allocSize > 0);

        return true;
    }

    SkDebugf("Can't allocate %llu bytes, %llu bytes available, largest free block %llu\n",
             (unsigned long long)alignedSize, (unsigned long long)fFreeSize,
             (unsigned long long)fLargestBlockSize);

    return false;
}

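// Returns a range to the free list, coalescing it with the preceding and/or following free block
// when they are adjacent, and updates the cached largest-block size and offset.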
void GrVkFreeListAlloc::free(VkDeviceSize allocOffset, VkDeviceSize allocSize) {
    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter prev;
    while (iter.get() && iter.get()->fOffset < allocOffset) {
        prev = iter;
        iter.next();
    }
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == allocOffset) {
        block = prev.get();
        block->fSize += allocSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
        block = iter.get();
        block->fSize += allocSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = allocOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = allocOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = allocOffset;
        block->fSize = allocSize;
    }
    fFreeSize += allocSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

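// A GrVkSubHeap wraps a single VkDeviceMemory allocation of one memory type and hands out
// sub-ranges of it using the free-list logic above.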
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : INHERITED(size, alignment)
    , fGpu(gpu)
#ifdef SK_DEBUG
    , fHeapIndex(heapIndex)
#endif
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        nullptr,                                     // pNext
        size,                                        // allocationSize
        memoryTypeIndex,                             // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));
    if (VK_SUCCESS != err) {
        this->reset();
    }
#ifdef SK_DEBUG
    else {
        gHeapUsage[heapIndex] += size;
    }
#endif
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));
#ifdef SK_DEBUG
    gHeapUsage[fHeapIndex] -= fSize;
#endif
}

bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    alloc->fMemory = fAlloc;
    return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize);
}

void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    INHERITED::free(alloc.fOffset, alloc.fSize);
}

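// Sub-allocating strategy: requests larger than the subheap size get their own vkAllocateMemory
// call; otherwise the allocation is carved out of an existing subheap with a matching memory type
// and alignment (best fit by largest free block), creating a new subheap if none fits.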
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // if requested is larger than our subheap allocation, just alloc directly
    if (alignedSize > fSubHeapSize) {
        VkMemoryAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
            nullptr,                                     // pNext
            alignedSize,                                 // allocationSize
            memoryTypeIndex,                             // memoryTypeIndex
        };

        VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(),
                                                                      &allocInfo,
                                                                      nullptr,
                                                                      &alloc->fMemory));
        if (VK_SUCCESS != err) {
            return false;
        }
        alloc->fOffset = 0;
        alloc->fSize = alignedSize;
        alloc->fUsesSystemHeap = true;
#ifdef SK_DEBUG
        gHeapUsage[VK_MAX_MEMORY_HEAPS] += alignedSize;
#endif

        return true;
    }

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex &&
            fSubHeaps[i]->alignment() == alignment) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    std::unique_ptr<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, fSubHeapSize, alignment));
    // try to recover from failed allocation by only allocating what we need
    if (subHeap->size() == 0) {
        VkDeviceSize alignedSize = align_size(size, alignment);
        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
        if (subHeap->size() == 0) {
            return false;
        }
    }
    fAllocSize += fSubHeapSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

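// Single-allocation strategy: each allocation gets a dedicated subheap, reusing an existing
// unallocated subheap of the right memory type and alignment when one is available.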
bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex &&
            fSubHeaps[i]->alignment() == alignment &&
            fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    std::unique_ptr<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

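// Frees an allocation made through this heap: system-heap allocations are released with
// vkFreeMemory, otherwise the owning subheap is found by its VkDeviceMemory handle and the range
// is returned to it. Returns false if the allocation does not belong to this heap.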
bool GrVkHeap::free(const GrVkAlloc& alloc) {
    // an allocation flagged as using the system heap was allocated directly, not from a subheap
    if (alloc.fUsesSystemHeap) {
        const GrVkInterface* iface = fGpu->vkInterface();
        GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr));
        return true;
    }

    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}