/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

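// Scans the physical device's memory types and returns (via typeIndex) the first type that is
// permitted by typeBits and supports every requested property flag; returns false if none does.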
static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex) {
    uint32_t checkBit = 1;
    for (uint32_t i = 0; i < 32; ++i) {
        if (typeBits & checkBit) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

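// Maps a GrVkBuffer::Type to the GrVkGpu heap used for allocations of that buffer type. The
// static asserts keep the table in sync with the GrVkBuffer::Type enum values.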
static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

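// Allocates memory for 'buffer' from the heap that matches its type and binds it to the buffer.
// Dynamic buffers request host-visible (coherent, cached) memory; non-dynamic buffers request
// device-local memory. If the preferred memory type is unavailable, a host-visible/coherent
// type is used as a fallback.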
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    VkMemoryPropertyFlags desiredMemProps = dynamic ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                      VK_MEMORY_PROPERTY_HOST_CACHED_BIT
                                                    : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    uint32_t typeIndex = 0;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs.memoryTypeBits,
                                     desiredMemProps,
                                     &typeIndex)) {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                     &typeIndex));
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc buffer\n");
        return false;
    }

    // Bind memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

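// Returns a buffer allocation to the heap it was allocated from.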
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

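// A minimal usage sketch of the buffer paths above (gpu and buffer are hypothetical, assumed to
// be a valid GrVkGpu* and an already-created VkBuffer):
//
//     GrVkAlloc alloc;
//     if (GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type,
//                                              /*dynamic=*/true, &alloc)) {
//         // ... map/upload data and issue draws ...
//         GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);
//     }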
// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

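// align_size() rounds 'size' up to the next multiple of 'alignment' (assumed to be a power of
// two); for example, align_size(5000, 4096) == 8192 and align_size(4096, 4096) == 4096.

// Allocates and binds memory for 'image'. Linearly tiled images get host-visible memory from the
// linear-image heap; optimally tiled images get device-local memory from the small- or regular
// optimal-image heap depending on whether they fit under kMaxSmallImageSize.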
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    GrVkHeap* heap;
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex)) {
            // this memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                         &typeIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc image\n");
        return false;
    }

    // Bind memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

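// Frees an image allocation back to the heap it came from. If no heap owns the memory (e.g. an
// adopted image allocated outside these heaps), the memory is freed directly via the device's
// FreeMemory call.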
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

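// Maps an image layout to a conservative set of pipeline stages that may access an image in that
// layout.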
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host to general, and we
    // should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

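// Best-fit allocation out of the free list: walks every free block looking for the smallest one
// that can hold the aligned request, carves the allocation out of it (shrinking or removing the
// block), and keeps fLargestBlockOffset/fLargestBlockSize up to date so the largest contiguous
// free block can be queried cheaply.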
bool GrVkFreeListAlloc::alloc(VkDeviceSize requestedSize,
                              VkDeviceSize* allocOffset, VkDeviceSize* allocSize) {
    VkDeviceSize alignedSize = align_size(requestedSize, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        *allocOffset = bestFit->fOffset;
        *allocSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;
        SkASSERT(*allocSize > 0);

        return true;
    }

    SkDebugf("Can't allocate %llu bytes, %llu bytes available, largest free block %llu\n",
             (unsigned long long)alignedSize, (unsigned long long)fFreeSize,
             (unsigned long long)fLargestBlockSize);

    return false;
}

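// Returns a block to the free list, handling the four cases noted below: merge with the previous
// block, merge with both neighbors, merge with the following block, or insert as a new standalone
// block. The largest-block bookkeeping is updated to match.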
void GrVkFreeListAlloc::free(VkDeviceSize allocOffset, VkDeviceSize allocSize) {
    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter prev;
    while (iter.get() && iter.get()->fOffset < allocOffset) {
        prev = iter;
        iter.next();
    }
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == allocOffset) {
        block = prev.get();
        block->fSize += allocSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
        block = iter.get();
        block->fSize += allocSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = allocOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = allocOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = allocOffset;
        block->fSize = allocSize;
    }
    fFreeSize += allocSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

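// A GrVkSubHeap owns a single VkDeviceMemory allocation and suballocates it with the free-list
// allocator above (via its INHERITED base). If the underlying AllocateMemory call fails, the
// sub-heap is reset to zero size so callers can detect the failure via size().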
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : INHERITED(size, alignment)
    , fGpu(gpu)
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        NULL,                                        // pNext
        size,                                        // allocationSize
        memoryTypeIndex,                             // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));
    if (VK_SUCCESS != err) {
        this->reset();
    }
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));
}

bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    alloc->fMemory = fAlloc;
    return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize);
}

void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    INHERITED::free(alloc.fOffset, alloc.fSize);
}

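// subAlloc strategy: requests larger than the standard sub-heap size get their own dedicated
// VkDeviceMemory (marked by fSize == 0); otherwise the request is best-fit into an existing
// sub-heap with a matching memory type, and a new sub-heap is created only when none can hold it.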
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // if requested is larger than our subheap allocation, just alloc directly
    if (alignedSize > fSubHeapSize) {
        VkMemoryAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
            NULL,                                        // pNext
            size,                                        // allocationSize
            memoryTypeIndex,                             // memoryTypeIndex
        };

        VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(),
                                                                      &allocInfo,
                                                                      nullptr,
                                                                      &alloc->fMemory));
        if (VK_SUCCESS != err) {
            return false;
        }
        alloc->fOffset = 0;
        alloc->fSize = 0;    // hint that this is not a subheap allocation

        return true;
    }

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment));
    // try to recover from failed allocation by only allocating what we need
    if (subHeap->size() == 0) {
        VkDeviceSize alignedSize = align_size(size, alignment);
        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
        if (subHeap->size() == 0) {
            return false;
        }
    }
    fAllocSize += fSubHeapSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

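// singleAlloc strategy: each allocation gets a whole sub-heap to itself. An existing, currently
// unallocated sub-heap with a matching memory type is reused when one fits; otherwise a new
// sub-heap sized to the aligned request is created.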
bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

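// Releases an allocation. Allocations with fSize == 0 were made straight from the device (see
// subAlloc above) and are freed with FreeMemory; otherwise the owning sub-heap is found by
// matching the VkDeviceMemory handle and the allocation is returned to it.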
bool GrVkHeap::free(const GrVkAlloc& alloc) {
    // a size of 0 means we're using the system heap
    if (0 == alloc.fSize) {
        const GrVkInterface* iface = fGpu->vkInterface();
        GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr));
        return true;
    }

    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}