/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

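// Finds the index of a memory type, among the 32 possible types allowed by typeBits, that
// supports every flag in requestedMemFlags. Returns false if no such type exists.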
static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex) {
    uint32_t checkBit = 1;
    for (uint32_t i = 0; i < 32; ++i) {
        if (typeBits & checkBit) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

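// Sub-allocates memory for a buffer from the heap that matches its usage type. Prefers
// host-visible, coherent, cached memory and falls back to plain visible|coherent if no cached
// type is available, then binds the buffer to the resulting allocation.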
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                            VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                            VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    uint32_t typeIndex = 0;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs.memoryTypeBits,
                                     desiredMemProps,
                                     &typeIndex)) {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                     &typeIndex));
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc buffer\n");
        return false;
    }

    // Bind memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

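// Rounds size up to the next multiple of alignment. Assumes alignment is a power of two, which
// Vulkan guarantees for the alignments reported in VkMemoryRequirements.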
static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

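// Allocates and binds memory for an image. Linearly tiled images get host-visible memory
// (cached if available) from the linear-image heap; optimally tiled images get device-local
// memory, with images at or below kMaxSmallImageSize grouped into their own heap. On success
// the debug totals above are updated.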
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    GrVkHeap* heap;
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex)) {
            // this memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                         &typeIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc image\n");
        return false;
    }

    // Bind memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

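// Returns an image allocation to the heap it came from. If no heap recognizes the memory, it is
// treated as an adopted (externally allocated) image and its memory is freed directly.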
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

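// Maps an image layout to the pipeline stages that may be accessing an image while it is in
// that layout, typically used when setting up layout-transition barriers.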
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

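// A GrVkSubHeap wraps a single VkDeviceMemory allocation of one memory type and hands out
// sub-allocations from it, tracking the free space in an offset-ordered free list along with
// the largest free block. If the initial device allocation fails, the subheap has size 0.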
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : fGpu(gpu)
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        NULL,                                        // pNext
        size,                                        // allocationSize
        memoryTypeIndex,                             // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));

    if (VK_SUCCESS == err) {
        fSize = size;
        fAlignment = alignment;
        fFreeSize = size;
        fLargestBlockSize = size;
        fLargestBlockOffset = 0;

        Block* block = fFreeList.addToTail();
        block->fOffset = 0;
        block->fSize = fSize;
    } else {
        fSize = 0;
        fAlignment = 0;
        fFreeSize = 0;
        fLargestBlockSize = 0;
    }
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));

    fFreeList.reset();
}

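// Best-fit sub-allocation: walk the free list for the smallest block that can hold the aligned
// size, carve the request off the front of that block, and keep the cached largest-block offset
// and size up to date.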
bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        alloc->fMemory = fAlloc;
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        alloc->fOffset = bestFit->fOffset;
        alloc->fSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;
        SkASSERT(alloc->fSize > 0);

        return true;
    }

    SkDebugf("Can't allocate %d bytes, %d bytes available, largest free block %d\n",
             (int)alignedSize, (int)fFreeSize, (int)fLargestBlockSize);

    return false;
}

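// Returns a sub-allocation to the free list, merging it with the free blocks immediately before
// and/or after it when they are adjacent, and updates the largest-block bookkeeping.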
void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter prev;
    while (iter.get() && iter.get()->fOffset < alloc.fOffset) {
        prev = iter;
        iter.next();
    }
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == alloc.fOffset) {
        block = prev.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
        block = iter.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = alloc.fOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = alloc.fOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = alloc.fOffset;
        block->fSize = alloc.fSize;
    }
    fFreeSize += alloc.fSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

GrVkHeap::~GrVkHeap() {
}

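// Sub-allocation strategy: requests larger than the standard subheap size get their own direct
// device allocation (marked by fSize == 0); otherwise the request is best-fit into an existing
// subheap of the matching memory type, creating a new subheap when none has room.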
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // if requested is larger than our subheap allocation, just alloc directly
    if (alignedSize > fSubHeapSize) {
        VkMemoryAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
            NULL,                                        // pNext
            size,                                        // allocationSize
            memoryTypeIndex,                             // memoryTypeIndex
        };

        VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(),
                                                                      &allocInfo,
                                                                      nullptr,
                                                                      &alloc->fMemory));
        if (VK_SUCCESS != err) {
            return false;
        }
        alloc->fOffset = 0;
        alloc->fSize = 0;    // hint that this is not a subheap allocation

        return true;
    }

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment));
    // try to recover from failed allocation by only allocating what we need
    if (subHeap->size() == 0) {
        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
        if (subHeap->size() == 0) {
            return false;
        }
    }
    fAllocSize += subHeap->size();
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

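// Single-allocation strategy: every allocation occupies an entire subheap, reusing an existing
// unallocated subheap of the matching memory type and sufficient size when one is available.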
bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

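// Releases an allocation back to the subheap that owns its memory. An fSize of 0 marks a direct
// device allocation (see subAlloc), which is freed immediately instead.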
bool GrVkHeap::free(const GrVkAlloc& alloc) {
    // a size of 0 means we're using the system heap
    if (0 == alloc.fSize) {
        const GrVkInterface* iface = fGpu->vkInterface();
        GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr));
        return true;
    }

    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}