/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

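// Scans the physical device's memory types for one that is allowed by typeBits and
// supports every flag in requestedMemFlags. Returns false if no such type exists.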
static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex) {
    uint32_t checkBit = 1;
    for (uint32_t i = 0; i < 32; ++i) {
        if (typeBits & checkBit) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

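// Maps a GrVkBuffer type to the GrVkGpu heap that services allocations of that type.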
static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

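// Suballocates buffer memory from the heap matching the buffer's type and binds it.
// We prefer host-visible, coherent, cached memory and fall back to plain host-visible,
// coherent memory if the cached type is not available.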
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                            VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                            VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    uint32_t typeIndex = 0;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs.memoryTypeBits,
                                     desiredMemProps,
                                     &typeIndex)) {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                     &typeIndex));
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc buffer\n");
        return false;
    }

    // Bind memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

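// Optimally-tiled images no larger than kMaxSmallImageSize go to the small-image heap;
// kMinVulkanPageSize is used to compute the page-rounded totals tracked above.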
const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

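// Suballocates image memory and binds it. Linearly-tiled images come from the host-visible
// linear-image heap; optimally-tiled images come from a device-local heap, split by size
// into the small-image and regular optimal-image heaps.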
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    GrVkHeap* heap;
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex)) {
            // this memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                         &typeIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc image\n");
        return false;
    }

    // Bind memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

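// Returns image memory to the heap it was suballocated from. If no heap owns the allocation
// it must have been adopted from an external image, so it is freed directly.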
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

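// Maps an image layout to the pipeline stages that may be accessing an image in that layout.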
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

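// A GrVkSubHeap owns a single VkDeviceMemory allocation and parcels it out via a best-fit
// free list. If the device allocation fails, the subheap is left with zero size.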
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : fGpu(gpu)
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        NULL,                                        // pNext
        size,                                        // allocationSize
        memoryTypeIndex,                             // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));

    if (VK_SUCCESS == err) {
        fSize = size;
        fAlignment = alignment;
        fFreeSize = size;
        fLargestBlockSize = size;
        fLargestBlockOffset = 0;

        Block* block = fFreeList.addToTail();
        block->fOffset = 0;
        block->fSize = fSize;
    } else {
        fSize = 0;
        fAlignment = 0;
        fFreeSize = 0;
        fLargestBlockSize = 0;
    }
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));

    fFreeList.reset();
}

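// Best-fit allocation: walk the free list for the smallest block that can hold the aligned
// size, carve the allocation out of its front, and keep the cached largest-block bookkeeping
// up to date so GrVkHeap can quickly test whether this subheap is worth searching.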
bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        alloc->fMemory = fAlloc;
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        alloc->fOffset = bestFit->fOffset;
        alloc->fSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;

        return true;
    }

    SkDebugf("Can't allocate %d bytes, %d bytes available, largest free block %d\n",
             alignedSize, fFreeSize, fLargestBlockSize);

    return false;
}

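// Returns a block to the free list, coalescing it with free neighbors on either side and
// updating the largest-block tracking.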
void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    while (iter.get() && iter.get()->fOffset < alloc.fOffset) {
        iter.next();
    }
    FreeList::Iter prev = iter;
    prev.prev();
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == alloc.fOffset) {
        block = prev.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
        block = iter.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = alloc.fOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = alloc.fOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = alloc.fOffset;
        block->fSize = alloc.fSize;
    }
    fFreeSize += alloc.fSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

GrVkHeap::~GrVkHeap() {
}

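// Suballocation strategy: find the existing subheap with the matching memory type whose
// largest free block best fits the request; if none can hold it, add a new subheap of
// fSubHeapSize and allocate from that.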
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize > alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment));
    fAllocSize += fSubHeapSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

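// Single-allocation strategy: each allocation gets its own subheap sized to the request.
// A wholly unallocated (previously freed) subheap with a matching memory type and a large
// enough size is reused before a new one is created.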
bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize > alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

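// Returns an allocation to whichever subheap owns its VkDeviceMemory. Returns false if no
// subheap matches, i.e. the memory was not allocated from this heap.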
bool GrVkHeap::free(const GrVkAlloc& alloc) {
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}