/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

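// Scans the physical device's memory types for the first one that is allowed by typeBits
// and supports every flag in requestedMemFlags; writes its index to typeIndex and returns
// true, or returns false if no such type exists.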
static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex) {
    uint32_t checkBit = 1;
    for (uint32_t i = 0; i < 32; ++i) {
        if (typeBits & checkBit) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

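// Maps a GrVkBuffer type to the GrVkGpu heap that services allocations of that type.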
static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

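// Allocates memory for the given buffer from the heap matching its type and binds it.
// Dynamic buffers request host-visible, coherent, cached memory so they can be mapped;
// static buffers request device-local memory. If the preferred memory type is not
// available, this falls back to host-visible/coherent memory.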
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    VkMemoryPropertyFlags desiredMemProps = dynamic ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                      VK_MEMORY_PROPERTY_HOST_CACHED_BIT
                                                    : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    uint32_t typeIndex = 0;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs.memoryTypeBits,
                                     desiredMemProps,
                                     &typeIndex)) {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                     &typeIndex));
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc buffer\n");
        return false;
    }

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

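// Rounds size up to the nearest multiple of alignment; assumes alignment is a power of two.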
static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

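// Allocates memory for the given image and binds it. Linearly-tiled images are suballocated
// from the linear-image heap using host-visible memory; optimally-tiled images use
// device-local memory from either the small-image heap (<= kMaxSmallImageSize) or the
// general image heap.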
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    GrVkHeap* heap;
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex)) {
            // this memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                         &typeIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc image\n");
        return false;
    }

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

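// Maps an image layout to the pipeline stages that can access an image in that layout.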
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

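// A GrVkSubHeap wraps a single VkDeviceMemory allocation and hands out blocks from it,
// keeping a free list sorted by offset and tracking the largest free block. If the initial
// device allocation fails, the subheap is left with size zero.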
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : fGpu(gpu)
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        NULL,                                        // pNext
        size,                                        // allocationSize
        memoryTypeIndex,                             // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));

    if (VK_SUCCESS == err) {
        fSize = size;
        fAlignment = alignment;
        fFreeSize = size;
        fLargestBlockSize = size;
        fLargestBlockOffset = 0;

        Block* block = fFreeList.addToTail();
        block->fOffset = 0;
        block->fSize = fSize;
    } else {
        fSize = 0;
        fAlignment = 0;
        fFreeSize = 0;
        fLargestBlockSize = 0;
    }
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));

    fFreeList.reset();
}

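// Best-fit suballocation: walks the free list for the smallest block that fits the aligned
// size, carves the allocation out of it, and updates the cached largest-block bookkeeping
// (using the second-largest block found during the walk when the largest block shrinks).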
bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        alloc->fMemory = fAlloc;
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        alloc->fOffset = bestFit->fOffset;
        alloc->fSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;
        SkASSERT(alloc->fSize > 0);

        return true;
    }

    SkDebugf("Can't allocate %d bytes, %d bytes available, largest free block %d\n",
             alignedSize, fFreeSize, fLargestBlockSize);

    return false;
}

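// Returns a block to the free list, merging it with the free blocks immediately before
// and/or after it when they are contiguous, and updates the largest-block bookkeeping.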
void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter prev;
    while (iter.get() && iter.get()->fOffset < alloc.fOffset) {
        prev = iter;
        iter.next();
    }
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == alloc.fOffset) {
        block = prev.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
        block = iter.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = alloc.fOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = alloc.fOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = alloc.fOffset;
        block->fSize = alloc.fSize;
    }
    fFreeSize += alloc.fSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

GrVkHeap::~GrVkHeap() {
}

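// Suballocation strategy: requests larger than the subheap size get a dedicated device
// allocation (marked by fSize == 0); otherwise the request is best-fit assigned to an
// existing subheap with a matching memory type, creating a new subheap when none has room.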
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // if requested is larger than our subheap allocation, just alloc directly
    if (alignedSize > fSubHeapSize) {
        VkMemoryAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
            NULL,                                        // pNext
            size,                                        // allocationSize
            memoryTypeIndex,                             // memoryTypeIndex
        };

        VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(),
                                                                      &allocInfo,
                                                                      nullptr,
                                                                      &alloc->fMemory));
        if (VK_SUCCESS != err) {
            return false;
        }
        alloc->fOffset = 0;
        alloc->fSize = 0;    // hint that this is not a subheap allocation

        return true;
    }

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment));
    // try to recover from failed allocation by only allocating what we need
    if (subHeap->size() == 0) {
        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
        if (subHeap->size() == 0) {
            return false;
        }
    }
    fAllocSize += fSubHeapSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

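// Single-allocation strategy: gives each request its own subheap, reusing an existing,
// fully unallocated subheap of the right memory type when one is large enough, otherwise
// creating a new subheap sized to the request.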
bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

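// Releases an allocation back to whichever subheap owns its memory. Allocations with
// fSize == 0 were made directly from the device (see subAlloc) and are freed with
// vkFreeMemory instead.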
bool GrVkHeap::free(const GrVkAlloc& alloc) {
    // a size of 0 means we're using the system heap
    if (0 == alloc.fSize) {
        const GrVkInterface* iface = fGpu->vkInterface();
        GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr));
        return true;
    }

    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}