/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

#ifdef SK_DEBUG
// for simple tracking of how much we're using in each heap
// last counter is for non-subheap allocations
VkDeviceSize gHeapUsage[VK_MAX_MEMORY_HEAPS+1] = { 0 };
#endif

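// Scans the physical device's memory types for one that is allowed by typeBits
// (the bitmask from VkMemoryRequirements) and supports every flag in
// requestedMemFlags, returning its type and heap indices on success.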
static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex,
                                        uint32_t* heapIndex) {
    for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) {
        if (typeBits & (1 << i)) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                *heapIndex = physDevMemProps.memoryTypes[i].heapIndex;
                return true;
            }
        }
    }
    return false;
}

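// Maps a GrVkBuffer usage type to the GrVkGpu heap that backs it; the static
// asserts keep the table in sync with the GrVkBuffer::Type enum values.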
static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kTexelBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kTexel_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(5 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

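// Picks a memory type for the buffer (host-visible, ideally cached, for
// dynamic buffers; device-local for static ones), carves the allocation out
// of the matching heap, and binds it to the buffer.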
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;
    const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
    if (dynamic) {
        // try to get cached and ideally non-coherent memory first
        if (!get_valid_memory_type_index(phDevMemProps,
                                         memReqs.memoryTypeBits,
                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                         VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
                                         &typeIndex,
                                         &heapIndex)) {
            // some sort of host-visible memory type should always be available for dynamic buffers
            SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                         &typeIndex,
                                                         &heapIndex));
        }

        VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
        alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0
                                                                   : GrVkAlloc::kNoncoherent_Flag;
    } else {
        // device-local memory should always be available for static buffers
        SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex,
                                                     &heapIndex));
        alloc->fFlags = 0x0;
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
        // if static, fall back to any compatible memory type (no required property flags)
        if (dynamic ||
            !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
                                         0, &typeIndex, &heapIndex) ||
            !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
            SkDebugf("Failed to alloc buffer\n");
            return false;
        }
    }

    // Bind buffer
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

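// Rounds size up to the next multiple of alignment. The bit trick assumes
// alignment is a power of two, which holds for the alignments Vulkan reports
// in VkMemoryRequirements.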
static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

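// Same strategy as the buffer path: linear-tiled images get host-visible
// (ideally cached) memory and optimal-tiled images get device-local memory,
// with small optimal images routed to their own heap.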
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;
    GrVkHeap* heap;
    const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(phDevMemProps,
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex,
                                         &heapIndex)) {
            // some sort of host-visible memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                         &typeIndex,
                                                         &heapIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
        VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
        alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0
                                                                   : GrVkAlloc::kNoncoherent_Flag;
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex,
                                                     &heapIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
        alloc->fFlags = 0x0;
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
        // if optimal, fall back to any compatible memory type (no required property flags)
        if (linearTiling ||
            !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
                                         0, &typeIndex, &heapIndex) ||
            !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
            SkDebugf("Failed to alloc image\n");
            return false;
        }
    }

    // Bind image
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

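// Maps an image layout to the pipeline stage(s) that may access the image
// while it is in that layout, for use when building layout-transition
// barriers.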
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

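// Non-coherent host-visible memory requires an explicit flush after CPU
// writes and an explicit invalidate before CPU reads of the mapped range;
// both calls are no-ops for allocations made from coherent memory.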
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = alloc.fMemory;
        mappedMemoryRange.offset = alloc.fOffset;
        mappedMemoryRange.size = alloc.fSize;
        GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(),
                                                               1, &mappedMemoryRange));
    }
}

void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = alloc.fMemory;
        mappedMemoryRange.offset = alloc.fOffset;
        mappedMemoryRange.size = alloc.fSize;
        GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(),
                                                                    1, &mappedMemoryRange));
    }
}

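// Best-fit allocation from the free list: walk all free blocks, take the
// smallest one that can hold the aligned request, and split off the remainder.
// fLargestBlockSize/fLargestBlockOffset are maintained throughout so callers
// can cheaply test whether a request could possibly succeed.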
bool GrVkFreeListAlloc::alloc(VkDeviceSize requestedSize,
                              VkDeviceSize* allocOffset, VkDeviceSize* allocSize) {
    VkDeviceSize alignedSize = align_size(requestedSize, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        *allocOffset = bestFit->fOffset;
        *allocSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;
        SkASSERT(*allocSize > 0);

        return true;
    }

    SkDebugf("Can't allocate %llu bytes, %llu bytes available, largest free block %llu\n",
             (unsigned long long)alignedSize, (unsigned long long)fFreeSize,
             (unsigned long long)fLargestBlockSize);

    return false;
}

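// Returns a block to the free list, coalescing it with the free blocks that
// immediately precede and/or follow it so the list stays maximally merged.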
void GrVkFreeListAlloc::free(VkDeviceSize allocOffset, VkDeviceSize allocSize) {
    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter prev;
    while (iter.get() && iter.get()->fOffset < allocOffset) {
        prev = iter;
        iter.next();
    }
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == allocOffset) {
        block = prev.get();
        block->fSize += allocSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
        block = iter.get();
        block->fSize += allocSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = allocOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = allocOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = allocOffset;
        block->fSize = allocSize;
    }
    fFreeSize += allocSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

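// A GrVkSubHeap wraps a single vkAllocateMemory allocation and hands out
// offsets within it via the free-list allocator above.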
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : INHERITED(size, alignment)
    , fGpu(gpu)
#ifdef SK_DEBUG
    , fHeapIndex(heapIndex)
#endif
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        nullptr,                                     // pNext
        size,                                        // allocationSize
        memoryTypeIndex,                             // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));
    if (VK_SUCCESS != err) {
        this->reset();
    }
#ifdef SK_DEBUG
    else {
        gHeapUsage[heapIndex] += size;
    }
#endif
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));
#ifdef SK_DEBUG
    gHeapUsage[fHeapIndex] -= fSize;
#endif
}

bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    alloc->fMemory = fAlloc;
    return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize);
}

void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    INHERITED::free(alloc.fOffset, alloc.fSize);
}

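// Suballocation strategy: oversized requests bypass the subheaps and go
// straight to vkAllocateMemory; otherwise we best-fit among existing subheaps
// with a matching memory type and alignment, creating a new subheap on a miss.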
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // if the requested size is larger than our standard subheap size, just alloc directly
    if (alignedSize > fSubHeapSize) {
        VkMemoryAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
            nullptr,                                     // pNext
            size,                                        // allocationSize
            memoryTypeIndex,                             // memoryTypeIndex
        };

        VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(),
                                                                      &allocInfo,
                                                                      nullptr,
                                                                      &alloc->fMemory));
        if (VK_SUCCESS != err) {
            return false;
        }
        alloc->fOffset = 0;
        alloc->fSize = 0;    // hint that this is not a subheap allocation
#ifdef SK_DEBUG
        gHeapUsage[VK_MAX_MEMORY_HEAPS] += alignedSize;
#endif

        return true;
    }

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex &&
            fSubHeaps[i]->alignment() == alignment) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    std::unique_ptr<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, fSubHeapSize, alignment));
    // try to recover from failed allocation by only allocating what we need
    if (subHeap->size() == 0) {
        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
        if (subHeap->size() == 0) {
            return false;
        }
    }
    fAllocSize += subHeap->size();
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

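// singleAlloc gives each allocation its own dedicated subheap (no
// suballocation), so it only reuses subheaps that are entirely unallocated.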
bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex &&
            fSubHeaps[i]->alignment() == alignment &&
            fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize >= alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    std::unique_ptr<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

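// Frees either a direct vkAllocateMemory allocation (signaled by fSize == 0)
// or returns the block to whichever subheap owns its memory.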
bool GrVkHeap::free(const GrVkAlloc& alloc) {
    // a size of 0 means we're using the system heap
    if (0 == alloc.fSize) {
        const GrVkInterface* iface = fGpu->vkInterface();
        GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr));
        return true;
    }

    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}