/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"
#include "vk/GrVkMemoryAllocator.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

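// Maps a GrVkBuffer type (plus whether it is dynamic) onto the allocator's BufferUsage enum:
// vertex/index/texel buffers are GPU-only unless dynamic, uniform buffers are always
// CPU-writes/GPU-reads, and the copy staging buffer types stay CPU-only.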
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
    return BufferUsage::kCpuOnly; // Just returning an arbitrary value.
}

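// Allocates device memory for 'buffer' through the GrVkMemoryAllocator and binds the buffer to
// it. On success the allocation is described in 'alloc'; if the bind fails the memory is freed
// again and false is returned.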
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    if (!allocator->allocateMemoryForBuffer(buffer, usage, AllocationPropertyFlags::kNone,
                                            &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}

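// Releases buffer memory: allocations that came from the GrVkMemoryAllocator are returned to it,
// while raw VkDeviceMemory allocations are freed directly with vkFreeMemory.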
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

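// Images whose memory requirements are at or below this size are allocated with no special
// property flags; larger images request a dedicated allocation from the allocator.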
const VkDeviceSize kMaxSmallImageSize = 16 * 1024;

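// Allocates device memory for an optimally tiled 'image' through the GrVkMemoryAllocator and
// binds the image to it. If the bind fails the memory is freed again and false is returned.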
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (memReqs.size <= kMaxSmallImageSize) {
        propFlags = AllocationPropertyFlags::kNone;
    } else {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}

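// Releases image memory: allocator-backed allocations go back to the GrVkMemoryAllocator,
// otherwise the raw VkDeviceMemory is freed directly.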
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

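// Maps a host-visible allocation and returns the CPU pointer, or nullptr on failure. Noncoherent
// allocations are expected to have an offset and size aligned to the device's nonCoherentAtomSize
// so they can later be flushed/invalidated in whole atoms.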
void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}

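// Unmaps a previously mapped allocation, going through the GrVkMemoryAllocator when the
// allocation is allocator-backed and through vkUnmapMemory otherwise.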
void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

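// Builds a VkMappedMemoryRange for a noncoherent allocation, rounding the caller's offset down
// and size up so both are aligned to 'alignment' (the device's nonCoherentAtomSize), as required
// by vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges.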
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

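// Flushes CPU writes in a mapped noncoherent allocation so they become visible to the GPU.
// Coherent allocations need no explicit flush, so this is a no-op for them.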
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}

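// Invalidates a mapped noncoherent allocation so GPU writes become visible to the CPU. Like
// FlushMappedAlloc, this is a no-op for coherent allocations.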
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}