/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"
#include "vk/GrVkMemoryAllocator.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
    return BufferUsage::kCpuOnly; // Just returning an arbitrary value.
}

bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // In general it is fine (and often better) to keep buffers persistently mapped.
        // TODO: AMD's guide for the VulkanMemoryAllocator notes two cases where keeping a buffer
        // mapped can hurt. The first is when running on Windows 7 or 8 (Windows 10 is fine); by
        // the time a Vulkan app ships, it is unlikely to be running on anything older than
        // Windows 10. The second is when running on an AMD card and using the special GPU-local,
        // host-mappable memory; we generally don't pick that memory since we've found it slower
        // than the cached host-visible memory. If we ever need to special-case either of these
        // issues, we can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}
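
// Illustrative sketch (not part of the implementation): a typical lifecycle for a dynamic,
// CPU-written buffer using the helpers in this file. The buffer handle, source pointer, and
// size below are placeholders.
//
//     GrVkAlloc alloc;
//     if (GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type,
//                                              /*dynamic=*/true, &alloc)) {
//         if (void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc)) {
//             memcpy(mapPtr, srcData, srcSize);
//             GrVkMemory::FlushMappedAlloc(gpu, alloc, /*offset=*/0, srcSize);
//             GrVkMemory::UnmapAlloc(gpu, alloc);
//         }
//         // ... record GPU work that reads the buffer ...
//         GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);
//     }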
86
jvanverth6b6ffc42016-06-13 14:28:07 -070087const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
jvanverth1e305ba2016-06-01 09:39:15 -070088
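// Images whose memory requirements exceed kMaxSmallImageSize request a dedicated allocation
// (AllocationPropertyFlags::kDedicatedAllocation); smaller images may be sub-allocated from
// larger blocks by the memory allocator.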
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (memReqs.size <= kMaxSmallImageSize) {
        propFlags = AllocationPropertyFlags::kNone;
    } else {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

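// GetNonCoherentMappedMemoryRange rounds the requested range out to nonCoherentAtomSize, as
// Vulkan requires for vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges. Worked example
// (illustrative numbers only): with alignment = 64, alloc.fOffset = 128, offset = 0, and
// size = 50, the range offset stays at 128 (already atom-aligned, so offsetDiff = 0) and the
// size rounds up to 64, giving a mapped range covering bytes [128, 192).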
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

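// For non-coherent allocations, FlushMappedAlloc should follow CPU writes so the GPU sees them,
// and InvalidateMappedAlloc should precede CPU reads of GPU-written memory. Both are no-ops for
// coherent allocations, which is what the kNoncoherent_Flag checks below implement.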
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}

void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}