/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "include/gpu/vk/GrVkMemoryAllocator.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
    return BufferUsage::kCpuOnly; // Just returning an arbitrary value.
}
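
// A minimal sketch (not compiled) that restates the mapping encoded by the switch above,
// purely for reference; the SkASSERTs mirror the cases one-for-one.
#if 0
static void check_buffer_usage_mapping() {
    SkASSERT(get_buffer_usage(GrVkBuffer::kVertex_Type, /*dynamic=*/false) ==
             BufferUsage::kGpuOnly);
    SkASSERT(get_buffer_usage(GrVkBuffer::kVertex_Type, /*dynamic=*/true) ==
             BufferUsage::kCpuWritesGpuReads);
    SkASSERT(get_buffer_usage(GrVkBuffer::kUniform_Type, /*dynamic=*/true) ==
             BufferUsage::kCpuWritesGpuReads);
    SkASSERT(get_buffer_usage(GrVkBuffer::kCopyRead_Type, /*dynamic=*/false) ==
             BufferUsage::kCpuOnly);
}
#endif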

bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // In general it is always fine (and often better) to keep buffers always mapped.
        // TODO: According to AMD's guide for the VulkanMemoryAllocator, there are two cases
        // where keeping a buffer persistently mapped can hurt. The first is when running on
        // Win7 or Win8 (Win10 is fine); by now it is unlikely we are running on those older
        // systems. The second is when running on an AMD card and using the special GPU-local,
        // host-mappable memory; we generally don't pick that memory since we've found it
        // slower than the cached host-visible memory. If we ever need to special case either
        // of these issues, we can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}
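
// A hedged usage sketch (not part of the build): given a VkBuffer the caller has already
// created with vkCreateBuffer, bind memory to it and later release that memory. The names
// "gpu" and "buffer" are assumed to exist at the call site.
#if 0
static bool bind_and_release_buffer_example(const GrVkGpu* gpu, VkBuffer buffer) {
    GrVkAlloc alloc;
    // A dynamic vertex buffer maps to BufferUsage::kCpuWritesGpuReads above.
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type,
                                              /*dynamic=*/true, &alloc)) {
        return false;
    }
    // ... upload to and draw with the buffer ...
    GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);
    return true;
}
#endif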

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

const VkDeviceSize kMaxSmallImageSize = 256 * 1024;

bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (gpu->protectedContext()) {
        propFlags = AllocationPropertyFlags::kProtected;
    } else if (memReqs.size > kMaxSmallImageSize ||
               gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory,
                                                                  alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}
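
// A hedged sketch of the image path (illustrative only): images larger than
// kMaxSmallImageSize, or devices whose caps demand it, get a dedicated allocation, but the
// caller only ever sees the resulting GrVkAlloc. "gpu" and "image" are assumed.
#if 0
static bool bind_and_release_image_example(const GrVkGpu* gpu, VkImage image) {
    GrVkAlloc alloc;
    // Linear tiling is not supported by this path (see the SkASSERT above).
    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, /*linearTiling=*/false, &alloc)) {
        return false;
    }
    // ... render to or sample from the image ...
    GrVkMemory::FreeImageMemory(gpu, /*linearTiling=*/false, alloc);
    return true;
}
#endif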

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}
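
// A hedged sketch of the expected map/write/flush/unmap cycle for a mappable allocation
// (illustrative only; "gpu", "alloc", "src", and "size" are assumed, and memcpy comes from
// <cstring>):
#if 0
static bool upload_example(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                           const void* src, size_t size) {
    void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    if (!mapPtr) {
        return false;
    }
    memcpy(mapPtr, src, size);
    // No-op for coherent memory; flushes the written range for non-coherent memory.
    GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
    GrVkMemory::UnmapAlloc(gpu, alloc);
    return true;
}
#endif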

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}
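
// A worked example of the rounding above (illustrative values; assumes an alloc with the
// kNoncoherent_Flag set, fOffset == 64, and fSize >= 128): with a caller offset of 0, a size
// of 100, and a nonCoherentAtomSize of 64, the returned range starts at offset 64 and spans
// 128 bytes (100 rounded up to the atom size), as Vulkan requires for non-coherent ranges.
#if 0
static void range_rounding_example(const GrVkAlloc& alloc) {
    VkMappedMemoryRange range;
    GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, 0, 100, 64, &range);
    SkASSERT(range.offset == 64);
    SkASSERT(range.size == 128);
}
#endif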
191
Greg Daniele35a99e2018-03-02 11:44:22 -0500192void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
193 VkDeviceSize size) {
jvanverth9d54afc2016-09-20 09:20:03 -0700194 if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
Greg Daniel81df0412018-05-31 13:13:33 -0400195 SkASSERT(offset == 0);
196 SkASSERT(size <= alloc.fSize);
197 if (alloc.fBackendMemory) {
198 GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
199 allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
200 } else {
201 VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
202 VkMappedMemoryRange mappedMemoryRange;
203 GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
204 &mappedMemoryRange);
205 GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
206 &mappedMemoryRange));
Greg Daniele35a99e2018-03-02 11:44:22 -0500207 }
jvanverth9d54afc2016-09-20 09:20:03 -0700208 }
209}

void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}
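
// A hedged sketch of the readback direction (illustrative only; mirrors the upload sketch
// after MapAlloc): before the CPU reads data the GPU wrote into a non-coherent, host-visible
// allocation, the mapped range has to be invalidated. "gpu", "alloc", "dst", and "size" are
// assumed.
#if 0
static void readback_example(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                             void* dst, size_t size) {
    void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    if (!mapPtr) {
        return;
    }
    // No-op for coherent memory; otherwise makes the GPU's writes visible to the host.
    GrVkMemory::InvalidateMappedAlloc(gpu, alloc, 0, size);
    memcpy(dst, mapPtr, size);
    GrVkMemory::UnmapAlloc(gpu, alloc);
}
#endif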