/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "include/gpu/vk/GrVkMemoryAllocator.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;
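// Chooses the allocator BufferUsage for a GrVkBuffer: vertex, index, and texel buffers are
// GPU-only unless they are dynamic, uniform buffers are always CPU-writes/GPU-reads (and must be
// dynamic), and the copy-read/copy-write staging types are CPU-only.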
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
}
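// Allocates memory for 'buffer' via the GrVkMemoryAllocator and binds it with vkBindBufferMemory.
// CPU-writes/GPU-reads allocations are requested persistently mapped (see the TODO below). If
// binding fails, the memory is freed and false is returned.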
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // In general it is fine (and often better) to keep buffers persistently mapped.
        // TODO: AMD's guide for the VulkanMemoryAllocator suggests two cases where keeping a
        // buffer mapped can hurt. The first is when running on Win7 or Win8 (Win10 is fine); by
        // the time our Vulkan backend ships, machines older than Win10 should be rare. The
        // second is when running on an AMD card and using the special GPU-local, host-mappable
        // memory type. We generally don't pick that memory type since we've found it slower than
        // cached host-visible memory. If we ever need to special case either of these issues we
        // can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}
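// Frees buffer memory: allocator-owned allocations (fBackendMemory is set) go back to the
// GrVkMemoryAllocator, everything else is released directly with vkFreeMemory.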
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}
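// Images larger than this get their own dedicated VkDeviceMemory allocation rather than a
// sub-allocation (see AllocAndBindImageMemory below).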
const VkDeviceSize kMaxSmallImageSize = 256 * 1024;
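// Allocates and binds memory for an optimal-tiled 'image' (linear tiling is not supported here).
// Protected contexts get protected memory; images over kMaxSmallImageSize, or drivers that
// require it, get a dedicated allocation; everything else uses the default allocator path.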
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (gpu->protectedContext()) {
        propFlags = AllocationPropertyFlags::kProtected;
    } else if (memReqs.size > kMaxSmallImageSize ||
               gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);
    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}
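// Frees image memory, either by returning the allocation to the GrVkMemoryAllocator or, for
// memory it does not own, by calling vkFreeMemory directly.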
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}
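// Maps a host-visible allocation and returns its CPU address, or nullptr on failure. Allocator
// owned memory is mapped through GrVkMemoryAllocator; otherwise vkMapMemory is called over the
// allocation's full offset/size. Non-coherent allocations are expected to be aligned to the
// device's nonCoherentAtomSize (checked in debug builds).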
void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}
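// Undoes MapAlloc, unmapping through the allocator or vkUnmapMemory to match how the memory was
// originally mapped.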
void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}
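// Builds the VkMappedMemoryRange used to flush or invalidate a non-coherent mapping. The offset
// (after adding alloc.fOffset) is rounded down and the size rounded up to 'alignment', the
// device's nonCoherentAtomSize, as Vulkan requires. For example, with alignment 64 and
// alloc.fOffset 0, an offset of 100 rounds down to 64 and a size of 100 rounds up to 128.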
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}
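// Flushes CPU writes in a mapped range so the GPU can see them. This is only needed, and only
// done, for non-coherent allocations; the work is delegated to the allocator when it owns the
// memory, otherwise an atom-aligned vkFlushMappedMemoryRanges call is made.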
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}
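// Invalidates the CPU view of a mapped range so GPU writes become visible to the host. Like
// FlushMappedAlloc, this is a no-op for coherent memory and otherwise goes through the allocator
// or an atom-aligned vkInvalidateMappedMemoryRanges call.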
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}