/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "include/gpu/vk/GrVkMemoryAllocator.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

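// Maps a GrVkBuffer type (and whether it is updated dynamically by the CPU) onto the allocator's
// BufferUsage categories: GPU-only for static vertex/index/texel buffers, CPU-writes/GPU-reads for
// dynamic and uniform buffers, and CPU-only for copy (staging) buffers.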
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
    return BufferUsage::kCpuOnly; // Just returning an arbitrary value.
}

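// Allocates device memory for 'buffer' through the GrVkGpu's GrVkMemoryAllocator, fills in 'alloc'
// with the resulting memory handle, offset, and size, and binds the memory to the buffer. Returns
// false (freeing anything already allocated) if allocation or binding fails.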
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // In general it is always fine (and often better) to keep buffers persistently mapped.
        // TODO: AMD's guide for the VulkanMemoryAllocator suggests two cases where keeping a
        // buffer mapped can hurt. The first is running on Windows 7 or 8 (Windows 10 is fine);
        // by now, machines running Vulkan are unlikely to be on anything older than Windows 10.
        // The second is using AMD's special GPU-local, host-mappable memory; we generally don't
        // pick that memory since we've found it slower than the cached host-visible memory. If
        // we ever need to special case either of these issues, we can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}

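// An allocation made through the GrVkMemoryAllocator carries a non-zero fBackendMemory handle and
// must be returned to that allocator; otherwise the GrVkAlloc wraps a VkDeviceMemory that was
// allocated outside the allocator and is freed directly with vkFreeMemory. The same distinction is
// used by the map/unmap/flush/invalidate helpers below.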
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}
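
// A minimal sketch of how a caller might drive these helpers for a CPU-updated vertex buffer. The
// variable names (gpu, buffer, srcData, srcSize) are illustrative placeholders, not taken from a
// real call site:
//
//     GrVkAlloc alloc;
//     if (GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type,
//                                              /*dynamic=*/true, &alloc)) {
//         if (void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc)) {
//             memcpy(mapPtr, srcData, srcSize);
//             GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, alloc.fSize);
//             GrVkMemory::UnmapAlloc(gpu, alloc);
//         }
//         // ... issue GPU work that reads the buffer, then eventually:
//         GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);
//     }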

// Images whose memory requirements exceed this size are given a dedicated allocation rather than
// being sub-allocated from a larger block of device memory.
const VkDeviceSize kMaxSmallImageSize = 256 * 1024;

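// Allocates and binds device memory for an optimally tiled image; linearly tiled images are not
// supported by this path (see the assert below). Large images, or devices whose caps require it,
// are given a dedicated allocation.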
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (memReqs.size > kMaxSmallImageSize || gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

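// Maps the allocation into CPU-visible address space and returns the pointer, or nullptr on
// failure. The allocation must be host-mappable (kMappable_Flag); for non-coherent memory its
// offset and size must already be aligned to the device's nonCoherentAtomSize.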
void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

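// Builds the VkMappedMemoryRange used to flush or invalidate non-coherent memory: the offset is
// rounded down and the size rounded up so the range is aligned to the device's
// nonCoherentAtomSize, as the Vulkan spec requires.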
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

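// Flushes CPU writes to a mapped, non-coherent allocation so they become visible to the GPU.
// Host-coherent allocations do not need an explicit flush, so this is a no-op for them.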
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}

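// Invalidates a mapped, non-coherent allocation so that GPU writes become visible to subsequent
// CPU reads. As with flushing, this is a no-op for host-coherent allocations.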
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}