/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

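// Scans the 32 possible memory types and returns (via 'typeIndex') the first type that is
// allowed by 'typeBits' and supports every flag in 'requestedMemFlags'. Returns false if no
// such type exists.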
static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex) {
    uint32_t checkBit = 1;
    for (uint32_t i = 0; i < 32; ++i) {
        if (typeBits & checkBit) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

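// Allocates a VkDeviceMemory block that satisfies 'memReqs' from a memory type with the
// requested property flags. Returns false if no suitable memory type is found or if the
// allocation itself fails.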
static bool alloc_device_memory(const GrVkGpu* gpu,
                                VkMemoryRequirements* memReqs,
                                const VkMemoryPropertyFlags flags,
                                VkDeviceMemory* memory) {
    uint32_t typeIndex;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs->memoryTypeBits,
                                     flags,
                                     &typeIndex)) {
        return false;
    }

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
        NULL,                                   // pNext
        memReqs->size,                          // allocationSize
        typeIndex,                              // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 memory));
    if (err) {
        return false;
    }
    return true;
}

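// Allocates device memory matching the buffer's requirements and binds it to the buffer,
// freeing the allocation again if the bind fails.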
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          const VkMemoryPropertyFlags flags,
                                          VkDeviceMemory* memory) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
        return false;
    }

    // Bind the memory to the buffer
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, *memory, 0));
    if (err) {
        GR_VK_CALL(iface, FreeMemory(device, *memory, nullptr));
        return false;
    }
    return true;
}

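// Allocates device memory matching the image's requirements and binds it to the image,
// freeing the allocation again if the bind fails.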
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         const VkMemoryPropertyFlags flags,
                                         VkDeviceMemory* memory) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
        return false;
    }

    // Bind the memory to the image
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, *memory, 0));
    if (err) {
        GR_VK_CALL(iface, FreeMemory(device, *memory, nullptr));
        return false;
    }
    return true;
}

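// Maps an image layout to the pipeline stages that may access an image in that layout, for use
// as the stage mask when transitioning out of that layout.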
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

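// Maps an image layout to the access types that may have written to an image while it was in
// that layout, for use as the srcAccessMask when transitioning out of that layout.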
VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes), so we ignore
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in the preinitialized or general
    // layout and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host access to general, and
    // we should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}