Enable creating protected vulkan backend
This CL allows the user to indicate that they have protected content during
GrVkBackendContext creation, which results in protected CommandPool and Queue
usage.
Bug: skia:9016
Change-Id: I6a478d688b6988c2c5e5e98f18f58fb21f9d26ae
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/210067
Commit-Queue: Greg Daniel <egdaniel@google.com>
Auto-Submit: Emircan Uysaler <emircan@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
diff --git a/src/gpu/vk/GrVkAMDMemoryAllocator.cpp b/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
index 7e3c0c6..0fe6526 100644
--- a/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
+++ b/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
@@ -81,6 +81,10 @@
info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
}
+ if (AllocationPropertyFlags::kProtected & flags) {
+ info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
+ }
+
VmaAllocation allocation;
VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
if (VK_SUCCESS != result) {
diff --git a/src/gpu/vk/GrVkBuffer.cpp b/src/gpu/vk/GrVkBuffer.cpp
index 4b84080..833dfdb5 100644
--- a/src/gpu/vk/GrVkBuffer.cpp
+++ b/src/gpu/vk/GrVkBuffer.cpp
@@ -20,6 +20,7 @@
#endif
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+ SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
VkBuffer buffer;
GrVkAlloc alloc;
@@ -186,6 +187,8 @@
void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
SkASSERT(src);
+ // We should never call this method in protected contexts.
+ SkASSERT(!gpu->protectedContext());
// The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
// to 65536 bytes and a size the is 4 byte aligned.
if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
diff --git a/src/gpu/vk/GrVkCaps.cpp b/src/gpu/vk/GrVkCaps.cpp
index 6ef4b5d..dfd6ef0 100644
--- a/src/gpu/vk/GrVkCaps.cpp
+++ b/src/gpu/vk/GrVkCaps.cpp
@@ -17,12 +17,15 @@
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"
+#ifdef SK_BUILD_FOR_ANDROID
+#include <sys/system_properties.h>
+#endif
+
GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice physDev, const VkPhysicalDeviceFeatures2& features,
uint32_t instanceVersion, uint32_t physicalDeviceVersion,
- const GrVkExtensions& extensions)
- : INHERITED(contextOptions) {
-
+ const GrVkExtensions& extensions, GrProtected isProtected)
+ : INHERITED(contextOptions) {
/**************************************************************************
* GrCaps fields
**************************************************************************/
@@ -50,7 +53,8 @@
fShaderCaps.reset(new GrShaderCaps(contextOptions));
- this->init(contextOptions, vkInterface, physDev, features, physicalDeviceVersion, extensions);
+ this->init(contextOptions, vkInterface, physDev, features, physicalDeviceVersion, extensions,
+ isProtected);
}
bool GrVkCaps::initDescForDstCopy(const GrRenderTargetProxy* src, GrSurfaceDesc* desc,
@@ -194,6 +198,10 @@
bool GrVkCaps::onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ if (src->isProtected() && !dst->isProtected()) {
+ return false;
+ }
+
GrPixelConfig dstConfig = dst->config();
GrPixelConfig srcConfig = src->config();
@@ -272,7 +280,8 @@
void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice physDev, const VkPhysicalDeviceFeatures2& features,
- uint32_t physicalDeviceVersion, const GrVkExtensions& extensions) {
+ uint32_t physicalDeviceVersion, const GrVkExtensions& extensions,
+ GrProtected isProtected) {
VkPhysicalDeviceProperties properties;
GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties(physDev, &properties));
@@ -360,6 +369,13 @@
// will return a key of 0.
fYcbcrInfos.push_back(GrVkYcbcrConversionInfo());
+ if ((isProtected == GrProtected::kYes) &&
+ (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0))) {
+ fSupportsProtectedMemory = true;
+ fAvoidUpdateBuffers = true;
+ fShouldAlwaysUseDedicatedImageMemory = true;
+ }
+
this->initGrCaps(vkInterface, physDev, properties, memoryProperties, features, extensions);
this->initShaderCaps(properties, features);
@@ -427,6 +443,17 @@
}
#endif
+#if defined(SK_BUILD_FOR_ANDROID)
+ // Protected memory features have problems in Android P and earlier.
+ if (fSupportsProtectedMemory && (kQualcomm_VkVendor == properties.vendorID)) {
+ char androidAPIVersion[PROP_VALUE_MAX];
+ int strLength = __system_property_get("ro.build.version.sdk", androidAPIVersion);
+ if (strLength == 0 || atoi(androidAPIVersion) <= 28) {
+ fSupportsProtectedMemory = false;
+ }
+ }
+#endif
+
// AMD seems to have issues binding new VkPipelines inside a secondary command buffer.
// Current workaround is to use a different secondary command buffer for each new VkPipeline.
if (kAMD_VkVendor == properties.vendorID) {
@@ -861,14 +888,17 @@
return table[table.count() - 1];
}
-bool GrVkCaps::surfaceSupportsReadPixels(const GrSurface* surface) const {
+GrCaps::ReadFlags GrVkCaps::surfaceSupportsReadPixels(const GrSurface* surface) const {
+ if (surface->isProtected()) {
+ return kProtected_ReadFlag;
+ }
if (auto tex = static_cast<const GrVkTexture*>(surface->asTexture())) {
// We can't directly read from a VkImage that has a ycbcr sampler.
if (tex->ycbcrConversionInfo().isValid()) {
- return false;
+ return kRequiresCopy_ReadFlag;
}
}
- return true;
+ return kSupported_ReadFlag;
}
bool GrVkCaps::onSurfaceSupportsWritePixels(const GrSurface* surface) const {
diff --git a/src/gpu/vk/GrVkCaps.h b/src/gpu/vk/GrVkCaps.h
index b7c7a00..204c3d0 100644
--- a/src/gpu/vk/GrVkCaps.h
+++ b/src/gpu/vk/GrVkCaps.h
@@ -30,7 +30,7 @@
GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice device, const VkPhysicalDeviceFeatures2& features,
uint32_t instanceVersion, uint32_t physicalDeviceVersion,
- const GrVkExtensions& extensions);
+ const GrVkExtensions& extensions, GrProtected isProtected = GrProtected::kNo);
bool isFormatTexturable(VkFormat) const;
bool isConfigTexturable(GrPixelConfig config) const override;
@@ -46,7 +46,7 @@
int maxRenderTargetSampleCount(GrPixelConfig config) const override;
int maxRenderTargetSampleCount(VkFormat format) const;
- bool surfaceSupportsReadPixels(const GrSurface*) const override;
+ ReadFlags surfaceSupportsReadPixels(const GrSurface*) const override;
bool isFormatTexturableLinearly(VkFormat format) const {
return SkToBool(FormatInfo::kTextureable_Flag & this->getFormatInfo(format).fLinearFlags);
@@ -134,6 +134,9 @@
// Returns true if it supports ycbcr conversion for samplers
bool supportsYcbcrConversion() const { return fSupportsYcbcrConversion; }
+ // Returns true if the device supports protected memory.
+ bool supportsProtectedMemory() const { return fSupportsProtectedMemory; }
+
/**
* Helpers used by canCopySurface. In all cases if the SampleCnt parameter is zero that means
* the surface is not a render target, otherwise it is the number of samples in the render
@@ -176,7 +179,7 @@
void init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice device, const VkPhysicalDeviceFeatures2&,
- uint32_t physicalDeviceVersion, const GrVkExtensions&);
+ uint32_t physicalDeviceVersion, const GrVkExtensions&, GrProtected isProtected);
void initGrCaps(const GrVkInterface* vkInterface,
VkPhysicalDevice physDev,
const VkPhysicalDeviceProperties&,
@@ -249,6 +252,8 @@
bool fSupportsYcbcrConversion = false;
+ bool fSupportsProtectedMemory = false;
+
typedef GrCaps INHERITED;
};
diff --git a/src/gpu/vk/GrVkCommandBuffer.cpp b/src/gpu/vk/GrVkCommandBuffer.cpp
index 0dfdade..e8bba1d 100644
--- a/src/gpu/vk/GrVkCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -541,11 +541,20 @@
uint32_t commandBufferCount,
const VkCommandBuffer* commandBuffers,
uint32_t signalCount,
- const VkSemaphore* signalSemaphores) {
+ const VkSemaphore* signalSemaphores,
+ GrProtected protectedContext) {
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (protectedContext == GrProtected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submitInfo.pNext = nullptr;
+ submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
submitInfo.waitSemaphoreCount = waitCount;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
@@ -582,8 +591,9 @@
if (0 == signalCount && 0 == waitCount) {
// This command buffer has no dependent semaphores so we can simply just submit it to the
// queue with no worries.
- submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr,
- 1, &fCmdBuffer, 0, nullptr);
+ submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr, 1,
+ &fCmdBuffer, 0, nullptr,
+ gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
} else {
SkTArray<VkSemaphore> vkSignalSems(signalCount);
for (int i = 0; i < signalCount; ++i) {
@@ -602,11 +612,10 @@
vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
}
}
- submit_to_queue(gpu->vkInterface(), queue, fSubmitFence,
- vkWaitSems.count(), vkWaitSems.begin(), vkWaitStages.begin(),
- 1, &fCmdBuffer,
- vkSignalSems.count(), vkSignalSems.begin());
-
+ submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, vkWaitSems.count(),
+ vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
+ vkSignalSems.count(), vkSignalSems.begin(),
+ gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
for (int i = 0; i < signalCount; ++i) {
signalSemaphores[i]->markAsSignaled();
}
diff --git a/src/gpu/vk/GrVkCommandPool.cpp b/src/gpu/vk/GrVkCommandPool.cpp
index 22d0fbf..aae9355 100644
--- a/src/gpu/vk/GrVkCommandPool.cpp
+++ b/src/gpu/vk/GrVkCommandPool.cpp
@@ -12,17 +12,24 @@
#include "src/gpu/vk/GrVkGpu.h"
GrVkCommandPool* GrVkCommandPool::Create(const GrVkGpu* gpu) {
- const VkCommandPoolCreateInfo cmdPoolInfo = {
- VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
- nullptr, // pNext
- VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
- VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags
- gpu->queueIndex(), // queueFamilyIndex
- };
- VkCommandPool pool;
- GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateCommandPool(gpu->device(), &cmdPoolInfo,
- nullptr, &pool));
- return new GrVkCommandPool(gpu, pool);
+ VkCommandPoolCreateFlags cmdPoolCreateFlags =
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ if (gpu->protectedContext()) {
+ cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
+ }
+
+ const VkCommandPoolCreateInfo cmdPoolInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
+ nullptr, // pNext
+ cmdPoolCreateFlags, // CmdPoolCreateFlags
+ gpu->queueIndex(), // queueFamilyIndex
+ };
+ VkCommandPool pool;
+ GR_VK_CALL_ERRCHECK(
+ gpu->vkInterface(),
+ CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+ return new GrVkCommandPool(gpu, pool);
}
GrVkCommandPool::GrVkCommandPool(const GrVkGpu* gpu, VkCommandPool commandPool)
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 375db6b..1e579c6 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -141,8 +141,13 @@
}
}
- return sk_sp<GrGpu>(new GrVkGpu(context, options, backendContext, interface, instanceVersion,
- physDevVersion));
+ sk_sp<GrVkGpu> vkGpu(new GrVkGpu(context, options, backendContext, interface,
+ instanceVersion, physDevVersion));
+ if (backendContext.fProtectedContext == GrProtected::kYes &&
+ !vkGpu->vkCaps().supportsProtectedMemory()) {
+ return nullptr;
+ }
+ return std::move(vkGpu);
}
////////////////////////////////////////////////////////////////////////////////
@@ -159,7 +164,8 @@
, fQueue(backendContext.fQueue)
, fQueueIndex(backendContext.fGraphicsQueueIndex)
, fResourceProvider(this)
- , fDisconnected(false) {
+ , fDisconnected(false)
+ , fProtectedContext(backendContext.fProtectedContext) {
SkASSERT(!backendContext.fOwnsInstanceAndDevice);
if (!fMemoryAllocator) {
@@ -174,14 +180,14 @@
fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
*backendContext.fDeviceFeatures2, instanceVersion,
physicalDeviceVersion,
- *backendContext.fVkExtensions));
+ *backendContext.fVkExtensions, fProtectedContext));
} else if (backendContext.fDeviceFeatures) {
VkPhysicalDeviceFeatures2 features2;
features2.pNext = nullptr;
features2.features = *backendContext.fDeviceFeatures;
fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
features2, instanceVersion, physicalDeviceVersion,
- *backendContext.fVkExtensions));
+ *backendContext.fVkExtensions, fProtectedContext));
} else {
VkPhysicalDeviceFeatures2 features;
memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
@@ -205,7 +211,8 @@
backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
}
fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
- features, instanceVersion, physicalDeviceVersion, extensions));
+ features, instanceVersion, physicalDeviceVersion, extensions,
+ fProtectedContext));
}
fCaps.reset(SkRef(fVkCaps.get()));
@@ -517,6 +524,9 @@
size_t offset) {
SkASSERT(surface);
SkASSERT(transferBuffer);
+ if (fProtectedContext == GrProtected::kYes) {
+ return false;
+ }
GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
@@ -1042,7 +1052,7 @@
imageDesc.fSamples = 1;
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
- imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ imageDesc.fIsProtected = desc.fIsProtected;
GrMipMapsStatus mipMapsStatus = GrMipMapsStatus::kNotAllocated;
if (mipLevels > 1) {
@@ -1184,12 +1194,17 @@
return nullptr;
}
+ if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
GrSurfaceDesc surfDesc;
surfDesc.fFlags = kNone_GrSurfaceFlags;
surfDesc.fWidth = backendTex.width();
surfDesc.fHeight = backendTex.height();
surfDesc.fConfig = backendTex.config();
surfDesc.fSampleCnt = 1;
+ surfDesc.fIsProtected = backendTex.isProtected() ? GrProtected::kYes : GrProtected::kNo;
sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
SkASSERT(layout);
@@ -1216,10 +1231,15 @@
return nullptr;
}
+ if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
GrSurfaceDesc surfDesc;
surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
surfDesc.fWidth = backendTex.width();
surfDesc.fHeight = backendTex.height();
+ surfDesc.fIsProtected = backendTex.isProtected() ? GrProtected::kYes : GrProtected::kNo;
surfDesc.fConfig = backendTex.config();
surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config());
@@ -1251,11 +1271,15 @@
return nullptr;
}
+ if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = backendRT.width();
desc.fHeight = backendRT.height();
+ desc.fIsProtected = backendRT.isProtected() ? GrProtected::kYes : GrProtected::kNo;
desc.fConfig = backendRT.config();
desc.fSampleCnt = 1;
@@ -1287,10 +1311,15 @@
return nullptr;
}
+ if (tex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = tex.width();
desc.fHeight = tex.height();
+ desc.fIsProtected = tex.isProtected() ? GrProtected::kYes : GrProtected::kNo;
desc.fConfig = tex.config();
desc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config());
if (!desc.fSampleCnt) {
@@ -1530,12 +1559,18 @@
bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool texturable,
bool renderable, GrMipMapped mipMapped, const void* srcData,
size_t srcRowBytes, const SkColor4f* color,
- GrVkImageInfo* info) {
+ GrVkImageInfo* info, GrProtected isProtected) {
SkASSERT(texturable || renderable);
if (!texturable) {
SkASSERT(GrMipMapped::kNo == mipMapped);
SkASSERT(!srcData);
}
+
+ if (fProtectedContext != isProtected) {
+ SkDebugf("Can only create protected image in protected context\n");
+ return false;
+ }
+
VkFormat vkFormat;
if (!GrPixelConfigToVkFormat(config, &vkFormat)) {
return false;
@@ -1580,8 +1615,10 @@
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ imageDesc.fIsProtected = fProtectedContext;
if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
+ SkDebugf("Failed to init image info\n");
return false;
}
@@ -1633,7 +1670,7 @@
VkBufferCreateInfo bufInfo;
memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- bufInfo.flags = 0;
+ bufInfo.flags = fProtectedContext == GrProtected::kYes ? VK_BUFFER_CREATE_PROTECTED_BIT : 0;
bufInfo.size = combinedBufferSize;
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
@@ -1725,10 +1762,18 @@
err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
SkASSERT(!err);
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (fProtectedContext == GrProtected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submitInfo.pNext = nullptr;
+ submitInfo.pNext = fProtectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
submitInfo.waitSemaphoreCount = 0;
submitInfo.pWaitSemaphores = nullptr;
submitInfo.pWaitDstStageMask = 0;
@@ -1847,33 +1892,42 @@
GrMipMapped mipMapped,
GrRenderable renderable,
const void* srcData, size_t rowBytes,
- const SkColor4f* color) {
+ const SkColor4f* color, GrProtected isProtected) {
this->handleDirtyContext();
+ if (fProtectedContext != isProtected) {
+ SkDebugf("Can only create protected image in protected context\n");
+ return GrBackendTexture();
+ }
+
if (w > this->caps()->maxTextureSize() || h > this->caps()->maxTextureSize()) {
return GrBackendTexture();
}
const VkFormat* vkFormat = format.getVkFormat();
if (!vkFormat) {
+ SkDebugf("Could not get vkformat\n");
return GrBackendTexture();
}
GrPixelConfig config;
if (!vk_format_to_pixel_config(*vkFormat, &config)) {
+ SkDebugf("Could not get pixel config from vkformat\n");
return GrBackendTexture();
}
if (!this->caps()->isConfigTexturable(config)) {
+ SkDebugf("Config is not texturable\n");
return GrBackendTexture();
}
GrVkImageInfo info;
if (!this->createTestingOnlyVkImage(config, w, h, true, GrRenderable::kYes == renderable,
- mipMapped, srcData, rowBytes, color, &info)) {
- return {};
+ mipMapped, srcData, rowBytes, color, &info, isProtected)) {
+ SkDebugf("Failed to create testing only image\n");
+ return GrBackendTexture();
}
- GrBackendTexture beTex = GrBackendTexture(w, h, info);
+ GrBackendTexture beTex = GrBackendTexture(w, h, isProtected, info);
#if GR_TEST_UTILS
// Lots of tests don't go through Skia's public interface which will set the config so for
// testing we make sure we set a config here.
@@ -1928,7 +1982,7 @@
GrVkImageInfo info;
if (!this->createTestingOnlyVkImage(config, w, h, false, true, GrMipMapped::kNo, nullptr, 0,
- &SkColors::kTransparent, &info)) {
+ &SkColors::kTransparent, &info, GrProtected::kNo)) {
return {};
}
GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, 0, info);
@@ -2084,8 +2138,11 @@
bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
SkASSERT(this->vkCaps().canCopyImage(dst->config(), dstSampleCnt, dstHasYcbcr,
src->config(), srcSampleCnt, srcHasYcbcr));
-
#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected\n");
+ return;
+ }
// These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
// the cache is flushed since it is only being written to.
@@ -2136,6 +2193,11 @@
srcImage->isLinearTiled(), srcHasYcbcr));
#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected\n");
+ return;
+ }
+
dstImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
@@ -2174,6 +2236,10 @@
void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) {
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected\n");
+ return;
+ }
GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
this->resolveImage(dst, srcRT, srcRect, dstPoint);
SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
@@ -2192,6 +2258,10 @@
SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
}
#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected\n");
+ return false;
+ }
GrPixelConfig dstConfig = dst->config();
GrPixelConfig srcConfig = src->config();
@@ -2248,6 +2318,10 @@
bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
GrColorType dstColorType, void* buffer, size_t rowBytes) {
+ if (surface->isProtected()) {
+ return false;
+ }
+
if (GrPixelConfigToColorType(surface->config()) != dstColorType) {
return false;
}
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 70a1ce5..5600eac 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -62,6 +62,7 @@
const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
return fPhysDevMemProps;
}
+ bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }
GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
@@ -82,7 +83,8 @@
GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
GrMipMapped, GrRenderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) override;
+ const SkColor4f* color,
+ GrProtected isProtected) override;
void deleteBackendTexture(const GrBackendTexture&) override;
#if GR_TEST_UTILS
bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
@@ -261,7 +263,8 @@
bool createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool texturable,
bool renderable, GrMipMapped mipMapped, const void* srcData,
- size_t srcRowBytes, const SkColor4f* color, GrVkImageInfo* info);
+ size_t srcRowBytes, const SkColor4f* color, GrVkImageInfo* info,
+ GrProtected isProtected);
sk_sp<const GrVkInterface> fInterface;
sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
@@ -297,6 +300,8 @@
// vulkan context.
bool fDisconnected;
+ GrProtected fProtectedContext;
+
std::unique_ptr<GrVkGpuRTCommandBuffer> fCachedRTCommandBuffer;
std::unique_ptr<GrVkGpuTextureCommandBuffer> fCachedTexCommandBuffer;
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index 422c95a..d893d2e 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -92,6 +92,7 @@
void GrVkGpuTextureCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) {
+ SkASSERT(!src->isProtected() || (fTexture->isProtected() && fGpu->protectedContext()));
fTasks.emplace<Copy>(src, srcRect, dstPoint, false);
}
@@ -629,6 +630,7 @@
const GrVkRenderPass* oldRP = cbInfo.fRenderPass;
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
+ SkASSERT(!src->isProtected() || (fRenderTarget->isProtected() && fGpu->protectedContext()));
const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
vkRT->compatibleRenderPassHandle();
if (rpHandle.isValid()) {
@@ -795,6 +797,9 @@
}
}
cbInfo.fSampledTextures.push_back(vkTexture);
+
+ SkASSERT(!texture->isProtected() ||
+ (fRenderTarget->isProtected() && fGpu->protectedContext()));
};
if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index f66c70c..059f191 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -157,6 +157,9 @@
if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
return false;
}
+ if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
+ return false;
+ }
VkImage image = VK_NULL_HANDLE;
GrVkAlloc alloc;
@@ -173,10 +176,11 @@
SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
VK_SAMPLE_COUNT_1_BIT == vkSamples);
+ VkImageCreateFlags createflags = gpu->protectedContext() ? VK_IMAGE_CREATE_PROTECTED_BIT : 0;
const VkImageCreateInfo imageCreateInfo = {
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
nullptr, // pNext
- 0, // VkImageCreateFlags
+ createflags, // VkImageCreateFlags
imageDesc.fImageType, // VkImageType
imageDesc.fFormat, // VkFormat
{ imageDesc.fWidth, imageDesc.fHeight, 1 }, // VkExtent3D
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index ec59be6..10083a8 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -121,17 +121,19 @@
VkImageTiling fImageTiling;
VkImageUsageFlags fUsageFlags;
VkFlags fMemProps;
+ GrProtected fIsProtected;
ImageDesc()
- : fImageType(VK_IMAGE_TYPE_2D)
- , fFormat(VK_FORMAT_UNDEFINED)
- , fWidth(0)
- , fHeight(0)
- , fLevels(1)
- , fSamples(1)
- , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
- , fUsageFlags(0)
- , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {}
+ : fImageType(VK_IMAGE_TYPE_2D)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fWidth(0)
+ , fHeight(0)
+ , fLevels(1)
+ , fSamples(1)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fUsageFlags(0)
+ , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+ , fIsProtected(GrProtected::kNo) {}
};
static bool InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo*);
diff --git a/src/gpu/vk/GrVkIndexBuffer.cpp b/src/gpu/vk/GrVkIndexBuffer.cpp
index a440d4a..c8dfd94 100644
--- a/src/gpu/vk/GrVkIndexBuffer.cpp
+++ b/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -18,7 +18,7 @@
sk_sp<GrVkIndexBuffer> GrVkIndexBuffer::Make(GrVkGpu* gpu, size_t size, bool dynamic) {
GrVkBuffer::Desc desc;
- desc.fDynamic = dynamic;
+ desc.fDynamic = gpu->protectedContext() ? true : dynamic;
desc.fType = GrVkBuffer::kIndex_Type;
desc.fSizeInBytes = size;
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index 9e49721..6ffe08a 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -98,7 +98,10 @@
GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));
AllocationPropertyFlags propFlags;
- if (memReqs.size > kMaxSmallImageSize || gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
+ if (gpu->protectedContext()) {
+ propFlags = AllocationPropertyFlags::kProtected;
+ } else if (memReqs.size > kMaxSmallImageSize ||
+ gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
propFlags = AllocationPropertyFlags::kDedicatedAllocation;
} else {
propFlags = AllocationPropertyFlags::kNone;
diff --git a/src/gpu/vk/GrVkRenderTarget.cpp b/src/gpu/vk/GrVkRenderTarget.cpp
index 786f502..d3acfc1 100644
--- a/src/gpu/vk/GrVkRenderTarget.cpp
+++ b/src/gpu/vk/GrVkRenderTarget.cpp
@@ -156,6 +156,7 @@
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ msImageDesc.fIsProtected = desc.fIsProtected;
if (!GrVkImage::InitImageInfo(gpu, msImageDesc, &msInfo)) {
return nullptr;
@@ -377,7 +378,8 @@
GrBackendRenderTarget GrVkRenderTarget::getBackendRenderTarget() const {
SkASSERT(!this->wrapsSecondaryCommandBuffer());
return GrBackendRenderTarget(this->width(), this->height(), this->numSamples(),
- fInfo, this->grVkImageLayout());
+ this->isProtected() ? GrProtected::kYes : GrProtected::kNo, fInfo,
+ this->grVkImageLayout());
}
const GrVkResource* GrVkRenderTarget::stencilImageResource() const {
diff --git a/src/gpu/vk/GrVkTexture.cpp b/src/gpu/vk/GrVkTexture.cpp
index bd443e4..eb0637f 100644
--- a/src/gpu/vk/GrVkTexture.cpp
+++ b/src/gpu/vk/GrVkTexture.cpp
@@ -157,7 +157,9 @@
}
GrBackendTexture GrVkTexture::getBackendTexture() const {
- return GrBackendTexture(this->width(), this->height(), fInfo, this->grVkImageLayout());
+ return GrBackendTexture(this->width(), this->height(),
+ this->isProtected() ? GrProtected::kYes : GrProtected::kNo, fInfo,
+ this->grVkImageLayout());
}
GrVkGpu* GrVkTexture::getVkGpu() const {
diff --git a/src/gpu/vk/GrVkVertexBuffer.cpp b/src/gpu/vk/GrVkVertexBuffer.cpp
index 5be5463..50cfc34 100644
--- a/src/gpu/vk/GrVkVertexBuffer.cpp
+++ b/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -18,7 +18,7 @@
sk_sp<GrVkVertexBuffer> GrVkVertexBuffer::Make(GrVkGpu* gpu, size_t size, bool dynamic) {
GrVkBuffer::Desc desc;
- desc.fDynamic = dynamic;
+ desc.fDynamic = gpu->protectedContext() ? true : dynamic;
desc.fType = GrVkBuffer::kVertex_Type;
desc.fSizeInBytes = size;