Enable creating protected Vulkan backend
This CL allows the client to indicate, at GrVkBackendContext creation
time, that they will be working with protected content, which results
in protected CommandPool and Queue usage.
Bug: skia:9016
Change-Id: I6a478d688b6988c2c5e5e98f18f58fb21f9d26ae
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/210067
Commit-Queue: Greg Daniel <egdaniel@google.com>
Auto-Submit: Emircan Uysaler <emircan@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
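For context while reviewing: a minimal sketch of how a client is expected to opt in. The fProtectedContext field and GrContext::MakeVulkan() are the entry points this CL plumbs through; the instance/device/queue setup (including creating the queue as protected-capable via VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) is the client's responsibility and is only assumed here, not part of the change.

    // Sketch only: instance, physicalDevice, device, queue, queueIndex and
    // getProc are placeholders for the client's own Vulkan setup, with the
    // device/queue created protected-capable (VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT).
    GrVkBackendContext backendContext;
    backendContext.fInstance = instance;
    backendContext.fPhysicalDevice = physicalDevice;
    backendContext.fDevice = device;
    backendContext.fQueue = queue;
    backendContext.fGraphicsQueueIndex = queueIndex;
    backendContext.fGetProc = getProc;
    backendContext.fProtectedContext = GrProtected::kYes;  // new in this CL
    sk_sp<GrContext> context = GrContext::MakeVulkan(backendContext);
    // GrVkGpu::Make() returns nullptr (so MakeVulkan() fails) when the device
    // does not report protected-memory support.
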
diff --git a/src/gpu/GrBackendSurface.cpp b/src/gpu/GrBackendSurface.cpp
index 8eff3e4..ee24b89 100644
--- a/src/gpu/GrBackendSurface.cpp
+++ b/src/gpu/GrBackendSurface.cpp
@@ -180,8 +180,14 @@
GrBackendTexture::GrBackendTexture(int width,
int height,
const GrVkImageInfo& vkInfo)
+ : GrBackendTexture(width, height, GrProtected::kNo, vkInfo) {}
+
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ GrProtected isProtected,
+ const GrVkImageInfo& vkInfo)
#ifdef SK_VULKAN
- : GrBackendTexture(width, height, vkInfo,
+ : GrBackendTexture(width, height, isProtected, vkInfo,
sk_sp<GrVkImageLayout>(new GrVkImageLayout(vkInfo.fImageLayout))) {}
#else
: fIsValid(false) {}
@@ -212,16 +218,17 @@
#ifdef SK_VULKAN
GrBackendTexture::GrBackendTexture(int width,
int height,
+ GrProtected isProtected,
const GrVkImageInfo& vkInfo,
sk_sp<GrVkImageLayout> layout)
: fIsValid(true)
+ , fIsProtected(isProtected)
, fWidth(width)
, fHeight(height)
, fConfig(kUnknown_GrPixelConfig)
, fMipMapped(GrMipMapped(vkInfo.fLevelCount > 1))
, fBackend(GrBackendApi::kVulkan)
- , fVkInfo(vkInfo, layout.release()) {
-}
+ , fVkInfo(vkInfo, layout.release()) {}
#endif
#ifdef SK_METAL
@@ -290,6 +297,7 @@
fIsValid = false;
}
fWidth = that.fWidth;
+ fIsProtected = that.fIsProtected;
fHeight = that.fHeight;
fConfig = that.fConfig;
fMipMapped = that.fMipMapped;
@@ -494,7 +502,7 @@
int sampleCnt,
int stencilBits,
const GrVkImageInfo& vkInfo)
- : GrBackendRenderTarget(width, height, sampleCnt, vkInfo) {
+ : GrBackendRenderTarget(width, height, sampleCnt, GrProtected::kNo, vkInfo) {
// This is a deprecated constructor that takes a bogus stencil bits.
SkASSERT(0 == stencilBits);
}
@@ -504,7 +512,18 @@
int sampleCnt,
const GrVkImageInfo& vkInfo)
#ifdef SK_VULKAN
- : GrBackendRenderTarget(width, height, sampleCnt, vkInfo,
+ : GrBackendRenderTarget(width, height, sampleCnt, GrProtected::kNo, vkInfo) {}
+#else
+ : fIsValid(false) {}
+#endif
+
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ GrProtected isProtected,
+ const GrVkImageInfo& vkInfo)
+#ifdef SK_VULKAN
+ : GrBackendRenderTarget(width, height, sampleCnt, isProtected, vkInfo,
sk_sp<GrVkImageLayout>(new GrVkImageLayout(vkInfo.fImageLayout))) {}
#else
: fIsValid(false) {}
@@ -514,9 +533,11 @@
GrBackendRenderTarget::GrBackendRenderTarget(int width,
int height,
int sampleCnt,
+ GrProtected isProtected,
const GrVkImageInfo& vkInfo,
sk_sp<GrVkImageLayout> layout)
: fIsValid(true)
+ , fIsProtected(isProtected)
, fWidth(width)
, fHeight(height)
, fSampleCnt(SkTMax(1, sampleCnt))
@@ -596,6 +617,7 @@
}
fWidth = that.fWidth;
fHeight = that.fHeight;
+ fIsProtected = that.fIsProtected;
fSampleCnt = that.fSampleCnt;
fStencilBits = that.fStencilBits;
fConfig = that.fConfig;
diff --git a/src/gpu/GrCaps.h b/src/gpu/GrCaps.h
index 1bc0c56..3235c69 100644
--- a/src/gpu/GrCaps.h
+++ b/src/gpu/GrCaps.h
@@ -198,13 +198,24 @@
*/
bool surfaceSupportsWritePixels(const GrSurface*) const;
+
+ /**
+ * Indicates whether a surface supports readPixels directly, or which fallback is required.
+ */
+ enum ReadFlags {
+ kSupported_ReadFlag = 0x0,
+ kRequiresCopy_ReadFlag = 0x1,
+ kProtected_ReadFlag = 0x2,
+ };
+
/**
* Backends may have restrictions on what types of surfaces support GrGpu::readPixels().
- * If this returns false then the caller should implement a fallback where a temporary texture
- * is created, the surface is drawn or copied into the temporary, and pixels are read from the
- * temporary.
+ * If this returns kRequiresCopy_ReadFlag then the caller should implement a fallback where a
+ * temporary texture is created, the surface is drawn or copied into the temporary, and
+ * pixels are read from the temporary. If this returns kProtected_ReadFlag, then the caller
+ * should not attempt to read the surface at all.
*/
- virtual bool surfaceSupportsReadPixels(const GrSurface*) const = 0;
+ virtual ReadFlags surfaceSupportsReadPixels(const GrSurface*) const = 0;
/**
* Given a dst pixel config and a src color type what color type must the caller coax the
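A quick sketch of the intended caller pattern for the new ReadFlags return value; it mirrors the GrSurfaceContext change later in this CL, with the surrounding readPixels plumbing elided and caps/srcSurface used as placeholders:

    auto readFlag = caps->surfaceSupportsReadPixels(srcSurface);
    if (readFlag == GrCaps::kProtected_ReadFlag) {
        return false;  // protected surfaces must never be read back
    }
    if (readFlag == GrCaps::kRequiresCopy_ReadFlag) {
        // Draw or copy the surface into a temporary readable texture and
        // read the pixels from that temporary instead.
    }
    // kSupported_ReadFlag: read directly from the surface.
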
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index 925e24e..b7be58a 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -359,16 +359,10 @@
GrBackendFormat format =
context->priv().caps()->getBackendFormatFromColorType(kAlpha_8_SkColorType);
- sk_sp<GrRenderTargetContext> rtc(
- context->priv().makeDeferredRenderTargetContextWithFallback(
- format,
- SkBackingFit::kApprox,
- reducedClip.width(),
- reducedClip.height(),
- kAlpha_8_GrPixelConfig,
- nullptr, 1,
- GrMipMapped::kNo,
- kTopLeft_GrSurfaceOrigin));
+ sk_sp<GrRenderTargetContext> rtc(context->priv().makeDeferredRenderTargetContextWithFallback(
+ format, SkBackingFit::kApprox, reducedClip.width(), reducedClip.height(),
+ kAlpha_8_GrPixelConfig, nullptr, 1, GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin, nullptr,
+ SkBudgeted::kYes, proxy->isProtected() ? GrProtected::kYes : GrProtected::kNo));
if (!rtc) {
return nullptr;
}
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 2226247..9cf02d7 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -320,7 +320,8 @@
GrBackendTexture GrContext::createBackendTexture(int width, int height,
const GrBackendFormat& backendFormat,
GrMipMapped mipMapped,
- GrRenderable renderable) {
+ GrRenderable renderable,
+ GrProtected isProtected) {
if (!this->asDirectContext()) {
return GrBackendTexture();
}
@@ -335,13 +336,14 @@
return fGpu->createBackendTexture(width, height, backendFormat,
mipMapped, renderable,
- nullptr, 0, nullptr);
+ nullptr, 0, nullptr, isProtected);
}
GrBackendTexture GrContext::createBackendTexture(int width, int height,
SkColorType colorType,
GrMipMapped mipMapped,
- GrRenderable renderable) {
+ GrRenderable renderable,
+ GrProtected isProtected) {
if (!this->asDirectContext()) {
return GrBackendTexture();
}
@@ -355,7 +357,7 @@
return GrBackendTexture();
}
- return this->createBackendTexture(width, height, format, mipMapped, renderable);
+ return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
}
GrBackendTexture GrContext::createBackendTexture(int width, int height,
@@ -377,7 +379,7 @@
return fGpu->createBackendTexture(width, height, backendFormat,
mipMapped, renderable,
- nullptr, 0, &color);
+ nullptr, 0, &color, GrProtected::kNo);
}
GrBackendTexture GrContext::createBackendTexture(int width, int height,
diff --git a/src/gpu/GrContextPriv.cpp b/src/gpu/GrContextPriv.cpp
index 562ba85..2a23fe7 100644
--- a/src/gpu/GrContextPriv.cpp
+++ b/src/gpu/GrContextPriv.cpp
@@ -69,19 +69,13 @@
}
sk_sp<GrRenderTargetContext> GrContextPriv::makeDeferredRenderTargetContext(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt,
- GrMipMapped mipMapped,
- GrSurfaceOrigin origin,
- const SkSurfaceProps* surfaceProps,
- SkBudgeted budgeted) {
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt, GrMipMapped mipMapped,
+ GrSurfaceOrigin origin, const SkSurfaceProps* surfaceProps, SkBudgeted budgeted,
+ GrProtected isProtected) {
return fContext->makeDeferredRenderTargetContext(format, fit, width, height, config,
std::move(colorSpace), sampleCnt, mipMapped,
- origin, surfaceProps, budgeted);
+ origin, surfaceProps, budgeted, isProtected);
}
sk_sp<GrRenderTargetContext> GrContextPriv::makeDeferredRenderTargetContextWithFallback(
@@ -423,5 +417,5 @@
return gpu->createBackendTexture(baseWidth, baseHeight, backendFormat,
GrMipMapped::kNo, // TODO: use real mipmap setting here
renderable, srcData[0].addr(), srcData[0].rowBytes(),
- nullptr);
+ nullptr, GrProtected::kNo);
}
diff --git a/src/gpu/GrContextPriv.h b/src/gpu/GrContextPriv.h
index 6e261e3..7fbf8f8 100644
--- a/src/gpu/GrContextPriv.h
+++ b/src/gpu/GrContextPriv.h
@@ -92,16 +92,11 @@
* renderTargetContexts created via this entry point.
*/
sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContext(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt = 1,
- GrMipMapped = GrMipMapped::kNo,
- GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
- const SkSurfaceProps* surfaceProps = nullptr,
- SkBudgeted = SkBudgeted::kYes);
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo, GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr, SkBudgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
/*
* This method will attempt to create a renderTargetContext that has, at least, the number of
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 025273d..3fb9862 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -388,7 +388,8 @@
virtual GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
GrMipMapped, GrRenderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) = 0;
+ const SkColor4f* color,
+ GrProtected isProtected) = 0;
/**
* Frees a texture created by createBackendTexture(). If ownership of the backend
diff --git a/src/gpu/GrRecordingContext.cpp b/src/gpu/GrRecordingContext.cpp
index bdf5edf..405ec01 100644
--- a/src/gpu/GrRecordingContext.cpp
+++ b/src/gpu/GrRecordingContext.cpp
@@ -190,16 +190,10 @@
}
sk_sp<GrRenderTargetContext> GrRecordingContext::makeDeferredRenderTargetContext(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt,
- GrMipMapped mipMapped,
- GrSurfaceOrigin origin,
- const SkSurfaceProps* surfaceProps,
- SkBudgeted budgeted) {
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt, GrMipMapped mipMapped,
+ GrSurfaceOrigin origin, const SkSurfaceProps* surfaceProps, SkBudgeted budgeted,
+ GrProtected isProtected) {
SkASSERT(sampleCnt > 0);
if (this->abandoned()) {
return nullptr;
@@ -209,6 +203,7 @@
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = width;
desc.fHeight = height;
+ desc.fIsProtected = isProtected;
desc.fConfig = config;
desc.fSampleCnt = sampleCnt;
@@ -263,16 +258,10 @@
}
sk_sp<GrRenderTargetContext> GrRecordingContext::makeDeferredRenderTargetContextWithFallback(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt,
- GrMipMapped mipMapped,
- GrSurfaceOrigin origin,
- const SkSurfaceProps* surfaceProps,
- SkBudgeted budgeted) {
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt, GrMipMapped mipMapped,
+ GrSurfaceOrigin origin, const SkSurfaceProps* surfaceProps, SkBudgeted budgeted,
+ GrProtected isProtected) {
GrBackendFormat localFormat = format;
SkASSERT(sampleCnt > 0);
if (0 == this->caps()->getRenderTargetSampleCount(sampleCnt, config)) {
@@ -331,36 +320,24 @@
}
sk_sp<GrRenderTargetContext> GrRecordingContextPriv::makeDeferredRenderTargetContext(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt,
- GrMipMapped mipMapped,
- GrSurfaceOrigin origin,
- const SkSurfaceProps* surfaceProps,
- SkBudgeted budgeted) {
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt, GrMipMapped mipMapped,
+ GrSurfaceOrigin origin, const SkSurfaceProps* surfaceProps, SkBudgeted budgeted,
+ GrProtected isProtected) {
return fContext->makeDeferredRenderTargetContext(format, fit, width, height, config,
std::move(colorSpace), sampleCnt, mipMapped,
- origin, surfaceProps, budgeted);
+ origin, surfaceProps, budgeted, isProtected);
}
sk_sp<GrRenderTargetContext> GrRecordingContextPriv::makeDeferredRenderTargetContextWithFallback(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt,
- GrMipMapped mipMapped,
- GrSurfaceOrigin origin,
- const SkSurfaceProps* surfaceProps,
- SkBudgeted budgeted) {
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt, GrMipMapped mipMapped,
+ GrSurfaceOrigin origin, const SkSurfaceProps* surfaceProps, SkBudgeted budgeted,
+ GrProtected isProtected) {
return fContext->makeDeferredRenderTargetContextWithFallback(format, fit, width, height, config,
std::move(colorSpace), sampleCnt,
mipMapped, origin, surfaceProps,
- budgeted);
+ budgeted, isProtected);
}
GrContext* GrRecordingContextPriv::backdoor() {
diff --git a/src/gpu/GrRecordingContextPriv.h b/src/gpu/GrRecordingContextPriv.h
index 0b9a3f9..ff4012e 100644
--- a/src/gpu/GrRecordingContextPriv.h
+++ b/src/gpu/GrRecordingContextPriv.h
@@ -76,16 +76,11 @@
* renderTargetContexts created via this entry point.
*/
sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContext(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt = 1,
- GrMipMapped = GrMipMapped::kNo,
- GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
- const SkSurfaceProps* surfaceProps = nullptr,
- SkBudgeted = SkBudgeted::kYes);
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo, GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr, SkBudgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
/*
* This method will attempt to create a renderTargetContext that has, at least, the number of
@@ -94,16 +89,11 @@
* SRGB-ness will be preserved.
*/
sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContextWithFallback(
- const GrBackendFormat& format,
- SkBackingFit fit,
- int width, int height,
- GrPixelConfig config,
- sk_sp<SkColorSpace> colorSpace,
- int sampleCnt = 1,
- GrMipMapped = GrMipMapped::kNo,
- GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
- const SkSurfaceProps* surfaceProps = nullptr,
- SkBudgeted budgeted = SkBudgeted::kYes);
+ const GrBackendFormat& format, SkBackingFit fit, int width, int height,
+ GrPixelConfig config, sk_sp<SkColorSpace> colorSpace, int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo, GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr, SkBudgeted budgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
GrAuditTrail* auditTrail() { return fContext->auditTrail(); }
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index d67db2a..3c3af4a 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -2701,6 +2701,8 @@
desc.fConfig = rtProxy->config();
}
+ desc.fIsProtected = rtProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo;
+
if (!disallowSubrect) {
copyRect = clippedRect;
}
diff --git a/src/gpu/GrSurfaceContext.cpp b/src/gpu/GrSurfaceContext.cpp
index 9a8abb3..49e050a 100644
--- a/src/gpu/GrSurfaceContext.cpp
+++ b/src/gpu/GrSurfaceContext.cpp
@@ -178,8 +178,12 @@
caps->isConfigRenderable(kRGBA_8888_GrPixelConfig) &&
direct->priv().validPMUPMConversionExists();
- if (!caps->surfaceSupportsReadPixels(srcSurface) ||
- canvas2DFastPath) {
+ auto readFlag = caps->surfaceSupportsReadPixels(srcSurface);
+ if (readFlag == GrCaps::kProtected_ReadFlag) {
+ return false;
+ }
+
+ if (readFlag == GrCaps::kRequiresCopy_ReadFlag || canvas2DFastPath) {
GrBackendFormat format;
GrPixelConfig config;
if (canvas2DFastPath) {
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index 0370263..776ddc4 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -69,6 +69,7 @@
, fLazyInstantiateCallback(std::move(callback))
, fLazyInstantiationType(lazyType)
, fNeedsClear(SkToBool(desc.fFlags & kPerformInitialClear_GrSurfaceFlag))
+ , fIsProtected(desc.fIsProtected)
, fGpuMemorySize(kInvalidGpuMemorySize)
, fLastOpList(nullptr) {
SkASSERT(fFormat.isValid());
@@ -102,6 +103,7 @@
: SkBudgeted::kNo)
, fUniqueID(fTarget->uniqueID()) // Note: converting from unique resource ID to a proxy ID!
, fNeedsClear(false)
+ , fIsProtected(fTarget->isProtected() ? GrProtected::kYes : GrProtected::kNo)
, fGpuMemorySize(kInvalidGpuMemorySize)
, fLastOpList(nullptr) {
SkASSERT(fFormat.isValid());
@@ -143,6 +145,7 @@
}
desc.fWidth = fWidth;
desc.fHeight = fHeight;
+ desc.fIsProtected = fIsProtected;
desc.fConfig = fConfig;
desc.fSampleCnt = sampleCnt;
@@ -342,6 +345,7 @@
RectsMustMatch rectsMustMatch) {
SkASSERT(LazyState::kFully != src->lazyInstantiationState());
GrSurfaceDesc dstDesc;
+ dstDesc.fIsProtected = src->isProtected() ? GrProtected::kYes : GrProtected::kNo;
dstDesc.fConfig = src->config();
SkIPoint dstPoint;
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index 0b289e3..02cbe24 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -320,6 +320,9 @@
inline GrSurfaceProxyPriv priv();
inline const GrSurfaceProxyPriv priv() const;
+ // Returns true if we are working with protected content.
+ bool isProtected() const { return fIsProtected == GrProtected::kYes; }
+
protected:
// Deferred version
GrSurfaceProxy(const GrBackendFormat& format, const GrSurfaceDesc& desc,
@@ -420,6 +423,7 @@
bool fNeedsClear;
bool fIgnoredByResourceAllocator = false;
+ GrProtected fIsProtected;
// This entry is lazily evaluated so, when the proxy wraps a resource, the resource
// will be called but, when the proxy is deferred, it will compute the answer itself.
diff --git a/src/gpu/SkGpuDevice.cpp b/src/gpu/SkGpuDevice.cpp
index b8e82b5..50333f3 100644
--- a/src/gpu/SkGpuDevice.cpp
+++ b/src/gpu/SkGpuDevice.cpp
@@ -1693,7 +1693,9 @@
format, fit, cinfo.fInfo.width(), cinfo.fInfo.height(), config,
fRenderTargetContext->colorSpaceInfo().refColorSpace(),
fRenderTargetContext->numSamples(), GrMipMapped::kNo,
- kBottomLeft_GrSurfaceOrigin, &props));
+ kBottomLeft_GrSurfaceOrigin, &props, SkBudgeted::kYes,
+ fRenderTargetContext->asSurfaceProxy()->isProtected() ? GrProtected::kYes
+ : GrProtected::kNo));
if (!rtc) {
return nullptr;
}
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index eb8f061..8405436 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -3065,15 +3065,15 @@
return true;
}
-bool GrGLCaps::surfaceSupportsReadPixels(const GrSurface* surface) const {
+GrCaps::ReadFlags GrGLCaps::surfaceSupportsReadPixels(const GrSurface* surface) const {
if (auto tex = static_cast<const GrGLTexture*>(surface->asTexture())) {
// We don't support reading pixels directly from EXTERNAL textures as it would require
// binding the texture to a FBO.
if (tex->target() == GR_GL_TEXTURE_EXTERNAL) {
- return false;
+ return kRequiresCopy_ReadFlag;
}
}
- return true;
+ return kSupported_ReadFlag;
}
GrCaps::SupportedRead GrGLCaps::supportedReadPixelsColorType(GrPixelConfig srcPixelConfig,
diff --git a/src/gpu/gl/GrGLCaps.h b/src/gpu/gl/GrGLCaps.h
index ef883f2..cc7cb35 100644
--- a/src/gpu/gl/GrGLCaps.h
+++ b/src/gpu/gl/GrGLCaps.h
@@ -301,7 +301,7 @@
/// Use indices or vertices in CPU arrays rather than VBOs for dynamic content.
bool useNonVBOVertexAndIndexDynamicData() const { return fUseNonVBOVertexAndIndexDynamicData; }
- bool surfaceSupportsReadPixels(const GrSurface*) const override;
+ ReadFlags surfaceSupportsReadPixels(const GrSurface*) const override;
SupportedRead supportedReadPixelsColorType(GrPixelConfig, const GrBackendFormat&,
GrColorType) const override;
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 52e59b7..1efa4e4 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -4005,7 +4005,8 @@
GrMipMapped mipMapped,
GrRenderable renderable,
const void* srcPixels, size_t rowBytes,
- const SkColor4f* color) {
+ const SkColor4f* color,
+ GrProtected isProtected) {
this->handleDirtyContext();
const GrGLenum* glFormat = format.getGLFormat();
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 9b25de0..47e75e6 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -139,7 +139,8 @@
GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
GrMipMapped, GrRenderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) override;
+ const SkColor4f* color,
+ GrProtected isProtected) override;
void deleteBackendTexture(const GrBackendTexture&) override;
#if GR_TEST_UTILS
diff --git a/src/gpu/mock/GrMockCaps.h b/src/gpu/mock/GrMockCaps.h
index c3806d1..41fff59 100644
--- a/src/gpu/mock/GrMockCaps.h
+++ b/src/gpu/mock/GrMockCaps.h
@@ -68,7 +68,9 @@
return 0;
}
- bool surfaceSupportsReadPixels(const GrSurface*) const override { return true; }
+ ReadFlags surfaceSupportsReadPixels(const GrSurface*) const override {
+ return kSupported_ReadFlag;
+ }
bool initDescForDstCopy(const GrRenderTargetProxy* src, GrSurfaceDesc* desc,
bool* rectsMustMatch, bool* disallowSubrect) const override {
diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp
index b873269..f0355e5 100644
--- a/src/gpu/mock/GrMockGpu.cpp
+++ b/src/gpu/mock/GrMockGpu.cpp
@@ -202,7 +202,8 @@
GrRenderable /* renderable */,
const void* /* pixels */,
size_t /* rowBytes */,
- const SkColor4f* /* color */) {
+ const SkColor4f* /* color */,
+ GrProtected /* isProtected */) {
const GrPixelConfig* pixelConfig = format.getMockFormat();
if (!pixelConfig) {
diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h
index e63e51d..b95f90b 100644
--- a/src/gpu/mock/GrMockGpu.h
+++ b/src/gpu/mock/GrMockGpu.h
@@ -123,7 +123,7 @@
GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
GrMipMapped, GrRenderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) override;
+ const SkColor4f* color, GrProtected isProtected) override;
void deleteBackendTexture(const GrBackendTexture&) override;
#if GR_TEST_UTILS
diff --git a/src/gpu/mtl/GrMtlCaps.h b/src/gpu/mtl/GrMtlCaps.h
index 2ee1d88..091fc7f 100644
--- a/src/gpu/mtl/GrMtlCaps.h
+++ b/src/gpu/mtl/GrMtlCaps.h
@@ -33,7 +33,9 @@
int getRenderTargetSampleCount(int requestedCount, GrPixelConfig) const override;
int maxRenderTargetSampleCount(GrPixelConfig) const override;
- bool surfaceSupportsReadPixels(const GrSurface*) const override { return true; }
+ ReadFlags surfaceSupportsReadPixels(const GrSurface*) const override {
+ return kSupported_ReadFlag;
+ }
bool isConfigCopyable(GrPixelConfig config) const override {
return true;
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index 48c0247..6d5979a 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -58,7 +58,8 @@
GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
GrMipMapped, GrRenderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) override;
+ const SkColor4f* color,
+ GrProtected isProtected) override;
void deleteBackendTexture(const GrBackendTexture&) override;
diff --git a/src/gpu/mtl/GrMtlGpu.mm b/src/gpu/mtl/GrMtlGpu.mm
index 27c2b7d..b431acc 100644
--- a/src/gpu/mtl/GrMtlGpu.mm
+++ b/src/gpu/mtl/GrMtlGpu.mm
@@ -781,7 +781,7 @@
GrMipMapped mipMapped,
GrRenderable renderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) {
+ const SkColor4f* color, GrProtected isProtected) {
if (w > this->caps()->maxTextureSize() || h > this->caps()->maxTextureSize()) {
return GrBackendTexture();
}
diff --git a/src/gpu/vk/GrVkAMDMemoryAllocator.cpp b/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
index 7e3c0c6..0fe6526 100644
--- a/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
+++ b/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
@@ -81,6 +81,10 @@
info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
}
+ if (AllocationPropertyFlags::kProtected & flags) {
+ info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
+ }
+
VmaAllocation allocation;
VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
if (VK_SUCCESS != result) {
diff --git a/src/gpu/vk/GrVkBuffer.cpp b/src/gpu/vk/GrVkBuffer.cpp
index 4b84080..833dfdb5 100644
--- a/src/gpu/vk/GrVkBuffer.cpp
+++ b/src/gpu/vk/GrVkBuffer.cpp
@@ -20,6 +20,7 @@
#endif
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+ SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
VkBuffer buffer;
GrVkAlloc alloc;
@@ -186,6 +187,8 @@
void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
SkASSERT(src);
+ // We should never call this method in protected contexts.
+ SkASSERT(!gpu->protectedContext());
// The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
// to 65536 bytes and a size the is 4 byte aligned.
if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
diff --git a/src/gpu/vk/GrVkCaps.cpp b/src/gpu/vk/GrVkCaps.cpp
index 6ef4b5d..dfd6ef0 100644
--- a/src/gpu/vk/GrVkCaps.cpp
+++ b/src/gpu/vk/GrVkCaps.cpp
@@ -17,12 +17,15 @@
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"
+#ifdef SK_BUILD_FOR_ANDROID
+#include <sys/system_properties.h>
+#endif
+
GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice physDev, const VkPhysicalDeviceFeatures2& features,
uint32_t instanceVersion, uint32_t physicalDeviceVersion,
- const GrVkExtensions& extensions)
- : INHERITED(contextOptions) {
-
+ const GrVkExtensions& extensions, GrProtected isProtected)
+ : INHERITED(contextOptions) {
/**************************************************************************
* GrCaps fields
**************************************************************************/
@@ -50,7 +53,8 @@
fShaderCaps.reset(new GrShaderCaps(contextOptions));
- this->init(contextOptions, vkInterface, physDev, features, physicalDeviceVersion, extensions);
+ this->init(contextOptions, vkInterface, physDev, features, physicalDeviceVersion, extensions,
+ isProtected);
}
bool GrVkCaps::initDescForDstCopy(const GrRenderTargetProxy* src, GrSurfaceDesc* desc,
@@ -194,6 +198,10 @@
bool GrVkCaps::onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ if (src->isProtected() && !dst->isProtected()) {
+ return false;
+ }
+
GrPixelConfig dstConfig = dst->config();
GrPixelConfig srcConfig = src->config();
@@ -272,7 +280,8 @@
void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice physDev, const VkPhysicalDeviceFeatures2& features,
- uint32_t physicalDeviceVersion, const GrVkExtensions& extensions) {
+ uint32_t physicalDeviceVersion, const GrVkExtensions& extensions,
+ GrProtected isProtected) {
VkPhysicalDeviceProperties properties;
GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties(physDev, &properties));
@@ -360,6 +369,13 @@
// will return a key of 0.
fYcbcrInfos.push_back(GrVkYcbcrConversionInfo());
+ if ((isProtected == GrProtected::kYes) &&
+ (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0))) {
+ fSupportsProtectedMemory = true;
+ fAvoidUpdateBuffers = true;
+ fShouldAlwaysUseDedicatedImageMemory = true;
+ }
+
this->initGrCaps(vkInterface, physDev, properties, memoryProperties, features, extensions);
this->initShaderCaps(properties, features);
@@ -427,6 +443,17 @@
}
#endif
+#if defined(SK_BUILD_FOR_ANDROID)
+ // Protected memory features have problems in Android P and earlier.
+ if (fSupportsProtectedMemory && (kQualcomm_VkVendor == properties.vendorID)) {
+ char androidAPIVersion[PROP_VALUE_MAX];
+ int strLength = __system_property_get("ro.build.version.sdk", androidAPIVersion);
+ if (strLength == 0 || atoi(androidAPIVersion) <= 28) {
+ fSupportsProtectedMemory = false;
+ }
+ }
+#endif
+
// AMD seems to have issues binding new VkPipelines inside a secondary command buffer.
// Current workaround is to use a different secondary command buffer for each new VkPipeline.
if (kAMD_VkVendor == properties.vendorID) {
@@ -861,14 +888,17 @@
return table[table.count() - 1];
}
-bool GrVkCaps::surfaceSupportsReadPixels(const GrSurface* surface) const {
+GrCaps::ReadFlags GrVkCaps::surfaceSupportsReadPixels(const GrSurface* surface) const {
+ if (surface->isProtected()) {
+ return kProtected_ReadFlag;
+ }
if (auto tex = static_cast<const GrVkTexture*>(surface->asTexture())) {
// We can't directly read from a VkImage that has a ycbcr sampler.
if (tex->ycbcrConversionInfo().isValid()) {
- return false;
+ return kRequiresCopy_ReadFlag;
}
}
- return true;
+ return kSupported_ReadFlag;
}
bool GrVkCaps::onSurfaceSupportsWritePixels(const GrSurface* surface) const {
diff --git a/src/gpu/vk/GrVkCaps.h b/src/gpu/vk/GrVkCaps.h
index b7c7a00..204c3d0 100644
--- a/src/gpu/vk/GrVkCaps.h
+++ b/src/gpu/vk/GrVkCaps.h
@@ -30,7 +30,7 @@
GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice device, const VkPhysicalDeviceFeatures2& features,
uint32_t instanceVersion, uint32_t physicalDeviceVersion,
- const GrVkExtensions& extensions);
+ const GrVkExtensions& extensions, GrProtected isProtected = GrProtected::kNo);
bool isFormatTexturable(VkFormat) const;
bool isConfigTexturable(GrPixelConfig config) const override;
@@ -46,7 +46,7 @@
int maxRenderTargetSampleCount(GrPixelConfig config) const override;
int maxRenderTargetSampleCount(VkFormat format) const;
- bool surfaceSupportsReadPixels(const GrSurface*) const override;
+ ReadFlags surfaceSupportsReadPixels(const GrSurface*) const override;
bool isFormatTexturableLinearly(VkFormat format) const {
return SkToBool(FormatInfo::kTextureable_Flag & this->getFormatInfo(format).fLinearFlags);
@@ -134,6 +134,9 @@
// Returns true if it supports ycbcr conversion for samplers
bool supportsYcbcrConversion() const { return fSupportsYcbcrConversion; }
+ // Returns true if the device supports protected memory.
+ bool supportsProtectedMemory() const { return fSupportsProtectedMemory; }
+
/**
* Helpers used by canCopySurface. In all cases if the SampleCnt parameter is zero that means
* the surface is not a render target, otherwise it is the number of samples in the render
@@ -176,7 +179,7 @@
void init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
VkPhysicalDevice device, const VkPhysicalDeviceFeatures2&,
- uint32_t physicalDeviceVersion, const GrVkExtensions&);
+ uint32_t physicalDeviceVersion, const GrVkExtensions&, GrProtected isProtected);
void initGrCaps(const GrVkInterface* vkInterface,
VkPhysicalDevice physDev,
const VkPhysicalDeviceProperties&,
@@ -249,6 +252,8 @@
bool fSupportsYcbcrConversion = false;
+ bool fSupportsProtectedMemory = false;
+
typedef GrCaps INHERITED;
};
diff --git a/src/gpu/vk/GrVkCommandBuffer.cpp b/src/gpu/vk/GrVkCommandBuffer.cpp
index 0dfdade..e8bba1d 100644
--- a/src/gpu/vk/GrVkCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -541,11 +541,20 @@
uint32_t commandBufferCount,
const VkCommandBuffer* commandBuffers,
uint32_t signalCount,
- const VkSemaphore* signalSemaphores) {
+ const VkSemaphore* signalSemaphores,
+ GrProtected protectedContext) {
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (protectedContext == GrProtected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submitInfo.pNext = nullptr;
+ submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
submitInfo.waitSemaphoreCount = waitCount;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
@@ -582,8 +591,9 @@
if (0 == signalCount && 0 == waitCount) {
// This command buffer has no dependent semaphores so we can simply just submit it to the
// queue with no worries.
- submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr,
- 1, &fCmdBuffer, 0, nullptr);
+ submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr, 1,
+ &fCmdBuffer, 0, nullptr,
+ gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
} else {
SkTArray<VkSemaphore> vkSignalSems(signalCount);
for (int i = 0; i < signalCount; ++i) {
@@ -602,11 +612,10 @@
vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
}
}
- submit_to_queue(gpu->vkInterface(), queue, fSubmitFence,
- vkWaitSems.count(), vkWaitSems.begin(), vkWaitStages.begin(),
- 1, &fCmdBuffer,
- vkSignalSems.count(), vkSignalSems.begin());
-
+ submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, vkWaitSems.count(),
+ vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
+ vkSignalSems.count(), vkSignalSems.begin(),
+ gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
for (int i = 0; i < signalCount; ++i) {
signalSemaphores[i]->markAsSignaled();
}
diff --git a/src/gpu/vk/GrVkCommandPool.cpp b/src/gpu/vk/GrVkCommandPool.cpp
index 22d0fbf..aae9355 100644
--- a/src/gpu/vk/GrVkCommandPool.cpp
+++ b/src/gpu/vk/GrVkCommandPool.cpp
@@ -12,17 +12,24 @@
#include "src/gpu/vk/GrVkGpu.h"
GrVkCommandPool* GrVkCommandPool::Create(const GrVkGpu* gpu) {
- const VkCommandPoolCreateInfo cmdPoolInfo = {
- VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
- nullptr, // pNext
- VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
- VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags
- gpu->queueIndex(), // queueFamilyIndex
- };
- VkCommandPool pool;
- GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateCommandPool(gpu->device(), &cmdPoolInfo,
- nullptr, &pool));
- return new GrVkCommandPool(gpu, pool);
+ VkCommandPoolCreateFlags cmdPoolCreateFlags =
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ if (gpu->protectedContext()) {
+ cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
+ }
+
+ const VkCommandPoolCreateInfo cmdPoolInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
+ nullptr, // pNext
+ cmdPoolCreateFlags, // CmdPoolCreateFlags
+ gpu->queueIndex(), // queueFamilyIndex
+ };
+ VkCommandPool pool;
+ GR_VK_CALL_ERRCHECK(
+ gpu->vkInterface(),
+ CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+ return new GrVkCommandPool(gpu, pool);
}
GrVkCommandPool::GrVkCommandPool(const GrVkGpu* gpu, VkCommandPool commandPool)
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 375db6b..1e579c6 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -141,8 +141,13 @@
}
}
- return sk_sp<GrGpu>(new GrVkGpu(context, options, backendContext, interface, instanceVersion,
- physDevVersion));
+ sk_sp<GrVkGpu> vkGpu(new GrVkGpu(context, options, backendContext, interface,
+ instanceVersion, physDevVersion));
+ if (backendContext.fProtectedContext == GrProtected::kYes &&
+ !vkGpu->vkCaps().supportsProtectedMemory()) {
+ return nullptr;
+ }
+ return std::move(vkGpu);
}
////////////////////////////////////////////////////////////////////////////////
@@ -159,7 +164,8 @@
, fQueue(backendContext.fQueue)
, fQueueIndex(backendContext.fGraphicsQueueIndex)
, fResourceProvider(this)
- , fDisconnected(false) {
+ , fDisconnected(false)
+ , fProtectedContext(backendContext.fProtectedContext) {
SkASSERT(!backendContext.fOwnsInstanceAndDevice);
if (!fMemoryAllocator) {
@@ -174,14 +180,14 @@
fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
*backendContext.fDeviceFeatures2, instanceVersion,
physicalDeviceVersion,
- *backendContext.fVkExtensions));
+ *backendContext.fVkExtensions, fProtectedContext));
} else if (backendContext.fDeviceFeatures) {
VkPhysicalDeviceFeatures2 features2;
features2.pNext = nullptr;
features2.features = *backendContext.fDeviceFeatures;
fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
features2, instanceVersion, physicalDeviceVersion,
- *backendContext.fVkExtensions));
+ *backendContext.fVkExtensions, fProtectedContext));
} else {
VkPhysicalDeviceFeatures2 features;
memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
@@ -205,7 +211,8 @@
backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
}
fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
- features, instanceVersion, physicalDeviceVersion, extensions));
+ features, instanceVersion, physicalDeviceVersion, extensions,
+ fProtectedContext));
}
fCaps.reset(SkRef(fVkCaps.get()));
@@ -517,6 +524,9 @@
size_t offset) {
SkASSERT(surface);
SkASSERT(transferBuffer);
+ if (fProtectedContext == GrProtected::kYes) {
+ return false;
+ }
GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
@@ -1042,7 +1052,7 @@
imageDesc.fSamples = 1;
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
- imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ imageDesc.fIsProtected = desc.fIsProtected;
GrMipMapsStatus mipMapsStatus = GrMipMapsStatus::kNotAllocated;
if (mipLevels > 1) {
@@ -1184,12 +1194,17 @@
return nullptr;
}
+ if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
GrSurfaceDesc surfDesc;
surfDesc.fFlags = kNone_GrSurfaceFlags;
surfDesc.fWidth = backendTex.width();
surfDesc.fHeight = backendTex.height();
surfDesc.fConfig = backendTex.config();
surfDesc.fSampleCnt = 1;
+ surfDesc.fIsProtected = backendTex.isProtected() ? GrProtected::kYes : GrProtected::kNo;
sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
SkASSERT(layout);
@@ -1216,10 +1231,15 @@
return nullptr;
}
+ if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
GrSurfaceDesc surfDesc;
surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
surfDesc.fWidth = backendTex.width();
surfDesc.fHeight = backendTex.height();
+ surfDesc.fIsProtected = backendTex.isProtected() ? GrProtected::kYes : GrProtected::kNo;
surfDesc.fConfig = backendTex.config();
surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config());
@@ -1251,11 +1271,15 @@
return nullptr;
}
+ if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = backendRT.width();
desc.fHeight = backendRT.height();
+ desc.fIsProtected = backendRT.isProtected() ? GrProtected::kYes : GrProtected::kNo;
desc.fConfig = backendRT.config();
desc.fSampleCnt = 1;
@@ -1287,10 +1311,15 @@
return nullptr;
}
+ if (tex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = tex.width();
desc.fHeight = tex.height();
+ desc.fIsProtected = tex.isProtected() ? GrProtected::kYes : GrProtected::kNo;
desc.fConfig = tex.config();
desc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config());
if (!desc.fSampleCnt) {
@@ -1530,12 +1559,18 @@
bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool texturable,
bool renderable, GrMipMapped mipMapped, const void* srcData,
size_t srcRowBytes, const SkColor4f* color,
- GrVkImageInfo* info) {
+ GrVkImageInfo* info, GrProtected isProtected) {
SkASSERT(texturable || renderable);
if (!texturable) {
SkASSERT(GrMipMapped::kNo == mipMapped);
SkASSERT(!srcData);
}
+
+ if (fProtectedContext != isProtected) {
+ SkDebugf("Can only create protected image in protected context\n");
+ return false;
+ }
+
VkFormat vkFormat;
if (!GrPixelConfigToVkFormat(config, &vkFormat)) {
return false;
@@ -1580,8 +1615,10 @@
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ imageDesc.fIsProtected = fProtectedContext;
if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
+ SkDebugf("Failed to init image info\n");
return false;
}
@@ -1633,7 +1670,7 @@
VkBufferCreateInfo bufInfo;
memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- bufInfo.flags = 0;
+ bufInfo.flags = fProtectedContext == GrProtected::kYes ? VK_BUFFER_CREATE_PROTECTED_BIT : 0;
bufInfo.size = combinedBufferSize;
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
@@ -1725,10 +1762,18 @@
err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
SkASSERT(!err);
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (fProtectedContext == GrProtected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submitInfo.pNext = nullptr;
+ submitInfo.pNext = fProtectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
submitInfo.waitSemaphoreCount = 0;
submitInfo.pWaitSemaphores = nullptr;
submitInfo.pWaitDstStageMask = 0;
@@ -1847,33 +1892,42 @@
GrMipMapped mipMapped,
GrRenderable renderable,
const void* srcData, size_t rowBytes,
- const SkColor4f* color) {
+ const SkColor4f* color, GrProtected isProtected) {
this->handleDirtyContext();
+ if (fProtectedContext != isProtected) {
+ SkDebugf("Can only create protected image in protected context\n");
+ return GrBackendTexture();
+ }
+
if (w > this->caps()->maxTextureSize() || h > this->caps()->maxTextureSize()) {
return GrBackendTexture();
}
const VkFormat* vkFormat = format.getVkFormat();
if (!vkFormat) {
+ SkDebugf("Could net get vkformat\n");
return GrBackendTexture();
}
GrPixelConfig config;
if (!vk_format_to_pixel_config(*vkFormat, &config)) {
+ SkDebugf("Could net get vkformat\n");
return GrBackendTexture();
}
if (!this->caps()->isConfigTexturable(config)) {
+ SkDebugf("Config is not texturable\n");
return GrBackendTexture();
}
GrVkImageInfo info;
if (!this->createTestingOnlyVkImage(config, w, h, true, GrRenderable::kYes == renderable,
- mipMapped, srcData, rowBytes, color, &info)) {
- return {};
+ mipMapped, srcData, rowBytes, color, &info, isProtected)) {
+ SkDebugf("Failed to create testing only image\n");
+ return GrBackendTexture();
}
- GrBackendTexture beTex = GrBackendTexture(w, h, info);
+ GrBackendTexture beTex = GrBackendTexture(w, h, isProtected, info);
#if GR_TEST_UTILS
// Lots of tests don't go through Skia's public interface which will set the config so for
// testing we make sure we set a config here.
@@ -1928,7 +1982,7 @@
GrVkImageInfo info;
if (!this->createTestingOnlyVkImage(config, w, h, false, true, GrMipMapped::kNo, nullptr, 0,
- &SkColors::kTransparent, &info)) {
+ &SkColors::kTransparent, &info, GrProtected::kNo)) {
return {};
}
GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, 0, info);
@@ -2084,8 +2138,11 @@
bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
SkASSERT(this->vkCaps().canCopyImage(dst->config(), dstSampleCnt, dstHasYcbcr,
src->config(), srcSampleCnt, srcHasYcbcr));
-
#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return;
+ }
// These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
// the cache is flushed since it is only being written to.
@@ -2136,6 +2193,11 @@
srcImage->isLinearTiled(), srcHasYcbcr));
#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return;
+ }
+
dstImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
@@ -2174,6 +2236,10 @@
void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) {
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return;
+ }
GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
this->resolveImage(dst, srcRT, srcRect, dstPoint);
SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
@@ -2192,6 +2258,10 @@
SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
}
#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return false;
+ }
GrPixelConfig dstConfig = dst->config();
GrPixelConfig srcConfig = src->config();
@@ -2248,6 +2318,10 @@
bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
GrColorType dstColorType, void* buffer, size_t rowBytes) {
+ if (surface->isProtected()) {
+ return false;
+ }
+
if (GrPixelConfigToColorType(surface->config()) != dstColorType) {
return false;
}
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 70a1ce5..5600eac 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -62,6 +62,7 @@
const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
return fPhysDevMemProps;
}
+ bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }
GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
@@ -82,7 +83,8 @@
GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
GrMipMapped, GrRenderable,
const void* pixels, size_t rowBytes,
- const SkColor4f* color) override;
+ const SkColor4f* color,
+ GrProtected isProtected) override;
void deleteBackendTexture(const GrBackendTexture&) override;
#if GR_TEST_UTILS
bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
@@ -261,7 +263,8 @@
bool createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool texturable,
bool renderable, GrMipMapped mipMapped, const void* srcData,
- size_t srcRowBytes, const SkColor4f* color, GrVkImageInfo* info);
+ size_t srcRowBytes, const SkColor4f* color, GrVkImageInfo* info,
+ GrProtected isProtected);
sk_sp<const GrVkInterface> fInterface;
sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
@@ -297,6 +300,8 @@
// vulkan context.
bool fDisconnected;
+ GrProtected fProtectedContext;
+
std::unique_ptr<GrVkGpuRTCommandBuffer> fCachedRTCommandBuffer;
std::unique_ptr<GrVkGpuTextureCommandBuffer> fCachedTexCommandBuffer;
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index 422c95a..d893d2e 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -92,6 +92,7 @@
void GrVkGpuTextureCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) {
+ SkASSERT(!src->isProtected() || (fTexture->isProtected() && fGpu->protectedContext()));
fTasks.emplace<Copy>(src, srcRect, dstPoint, false);
}
@@ -629,6 +630,7 @@
const GrVkRenderPass* oldRP = cbInfo.fRenderPass;
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
+ SkASSERT(!src->isProtected() || (fRenderTarget->isProtected() && fGpu->protectedContext()));
const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
vkRT->compatibleRenderPassHandle();
if (rpHandle.isValid()) {
@@ -795,6 +797,9 @@
}
}
cbInfo.fSampledTextures.push_back(vkTexture);
+
+ SkASSERT(!texture->isProtected() ||
+ (fRenderTarget->isProtected() && fGpu->protectedContext()));
};
if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index f66c70c..059f191 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -157,6 +157,9 @@
if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
return false;
}
+ if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
+ return false;
+ }
VkImage image = VK_NULL_HANDLE;
GrVkAlloc alloc;
@@ -173,10 +176,11 @@
SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
VK_SAMPLE_COUNT_1_BIT == vkSamples);
+ VkImageCreateFlags createflags = gpu->protectedContext() ? VK_IMAGE_CREATE_PROTECTED_BIT : 0;
const VkImageCreateInfo imageCreateInfo = {
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
nullptr, // pNext
- 0, // VkImageCreateFlags
+ createflags, // VkImageCreateFlags
imageDesc.fImageType, // VkImageType
imageDesc.fFormat, // VkFormat
{ imageDesc.fWidth, imageDesc.fHeight, 1 }, // VkExtent3D
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index ec59be6..10083a8 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -121,17 +121,19 @@
VkImageTiling fImageTiling;
VkImageUsageFlags fUsageFlags;
VkFlags fMemProps;
+ GrProtected fIsProtected;
ImageDesc()
- : fImageType(VK_IMAGE_TYPE_2D)
- , fFormat(VK_FORMAT_UNDEFINED)
- , fWidth(0)
- , fHeight(0)
- , fLevels(1)
- , fSamples(1)
- , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
- , fUsageFlags(0)
- , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {}
+ : fImageType(VK_IMAGE_TYPE_2D)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fWidth(0)
+ , fHeight(0)
+ , fLevels(1)
+ , fSamples(1)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fUsageFlags(0)
+ , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+ , fIsProtected(GrProtected::kNo) {}
};
static bool InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo*);
diff --git a/src/gpu/vk/GrVkIndexBuffer.cpp b/src/gpu/vk/GrVkIndexBuffer.cpp
index a440d4a..c8dfd94 100644
--- a/src/gpu/vk/GrVkIndexBuffer.cpp
+++ b/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -18,7 +18,7 @@
sk_sp<GrVkIndexBuffer> GrVkIndexBuffer::Make(GrVkGpu* gpu, size_t size, bool dynamic) {
GrVkBuffer::Desc desc;
- desc.fDynamic = dynamic;
+ desc.fDynamic = gpu->protectedContext() ? true : dynamic;
desc.fType = GrVkBuffer::kIndex_Type;
desc.fSizeInBytes = size;
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index 9e49721..6ffe08a 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -98,7 +98,10 @@
GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));
AllocationPropertyFlags propFlags;
- if (memReqs.size > kMaxSmallImageSize || gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
+ if (gpu->protectedContext()) {
+ propFlags = AllocationPropertyFlags::kProtected;
+ } else if (memReqs.size > kMaxSmallImageSize ||
+ gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
propFlags = AllocationPropertyFlags::kDedicatedAllocation;
} else {
propFlags = AllocationPropertyFlags::kNone;
diff --git a/src/gpu/vk/GrVkRenderTarget.cpp b/src/gpu/vk/GrVkRenderTarget.cpp
index 786f502..d3acfc1 100644
--- a/src/gpu/vk/GrVkRenderTarget.cpp
+++ b/src/gpu/vk/GrVkRenderTarget.cpp
@@ -156,6 +156,7 @@
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ msImageDesc.fIsProtected = desc.fIsProtected;
if (!GrVkImage::InitImageInfo(gpu, msImageDesc, &msInfo)) {
return nullptr;
@@ -377,7 +378,8 @@
GrBackendRenderTarget GrVkRenderTarget::getBackendRenderTarget() const {
SkASSERT(!this->wrapsSecondaryCommandBuffer());
return GrBackendRenderTarget(this->width(), this->height(), this->numSamples(),
- fInfo, this->grVkImageLayout());
+ this->isProtected() ? GrProtected::kYes : GrProtected::kNo, fInfo,
+ this->grVkImageLayout());
}
const GrVkResource* GrVkRenderTarget::stencilImageResource() const {
diff --git a/src/gpu/vk/GrVkTexture.cpp b/src/gpu/vk/GrVkTexture.cpp
index bd443e4..eb0637f 100644
--- a/src/gpu/vk/GrVkTexture.cpp
+++ b/src/gpu/vk/GrVkTexture.cpp
@@ -157,7 +157,9 @@
}
GrBackendTexture GrVkTexture::getBackendTexture() const {
- return GrBackendTexture(this->width(), this->height(), fInfo, this->grVkImageLayout());
+ return GrBackendTexture(this->width(), this->height(),
+ this->isProtected() ? GrProtected::kYes : GrProtected::kNo, fInfo,
+ this->grVkImageLayout());
}
GrVkGpu* GrVkTexture::getVkGpu() const {
diff --git a/src/gpu/vk/GrVkVertexBuffer.cpp b/src/gpu/vk/GrVkVertexBuffer.cpp
index 5be5463..50cfc34 100644
--- a/src/gpu/vk/GrVkVertexBuffer.cpp
+++ b/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -18,7 +18,7 @@
sk_sp<GrVkVertexBuffer> GrVkVertexBuffer::Make(GrVkGpu* gpu, size_t size, bool dynamic) {
GrVkBuffer::Desc desc;
- desc.fDynamic = dynamic;
+ desc.fDynamic = gpu->protectedContext() ? true : dynamic;
desc.fType = GrVkBuffer::kVertex_Type;
desc.fSizeInBytes = size;
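
Finally, a hedged sketch of how the new isProtected parameter on GrContext::createBackendTexture() is expected to be used from the client side; the context is assumed to be the protected one created above, and the size/color type are placeholders:

    GrBackendTexture protectedTex = context->createBackendTexture(
            256, 256, kRGBA_8888_SkColorType,
            GrMipMapped::kNo, GrRenderable::kYes, GrProtected::kYes);
    // GrVkGpu refuses to create an image whose protected-ness does not match
    // the context, so callers should check the result.
    if (!protectedTex.isValid()) {
        // allocation failed or the protected setting did not match the context
    }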