Two (related) changes here:
1) Our older iOS devices failed our sRGB tests due to precision issues
with alpha. At this point, we only test on the iPadMini 4, and that
device appears not to have any problems.
2) iOS devices still don't have the sRGB texture decode extension, but
some clients have no interest in mixing legacy and color-correct
rendering and would like to use sRGB on these devices. The new
GrContextOptions flag enables sRGB support in those cases (sketched
just below).
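For clients in that camp, the opt-out is a one-liner at context creation
time. A minimal sketch (backendContext is a placeholder for a valid
GrBackendContext wrapping the client's GL interface):

    #include "GrContext.h"
    #include "GrContextOptions.h"

    // A client that always does linear blending (never mixes in "legacy"
    // rendering) can drop the decode-disable requirement, picking up sRGB
    // support on devices without GL_EXT_texture_sRGB_decode (e.g. iOS).
    GrContextOptions options;
    options.fRequireDecodeDisableForSRGB = false;

    GrContext* context = GrContext::Create(kOpenGL_GrBackend, backendContext, options);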
Adjust the test code to produce sRGB-capable contexts on these devices,
but only for configs that have a color space. (See the comment in
SkCommonFlagsConfig.cpp.)
BUG=skia:4148
Committed: https://skia.googlesource.com/skia/+/9db12d2341f3f8722c8b90b11dd4cce138a8a64e
Committed: https://skia.googlesource.com/skia/+/1aeb78c5d978b35b256525b711edd942bce01444
Review-Url: https://codereview.chromium.org/2539993002
diff --git a/include/gpu/GrContextOptions.h b/include/gpu/GrContextOptions.h
index 6a1b51b..65daf72 100644
--- a/include/gpu/GrContextOptions.h
+++ b/include/gpu/GrContextOptions.h
@@ -77,6 +77,14 @@
* purposes.
*/
bool fForceSWPathMasks = false;
+
+ /**
+ * If true, sRGB support will not be enabled unless sRGB decoding can be disabled (via an
+ * extension). If mixed use of "legacy" mode and sRGB/color-correct mode is not required, this
+ * can be set to false, which will significantly expand the number of devices that qualify for
+ * sRGB support.
+ */
+ bool fRequireDecodeDisableForSRGB = true;
};
#endif
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index ec0f628..168acde 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -51,6 +51,7 @@
fMipMapLevelAndLodControlSupport = false;
fRGBAToBGRAReadbackConversionsAreSlow = false;
fDoManualMipmapping = false;
+ fSRGBDecodeDisableSupport = false;
fBlitFramebufferFlags = kNoSupport_BlitFramebufferFlag;
@@ -607,9 +608,11 @@
fDoManualMipmapping = true;
}
+ fSRGBDecodeDisableSupport = ctxInfo.hasExtension("GL_EXT_texture_sRGB_decode");
+
// Requires fTextureRedSupport, fTextureSwizzleSupport, msaa support, ES compatibility have
// already been detected.
- this->initConfigTable(ctxInfo, gli, shaderCaps);
+ this->initConfigTable(contextOptions, ctxInfo, gli, shaderCaps);
this->applyOptionsOverrides(contextOptions);
shaderCaps->applyOptionsOverrides(contextOptions);
@@ -1389,7 +1392,8 @@
return true;
}
-void GrGLCaps::initConfigTable(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli,
+void GrGLCaps::initConfigTable(const GrContextOptions& contextOptions,
+ const GrGLContextInfo& ctxInfo, const GrGLInterface* gli,
GrShaderCaps* shaderCaps) {
/*
Comments on renderability of configs on various GL versions.
@@ -1575,19 +1579,37 @@
fSRGBWriteControl = true;
}
} else {
- // See https://bug.skia.org/4148 for PowerVR issue.
- fSRGBSupport = kPowerVRRogue_GrGLRenderer != ctxInfo.renderer() &&
- (ctxInfo.version() >= GR_GL_VER(3,0) || ctxInfo.hasExtension("GL_EXT_sRGB"));
+ fSRGBSupport = ctxInfo.version() >= GR_GL_VER(3,0) || ctxInfo.hasExtension("GL_EXT_sRGB");
+#if defined(SK_CPU_X86)
+ if (kPowerVRRogue_GrGLRenderer == ctxInfo.renderer()) {
+ // NexusPlayer has strange bugs with sRGB (skbug.com/4148). This is a targeted fix to
+ // blacklist that device (and any others that might be sharing the same driver).
+ fSRGBSupport = false;
+ }
+#endif
// ES through 3.1 requires EXT_srgb_write_control to support toggling
// sRGB writing for destinations.
// See https://bug.skia.org/5329 for Adreno4xx issue.
fSRGBWriteControl = kAdreno4xx_GrGLRenderer != ctxInfo.renderer() &&
ctxInfo.hasExtension("GL_EXT_sRGB_write_control");
}
- if (!ctxInfo.hasExtension("GL_EXT_texture_sRGB_decode")) {
- // To support "legacy" L32 mode, we require the ability to turn off sRGB decode:
+ if (contextOptions.fRequireDecodeDisableForSRGB && !fSRGBDecodeDisableSupport) {
+ // To support "legacy" L32 mode, we require the ability to turn off sRGB decode. Clients
+ // can opt-out of that requirement, if they intend to always do linear blending.
fSRGBSupport = false;
}
+
+ // This is very conservative: if we're on a platform where N32 is BGRA, and using ES, disable
+ // all sRGB support. Too much code relies on creating surfaces with N32 + sRGB colorspace,
+ // and sBGRA is basically impossible to support on any version of ES (with our current code).
+ // In particular, ES2 doesn't support sBGRA at all, and even in ES3, there is no valid pair
+ // of formats that can be used for TexImage calls to upload BGRA data to sRGBA (which is what
+ // we *have* to use as the internal format, because sBGRA doesn't exist). This primarily
+ // affects Windows.
+ if (kSkia8888_GrPixelConfig == kBGRA_8888_GrPixelConfig && kGLES_GrGLStandard == standard) {
+ fSRGBSupport = false;
+ }
+
fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_SRGB_ALPHA;
fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_SRGB8_ALPHA8;
// GL does not do srgb<->rgb conversions when transferring between cpu and gpu. Thus, the
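The sBGRA problem called out in the new comment above is visible at the
raw GL level. A sketch in plain ES3 calls, not Skia code (w, h, and
pixels are placeholders; a current ES3 context is assumed):

    #include <GLES3/gl3.h>

    // The only upload pair ES3 accepts for an sRGB texture: internal
    // format GL_SRGB8_ALPHA8 with format GL_RGBA.
    void upload_srgba(int w, int h, const void* pixels) {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, w, h, 0,
                     GL_RGBA, GL_UNSIGNED_BYTE, pixels);
        // No sBGRA internal format exists, and GL_BGRA_EXT (from
        // GL_EXT_texture_format_BGRA8888) must serve as both format and
        // internal format, so BGRA bytes can't be handed to an sRGB
        // texture directly -- they'd need a CPU swizzle first.
    }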
diff --git a/src/gpu/gl/GrGLCaps.h b/src/gpu/gl/GrGLCaps.h
index 5c7e028..deb0889 100644
--- a/src/gpu/gl/GrGLCaps.h
+++ b/src/gpu/gl/GrGLCaps.h
@@ -345,6 +345,8 @@
bool doManualMipmapping() const { return fDoManualMipmapping; }
+ bool srgbDecodeDisableSupport() const { return fSRGBDecodeDisableSupport; }
+
/**
* Returns a string containing the caps info.
*/
@@ -378,7 +380,8 @@
void initBlendEqationSupport(const GrGLContextInfo&);
void initStencilFormats(const GrGLContextInfo&);
// This must be called after initFSAASupport().
- void initConfigTable(const GrGLContextInfo&, const GrGLInterface*, GrShaderCaps*);
+ void initConfigTable(const GrContextOptions&, const GrGLContextInfo&, const GrGLInterface*,
+ GrShaderCaps*);
void initShaderPrecisionTable(const GrGLContextInfo&, const GrGLInterface*, GrShaderCaps*);
@@ -420,6 +423,7 @@
bool fMipMapLevelAndLodControlSupport : 1;
bool fRGBAToBGRAReadbackConversionsAreSlow : 1;
bool fDoManualMipmapping : 1;
+ bool fSRGBDecodeDisableSupport : 1;
uint32_t fBlitFramebufferFlags;
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 4820ca8..5da585f 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -3243,7 +3243,7 @@
newTexParams.fMinFilter = glMinFilterModes[filterMode];
newTexParams.fMagFilter = glMagFilterModes[filterMode];
- if (GrPixelConfigIsSRGB(texture->config())) {
+ if (this->glCaps().srgbDecodeDisableSupport() && GrPixelConfigIsSRGB(texture->config())) {
newTexParams.fSRGBDecode = allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT;
if (setAll || newTexParams.fSRGBDecode != oldTexParams.fSRGBDecode) {
this->setTextureUnit(unitIdx);
@@ -3419,7 +3419,7 @@
// Configure sRGB decode, if necessary. This state is the only thing needed for the driver
// call (glGenerateMipmap) to work correctly. Our manual method dirties other state, too.
- if (GrPixelConfigIsSRGB(texture->config())) {
+ if (this->glCaps().srgbDecodeDisableSupport() && GrPixelConfigIsSRGB(texture->config())) {
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT,
allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT));
}
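For reference, the state these new guards protect is the per-texture
decode switch from GL_EXT_texture_sRGB_decode. A sketch of the raw calls
(assumes the target texture is bound and the extension was advertised,
which is exactly what srgbDecodeDisableSupport() now reports):

    #include <GLES2/gl2.h>
    #include <GLES2/gl2ext.h>  // GL_TEXTURE_SRGB_DECODE_EXT et al.

    void set_srgb_decode(bool allowSRGBInputs) {
        // Legacy-mode draws skip the decode so sRGB texels are sampled
        // raw; color-correct draws restore the default linearization.
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SRGB_DECODE_EXT,
                        allowSRGBInputs ? GL_DECODE_EXT : GL_SKIP_DECODE_EXT);
    }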
diff --git a/tests/SRGBMipMapTest.cpp b/tests/SRGBMipMapTest.cpp
index a875c5c..88f08d4 100644
--- a/tests/SRGBMipMapTest.cpp
+++ b/tests/SRGBMipMapTest.cpp
@@ -10,6 +10,7 @@
#include "GrCaps.h"
#include "GrContext.h"
#include "GrRenderTargetContext.h"
+#include "gl/GrGLGpu.h"
#include "SkCanvas.h"
#include "SkSurface.h"
@@ -142,8 +143,18 @@
// 2) Draw texture to L32 surface (should generate/use linear mips)
paint.setGammaCorrect(false);
l32RenderTargetContext->drawRect(noClip, paint, SkMatrix::I(), rect);
- read_and_check_pixels(reporter, l32RenderTargetContext->asTexture().get(), expectedLinear,
- error, "re-render as linear");
+
+ // Right now, this test only runs on GL (because Vulkan doesn't support legacy mip-mapping,
+ // skbug.com/5048). On GL, we may not have sRGB decode support. In that case, rendering sRGB
+ // textures to a legacy surface produces nonsense, so this part of the test is meaningless.
+ //
+ // TODO: Once Vulkan supports legacy mip-mapping, we can promote this to GrCaps. Right now,
+ // Vulkan has most of the functionality, but not the mip-mapping part that's being tested here.
+ GrGLGpu* glGpu = static_cast<GrGLGpu*>(context->getGpu());
+ if (glGpu->glCaps().srgbDecodeDisableSupport()) {
+ read_and_check_pixels(reporter, l32RenderTargetContext->asTexture().get(), expectedLinear,
+ error, "re-render as linear");
+ }
// 3) Go back to sRGB
paint.setGammaCorrect(true);
diff --git a/tools/flags/SkCommonFlagsConfig.cpp b/tools/flags/SkCommonFlagsConfig.cpp
index abd8bdb..5763e4a 100644
--- a/tools/flags/SkCommonFlagsConfig.cpp
+++ b/tools/flags/SkCommonFlagsConfig.cpp
@@ -185,8 +185,19 @@
if (useInstanced) {
fContextOptions |= ContextOptions::kUseInstanced;
}
+ // Subtle logic: If the config has a color space attached, we're going to be rendering to sRGB,
+ // so we need that capability. In addition, to get the widest test coverage, we DO NOT require
+ // that we can disable sRGB decode. (That's for rendering sRGB sources to legacy surfaces).
+ //
+ // If the config doesn't have a color space attached, we're going to be rendering in legacy
+ // mode. In that case, we can't allow a context to be created that has sRGB support without
+ // the ability to disable sRGB decode. Otherwise, all of our sRGB source resources will be
+ // treated as sRGB textures, but we will be unable to prevent the decode, causing them to be
+ // too dark.
if (fColorSpace) {
fContextOptions |= ContextOptions::kRequireSRGBSupport;
+ } else {
+ fContextOptions |= ContextOptions::kRequireSRGBDecodeDisableSupport;
}
}
static bool parse_option_int(const SkString& value, int* outInt) {
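To make the "too dark" failure from the comment above concrete: when
decode can't be skipped, the hardware linearizes every sRGB texel it
samples, and in legacy mode nothing ever re-encodes the result. A
back-of-the-envelope sketch, using the common 2.2-power approximation
of the sRGB curve:

    #include <cmath>
    #include <cstdio>

    // Approximate sRGB -> linear transfer (ignores the linear toe segment).
    static float srgb_to_linear(float v) { return std::pow(v, 2.2f); }

    int main() {
        float stored = 0.5f;                     // texel as authored
        float sampled = srgb_to_linear(stored);  // ~0.22: what gets sampled
                                                 // when decode can't be skipped
        // Legacy mode writes this out with no re-encode, so a mid-gray
        // source lands on screen far darker than intended.
        std::printf("stored %.2f -> sampled %.2f\n", stored, sampled);
        return 0;
    }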
diff --git a/tools/gpu/GrContextFactory.cpp b/tools/gpu/GrContextFactory.cpp
index ffc5295..8d9bd41 100644
--- a/tools/gpu/GrContextFactory.cpp
+++ b/tools/gpu/GrContextFactory.cpp
@@ -198,6 +198,8 @@
if (ContextOptions::kUseInstanced & options) {
grOptions.fEnableInstancedRendering = true;
}
+ grOptions.fRequireDecodeDisableForSRGB =
+ SkToBool(ContextOptions::kRequireSRGBDecodeDisableSupport & options);
grCtx.reset(GrContext::Create(backend, backendContext, grOptions));
if (!grCtx.get()) {
return ContextInfo();
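From a test's point of view, the new bit flows through the factory like
this. A sketch against GrContextFactory's API as of this change (the
context-type name is an assumption):

    #include "GrContextFactory.h"

    using sk_gpu_test::GrContextFactory;

    // Request a native GL context that must be able to disable sRGB
    // decode, i.e. one that's safe for legacy-mode rendering of sRGB
    // sources; grContext() comes back null if no such context exists.
    GrContextFactory factory;
    sk_gpu_test::ContextInfo info = factory.getContextInfo(
            GrContextFactory::kNativeGL_ContextType,
            GrContextFactory::ContextOptions::kRequireSRGBDecodeDisableSupport);
    GrContext* context = info.grContext();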
diff --git a/tools/gpu/GrContextFactory.h b/tools/gpu/GrContextFactory.h
index 54b6611..a232112 100644
--- a/tools/gpu/GrContextFactory.h
+++ b/tools/gpu/GrContextFactory.h
@@ -93,10 +93,11 @@
* to not using GL_NV_path_rendering extension even when the driver supports it.
*/
enum class ContextOptions {
- kNone = 0x0,
- kEnableNVPR = 0x1,
- kUseInstanced = 0x2,
- kRequireSRGBSupport = 0x4,
+ kNone = 0x0,
+ kEnableNVPR = 0x1,
+ kUseInstanced = 0x2,
+ kRequireSRGBSupport = 0x4,
+ kRequireSRGBDecodeDisableSupport = 0x8,
};
static ContextType NativeContextTypeForBackend(GrBackend backend) {