Restore ETC1 compressed texture support.
Brings back GL ETC1 support and adds Vulkan support as well.
Bug: skia:8684
Change-Id: Ie65da0a3172793081f0e4072f161bfb9b14678bc
Reviewed-on: https://skia-review.googlesource.com/c/179724
Commit-Queue: Jim Van Verth <jvanverth@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrCaps.cpp b/src/gpu/GrCaps.cpp
index f77f83e..638a70f 100644
--- a/src/gpu/GrCaps.cpp
+++ b/src/gpu/GrCaps.cpp
@@ -148,6 +148,7 @@
case kAlpha_half_GrPixelConfig: return "AlphaHalf";
case kAlpha_half_as_Red_GrPixelConfig: return "AlphaHalf_asRed";
case kRGBA_half_GrPixelConfig: return "RGBAHalf";
+ case kRGB_ETC1_GrPixelConfig: return "RGBETC1";
}
SK_ABORT("Invalid pixel config");
return "<invalid>";
diff --git a/src/gpu/GrCaps.h b/src/gpu/GrCaps.h
index 796f981..633e066 100644
--- a/src/gpu/GrCaps.h
+++ b/src/gpu/GrCaps.h
@@ -52,6 +52,7 @@
bool srgbWriteControl() const { return fSRGBWriteControl; }
bool discardRenderTargetSupport() const { return fDiscardRenderTargetSupport; }
bool gpuTracingSupport() const { return fGpuTracingSupport; }
+ bool compressedTexSubImageSupport() const { return fCompressedTexSubImageSupport; }
bool oversizedStencilSupport() const { return fOversizedStencilSupport; }
bool textureBarrierSupport() const { return fTextureBarrierSupport; }
bool sampleLocationsSupport() const { return fSampleLocationsSupport; }
@@ -333,6 +334,7 @@
bool fReuseScratchTextures : 1;
bool fReuseScratchBuffers : 1;
bool fGpuTracingSupport : 1;
+ bool fCompressedTexSubImageSupport : 1;
bool fOversizedStencilSupport : 1;
bool fTextureBarrierSupport : 1;
bool fSampleLocationsSupport : 1;
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 88d1ec2..d4f99b2 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -427,6 +427,7 @@
case kRG_float_GrPixelConfig: return false;
case kAlpha_half_GrPixelConfig: return false;
case kRGBA_half_GrPixelConfig: return true;
+ case kRGB_ETC1_GrPixelConfig: return false;
case kAlpha_8_as_Alpha_GrPixelConfig: return false;
case kAlpha_8_as_Red_GrPixelConfig: return false;
case kAlpha_half_as_Red_GrPixelConfig: return false;
@@ -453,6 +454,7 @@
case GrColorType::kRGBA_F16: return true;
case GrColorType::kRG_F32: return false;
case GrColorType::kRGBA_F32: return true;
+ case GrColorType::kRGB_ETC1: return false;
}
SK_ABORT("Invalid GrColorType");
return false;
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index cd93f8d..0e520d8 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -123,6 +123,10 @@
return nullptr;
}
+ // We shouldn't be rendering into compressed textures
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig) || !isRT);
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig) || 1 == desc.fSampleCnt);
+
this->handleDirtyContext();
sk_sp<GrTexture> tex = this->onCreateTexture(desc, budgeted, texels, mipLevelCount);
if (tex) {
@@ -149,6 +153,7 @@
bool purgeImmediately) {
SkASSERT(ioType != kWrite_GrIOType);
this->handleDirtyContext();
+ SkASSERT(this->caps());
if (!this->caps()->isConfigTexturable(backendTex.config())) {
return nullptr;
}
@@ -250,6 +255,10 @@
return false;
}
+ if (GrPixelConfigIsCompressed(surface->config())) {
+ return false;
+ }
+
this->handleDirtyContext();
return this->onReadPixels(surface, left, top, width, height, dstColorType, buffer, rowBytes);
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index 6e821db..6b5ff4f 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -150,9 +150,12 @@
return nullptr;
}
- sk_sp<GrTexture> tex = this->getExactScratch(desc, budgeted, flags);
- if (tex) {
- return tex;
+ // Compressed textures are read-only so they don't support re-use for scratch.
+ if (!GrPixelConfigIsCompressed(desc.fConfig)) {
+ sk_sp<GrTexture> tex = this->getExactScratch(desc, budgeted, flags);
+ if (tex) {
+ return tex;
+ }
}
return fGpu->createTexture(desc, budgeted);
@@ -167,6 +170,11 @@
return nullptr;
}
+ // Currently we don't recycle compressed textures as scratch.
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ return nullptr;
+ }
+
if (!fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo)) {
return nullptr;
}
@@ -195,6 +203,7 @@
sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& desc, Flags flags) {
ASSERT_SINGLE_OWNER
SkASSERT(!this->isAbandoned());
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
SkASSERT(fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo));
// We could make initial clears work with scratch textures but it is a rare case so we just opt
diff --git a/src/gpu/GrSurface.cpp b/src/gpu/GrSurface.cpp
index b35683c..9af60bb 100644
--- a/src/gpu/GrSurface.cpp
+++ b/src/gpu/GrSurface.cpp
@@ -36,6 +36,7 @@
colorValuesPerPixel += 1;
}
SkASSERT(kUnknown_GrPixelConfig != desc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
size_t colorBytes = (size_t) width * height * GrBytesPerPixel(desc.fConfig);
// This would be a nice assert to have (i.e., we aren't creating 0 width/height surfaces).
@@ -45,7 +46,11 @@
size = colorValuesPerPixel * colorBytes;
size += colorBytes/3; // in case we have to mipmap
} else {
- size = (size_t) width * height * GrBytesPerPixel(desc.fConfig);
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ size = GrCompressedFormatDataSize(desc.fConfig, width, height);
+ } else {
+ size = (size_t)width * height * GrBytesPerPixel(desc.fConfig);
+ }
size += size/3; // in case we have to mipmap
}
@@ -59,6 +64,8 @@
int colorSamplesPerPixel,
GrMipMapped mipMapped,
bool useNextPow2) {
+ size_t colorSize;
+
width = useNextPow2
? SkTMax(GrResourceProvider::kMinScratchTextureSize, GrNextPow2(width))
: width;
@@ -67,7 +74,11 @@
: height;
SkASSERT(kUnknown_GrPixelConfig != config);
- size_t colorSize = (size_t)width * height * GrBytesPerPixel(config);
+ if (GrPixelConfigIsCompressed(config)) {
+ colorSize = GrCompressedFormatDataSize(config, width, height);
+ } else {
+ colorSize = (size_t)width * height * GrBytesPerPixel(config);
+ }
SkASSERT(colorSize > 0);
size_t finalSize = colorSamplesPerPixel * colorSize;
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index 61fb25d..3da6aaa 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -72,6 +72,11 @@
} else {
SkASSERT(is_valid_non_lazy(desc));
}
+
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ SkASSERT(!SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag));
+ fSurfaceFlags |= GrInternalSurfaceFlags::kReadOnly;
+ }
}
// Wrapped version
diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp
index bd8ad3b..f998c6e 100644
--- a/src/gpu/GrTexture.cpp
+++ b/src/gpu/GrTexture.cpp
@@ -65,14 +65,16 @@
}
void GrTexture::computeScratchKey(GrScratchKey* key) const {
- const GrRenderTarget* rt = this->asRenderTarget();
- int sampleCount = 1;
- if (rt) {
- sampleCount = rt->numStencilSamples();
+ if (!GrPixelConfigIsCompressed(this->config())) {
+ const GrRenderTarget* rt = this->asRenderTarget();
+ int sampleCount = 1;
+ if (rt) {
+ sampleCount = rt->numStencilSamples();
+ }
+ GrTexturePriv::ComputeScratchKey(this->config(), this->width(), this->height(),
+ SkToBool(rt), sampleCount,
+ this->texturePriv().mipMapped(), key);
}
- GrTexturePriv::ComputeScratchKey(this->config(), this->width(), this->height(),
- SkToBool(rt), sampleCount,
- this->texturePriv().mipMapped(), key);
}
void GrTexturePriv::ComputeScratchKey(GrPixelConfig config, int width, int height,
diff --git a/src/gpu/GrTextureProducer.cpp b/src/gpu/GrTextureProducer.cpp
index 97417f9..9ae2eb4 100644
--- a/src/gpu/GrTextureProducer.cpp
+++ b/src/gpu/GrTextureProducer.cpp
@@ -23,6 +23,8 @@
bool dstWillRequireMipMaps) {
SkASSERT(context);
+ GrPixelConfig config = GrMakePixelConfigUncompressed(inputProxy->config());
+
const SkRect dstRect = SkRect::MakeIWH(copyParams.fWidth, copyParams.fHeight);
GrMipMapped mipMapped = dstWillRequireMipMaps ? GrMipMapped::kYes : GrMipMapped::kNo;
@@ -52,7 +54,7 @@
sk_sp<GrRenderTargetContext> copyRTC =
context->contextPriv().makeDeferredRenderTargetContextWithFallback(
format, SkBackingFit::kExact, dstRect.width(), dstRect.height(),
- inputProxy->config(), nullptr, 1, mipMapped, inputProxy->origin());
+ config, nullptr, 1, mipMapped, inputProxy->origin());
if (!copyRTC) {
return nullptr;
}
diff --git a/src/gpu/SkGr.cpp b/src/gpu/SkGr.cpp
index 261c1f9..27b4248 100644
--- a/src/gpu/SkGr.cpp
+++ b/src/gpu/SkGr.cpp
@@ -358,6 +358,7 @@
case kRGBA_float_GrPixelConfig:
case kRG_float_GrPixelConfig:
case kRGBA_half_GrPixelConfig:
+ case kRGB_ETC1_GrPixelConfig:
case kAlpha_8_GrPixelConfig:
case kAlpha_8_as_Alpha_GrPixelConfig:
case kAlpha_8_as_Red_GrPixelConfig:
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index 42ee309..04ef735 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -1202,6 +1202,15 @@
return true;
}
+bool GrGLCaps::getCompressedTexImageFormats(GrPixelConfig surfaceConfig,
+ GrGLenum* internalFormat) const {
+ if (!GrPixelConfigIsCompressed(surfaceConfig)) {
+ return false;
+ }
+ *internalFormat = fConfigTable[surfaceConfig].fFormats.fInternalFormatTexImage;
+ return true;
+}
+
bool GrGLCaps::getReadPixelsFormat(GrPixelConfig surfaceConfig, GrPixelConfig externalConfig,
GrGLenum* externalFormat, GrGLenum* externalType) const {
if (!this->getExternalFormat(surfaceConfig, externalConfig, kReadPixels_ExternalFormatUsage,
@@ -1212,6 +1221,7 @@
}
void GrGLCaps::getRenderbufferFormat(GrPixelConfig config, GrGLenum* internalFormat) const {
+ SkASSERT(!GrPixelConfigIsCompressed(config));
*internalFormat = fConfigTable[config].fFormats.fInternalFormatRenderbuffer;
}
@@ -1223,6 +1233,9 @@
ExternalFormatUsage usage, GrGLenum* externalFormat,
GrGLenum* externalType) const {
SkASSERT(externalFormat && externalType);
+ if (GrPixelConfigIsCompressed(memoryConfig)) {
+ return false;
+ }
bool surfaceIsAlphaOnly = GrPixelConfigIsAlphaOnly(surfaceConfig);
bool memoryIsAlphaOnly = GrPixelConfigIsAlphaOnly(memoryConfig);
@@ -1891,6 +1904,41 @@
}
fConfigTable[kRGBA_half_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+ // Compressed texture support
+
+ // glCompressedTexImage2D is available on all OpenGL ES devices. It is available on standard
+ // OpenGL after version 1.3. We'll assume at least that level of OpenGL support.
+
+ // TODO: Fix command buffer bindings and remove this.
+ fCompressedTexSubImageSupport = (bool)(gli->fFunctions.fCompressedTexSubImage2D);
+
+ // No sized/unsized internal format distinction for compressed formats, no external format.
+ // Below we set the external formats and types to 0.
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_COMPRESSED_RGB8_ETC2;
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormats.fSizedInternalFormat =
+ GR_GL_COMPRESSED_RGB8_ETC2;
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormats.fExternalFormat[kReadPixels_ExternalFormatUsage]
+ = 0;
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(4, 3) || ctxInfo.hasExtension("GL_ARB_ES3_compatibility")) {
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ }
+ } else {
+ if (version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_compressed_ETC2_RGB8_texture")) {
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ } else if (ctxInfo.hasExtension("GL_OES_compressed_ETC1_RGB8_texture")) {
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormats.fBaseInternalFormat =
+ GR_GL_COMPRESSED_ETC1_RGB8;
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFormats.fSizedInternalFormat =
+ GR_GL_COMPRESSED_ETC1_RGB8;
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ }
+ }
+ fConfigTable[kRGB_ETC1_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
// Bulk populate the texture internal/external formats here and then deal with exceptions below.
// ES 2.0 requires that the internal/external formats match.
diff --git a/src/gpu/gl/GrGLCaps.h b/src/gpu/gl/GrGLCaps.h
index 05f2aba..1d42f0d 100644
--- a/src/gpu/gl/GrGLCaps.h
+++ b/src/gpu/gl/GrGLCaps.h
@@ -146,6 +146,9 @@
GrGLenum* internalFormat, GrGLenum* externalFormat,
GrGLenum* externalType) const;
+    bool getCompressedTexImageFormats(GrPixelConfig surfaceConfig, GrGLenum* internalFormat) const;
+
bool getReadPixelsFormat(GrPixelConfig surfaceConfig, GrPixelConfig externalConfig,
GrGLenum* externalFormat, GrGLenum* externalType) const;
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 6d5c401..d0aadd3 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -818,6 +818,8 @@
// caps knows to make the external format be GL_RGBA.
auto srgbEncoded = GrPixelConfigIsSRGBEncoded(surface->config());
auto srcAsConfig = GrColorTypeToPixelConfig(srcColorType, srgbEncoded);
+
+ SkASSERT(!GrPixelConfigIsCompressed(glTex->config()));
return this->uploadTexData(glTex->config(), glTex->width(), glTex->height(), glTex->target(),
kWrite_UploadType, left, top, width, height, srcAsConfig, texels,
mipLevelCount);
@@ -825,6 +827,7 @@
// For GL_[UN]PACK_ALIGNMENT. TODO: This really wants to be GrColorType.
static inline GrGLint config_alignment(GrPixelConfig config) {
+ SkASSERT(!GrPixelConfigIsCompressed(config));
switch (config) {
case kAlpha_8_GrPixelConfig:
case kAlpha_8_as_Alpha_GrPixelConfig:
@@ -849,6 +852,7 @@
case kRGBA_float_GrPixelConfig:
case kRG_float_GrPixelConfig:
return 4;
+ case kRGB_ETC1_GrPixelConfig:
case kUnknown_GrPixelConfig:
return 0;
}
@@ -863,6 +867,9 @@
GrPixelConfig texConfig = glTex->config();
SkASSERT(this->caps()->isConfigTexturable(texConfig));
+ // Can't transfer compressed data
+ SkASSERT(!GrPixelConfigIsCompressed(glTex->config()));
+
if (!check_write_and_transfer_input(glTex)) {
return false;
}
@@ -937,11 +944,13 @@
* @param config Pixel config of the texture.
* @param interface The GL interface in use.
* @param caps The capabilities of the GL device.
+ * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.)
* @param internalFormat The data format used for the internal storage of the texture. May be sized.
* @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized.
* @param externalFormat The data format used for the external storage of the texture.
* @param externalType The type of the data used for the external storage of the texture.
* @param texels The texel data of the texture being created.
+ * @param mipLevelCount Number of mipmap levels
* @param baseWidth The width of the texture's base mipmap level
* @param baseHeight The height of the texture's base mipmap level
*/
@@ -1038,6 +1047,107 @@
}
/**
+ * Creates storage space for the texture and fills it with texels.
+ *
+ * @param config Compressed pixel config of the texture.
+ * @param interface The GL interface in use.
+ * @param caps The capabilities of the GL device.
+ * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.)
+ * @param internalFormat The data format used for the internal storage of the texture.
+ * @param texels The texel data of the texture being created.
+ * @param mipLevelCount Number of mipmap levels
+ * @param baseWidth The width of the texture's base mipmap level
+ * @param baseHeight The height of the texture's base mipmap level
+ */
+static bool allocate_and_populate_compressed_texture(GrPixelConfig config,
+ const GrGLInterface& interface,
+ const GrGLCaps& caps,
+ GrGLenum target, GrGLenum internalFormat,
+ const GrMipLevel texels[], int mipLevelCount,
+ int baseWidth, int baseHeight) {
+ CLEAR_ERROR_BEFORE_ALLOC(&interface);
+ SkASSERT(GrPixelConfigIsCompressed(config));
+
+ bool useTexStorage = caps.isConfigTexSupportEnabled(config);
+ // We can only use TexStorage if we know we will not later change the storage requirements.
+ // This means if we may later want to add mipmaps, we cannot use TexStorage.
+ // Right now, we cannot know if we will later add mipmaps or not.
+ // The only time we can use TexStorage is when we already have the
+ // mipmaps.
+ useTexStorage &= mipLevelCount > 1;
+
+ if (useTexStorage) {
+ // We never resize or change formats of textures.
+ GL_ALLOC_CALL(&interface,
+ TexStorage2D(target,
+ mipLevelCount,
+ internalFormat,
+ baseWidth, baseHeight));
+ GrGLenum error = CHECK_ALLOC_ERROR(&interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ const void* currentMipData = texels[currentMipLevel].fPixels;
+ if (currentMipData == nullptr) {
+ // Compressed textures require data for every level
+ return false;
+ }
+
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
+
+ // Make sure that the width and height that we pass to OpenGL
+ // is a multiple of the block size.
+ size_t dataSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight);
+ GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
+ currentMipLevel,
+ 0, // left
+ 0, // top
+ currentWidth,
+ currentHeight,
+ internalFormat,
+ SkToInt(dataSize),
+ currentMipData));
+ }
+ }
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ const void* currentMipData = texels[currentMipLevel].fPixels;
+ if (currentMipData == nullptr) {
+ // Compressed textures require data for every level
+ return false;
+ }
+
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
+
+ // Make sure that the width and height that we pass to OpenGL
+ // is a multiple of the block size.
+            size_t dataSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight);
+
+ GL_ALLOC_CALL(&interface,
+ CompressedTexImage2D(target,
+ currentMipLevel,
+ internalFormat,
+ currentWidth,
+ currentHeight,
+ 0, // border
+ SkToInt(dataSize),
+ currentMipData));
+
+ GrGLenum error = CHECK_ALLOC_ERROR(&interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+/**
* After a texture is created, any state which was altered during its creation
* needs to be restored.
*
@@ -1069,6 +1179,9 @@
UploadType uploadType, int left, int top, int width, int height,
GrPixelConfig dataConfig, const GrMipLevel texels[], int mipLevelCount,
GrMipMapsStatus* mipMapsStatus) {
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
SkASSERT(this->caps()->isConfigTexturable(texConfig));
SkDEBUGCODE(
SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
@@ -1249,6 +1362,34 @@
return succeeded;
}
+bool GrGLGpu::uploadCompressedTexData(GrPixelConfig texConfig, int texWidth, int texHeight,
+ GrGLenum target, GrPixelConfig dataConfig,
+ const GrMipLevel texels[], int mipLevelCount,
+ GrMipMapsStatus* mipMapsStatus) {
+ SkASSERT(this->caps()->isConfigTexturable(texConfig));
+
+ const GrGLInterface* interface = this->glInterface();
+ const GrGLCaps& caps = this->glCaps();
+
+ // We only need the internal format for compressed 2D textures.
+ GrGLenum internalFormat;
+ if (!caps.getCompressedTexImageFormats(texConfig, &internalFormat)) {
+ return false;
+ }
+
+    if (mipMapsStatus) {
+        if (mipLevelCount <= 1) {
+            *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
+        } else {
+            *mipMapsStatus = GrMipMapsStatus::kValid;
+        }
+    }
+
+    return allocate_and_populate_compressed_texture(texConfig, *interface, caps, target,
+                                                    internalFormat, texels, mipLevelCount,
+                                                    texWidth, texHeight);
+}
+
static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
int sampleCount,
GrGLenum format,
@@ -1419,7 +1560,8 @@
return return_null_texture();
}
- bool performClear = (desc.fFlags & kPerformInitialClear_GrSurfaceFlag);
+ bool performClear = (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
+ !GrPixelConfigIsCompressed(desc.fConfig);
GrMipLevel zeroLevel;
std::unique_ptr<uint8_t[]> zeros;
@@ -1656,9 +1798,18 @@
*initialTexParams = set_initial_texture_params(this->glInterface(), *info);
}
- if (!this->uploadTexData(desc.fConfig, desc.fWidth, desc.fHeight, info->fTarget,
- kNewTexture_UploadType, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
- texels, mipLevelCount, mipMapsStatus)) {
+ bool success = false;
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ SkASSERT(!renderTarget);
+ success = this->uploadCompressedTexData(desc.fConfig, desc.fWidth, desc.fHeight,
+ info->fTarget, desc.fConfig,
+ texels, mipLevelCount, mipMapsStatus);
+ } else {
+ success = this->uploadTexData(desc.fConfig, desc.fWidth, desc.fHeight, info->fTarget,
+ kNewTexture_UploadType, 0, 0, desc.fWidth, desc.fHeight,
+ desc.fConfig, texels, mipLevelCount, mipMapsStatus);
+ }
+ if (!success) {
GL_CALL(DeleteTextures(1, &(info->fID)));
return false;
}
@@ -4042,62 +4193,78 @@
GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
- bool restoreGLRowLength = false;
- if (trimRowBytes != rowBytes && this->glCaps().unpackRowLengthSupport()) {
- GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
- restoreGLRowLength = true;
- }
-
- GrGLenum internalFormat;
- GrGLenum externalFormat;
- GrGLenum externalType;
-
- if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
- &externalType)) {
- return GrBackendTexture(); // invalid
- }
-
- info.fFormat = this->glCaps().configSizedInternalFormat(config);
-
- this->unbindCpuToGpuXferBuffer();
-
- // Figure out the number of mip levels.
- int mipLevels = 1;
- if (GrMipMapped::kYes == mipMapped) {
- mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
- }
-
- size_t baseLayerSize = bpp * w * h;
- SkAutoMalloc defaultStorage(baseLayerSize);
- if (!pixels) {
- // Fill in the texture with all zeros so we don't have random garbage
- pixels = defaultStorage.get();
- memset(defaultStorage.get(), 0, baseLayerSize);
- } else if (trimRowBytes != rowBytes && !restoreGLRowLength) {
- // We weren't able to use GR_GL_UNPACK_ROW_LENGTH so make a copy
- char* copy = (char*) defaultStorage.get();
- for (int y = 0; y < h; ++y) {
- memcpy(©[y*trimRowBytes], &((const char*)pixels)[y*rowBytes], trimRowBytes);
+ // we have to do something special for compressed textures
+ if (GrPixelConfigIsCompressed(config)) {
+ GrGLenum internalFormat;
+ const GrGLInterface* interface = this->glInterface();
+ const GrGLCaps& caps = this->glCaps();
+ if (!caps.getCompressedTexImageFormats(config, &internalFormat)) {
+ return GrBackendTexture();
}
- pixels = copy;
- }
- int width = w;
- int height = h;
- for (int i = 0; i < mipLevels; ++i) {
- GL_CALL(TexImage2D(info.fTarget, i, internalFormat, width, height, 0, externalFormat,
- externalType, pixels));
- width = SkTMax(1, width / 2);
- height = SkTMax(1, height / 2);
+ GrMipLevel mipLevel = { pixels, rowBytes };
+ if (!allocate_and_populate_compressed_texture(config, *interface, caps, info.fTarget,
+ internalFormat, &mipLevel, 1,
+ w, h)) {
+ return GrBackendTexture();
+ }
+ } else {
+ bool restoreGLRowLength = false;
+ if (trimRowBytes != rowBytes && this->glCaps().unpackRowLengthSupport()) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
+ restoreGLRowLength = true;
+ }
+
+ GrGLenum internalFormat;
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+
+ if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
+ &externalType)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ info.fFormat = this->glCaps().configSizedInternalFormat(config);
+
+ this->unbindCpuToGpuXferBuffer();
+
+ // Figure out the number of mip levels.
+ int mipLevels = 1;
+ if (GrMipMapped::kYes == mipMapped) {
+ mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
+ }
+
+ size_t baseLayerSize = bpp * w * h;
+ SkAutoMalloc defaultStorage(baseLayerSize);
+ if (!pixels) {
+ // Fill in the texture with all zeros so we don't have random garbage
+ pixels = defaultStorage.get();
+ memset(defaultStorage.get(), 0, baseLayerSize);
+ } else if (trimRowBytes != rowBytes && !restoreGLRowLength) {
+ // We weren't able to use GR_GL_UNPACK_ROW_LENGTH so make a copy
+ char* copy = (char*)defaultStorage.get();
+ for (int y = 0; y < h; ++y) {
+ memcpy(©[y*trimRowBytes], &((const char*)pixels)[y*rowBytes], trimRowBytes);
+ }
+ pixels = copy;
+ }
+
+ int width = w;
+ int height = h;
+ for (int i = 0; i < mipLevels; ++i) {
+ GL_CALL(TexImage2D(info.fTarget, i, internalFormat, width, height, 0, externalFormat,
+ externalType, pixels));
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+ }
+ if (restoreGLRowLength) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
}
// unbind the texture from the texture unit to avoid asserts
GL_CALL(BindTexture(info.fTarget, 0));
- if (restoreGLRowLength) {
- GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
- }
-
GrBackendTexture beTex = GrBackendTexture(w, h, mipMapped, info);
// Lots of tests don't go through Skia's public interface which will set the config so for
// testing we make sure we set a config here.
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index bac9120..f6524b0 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -387,6 +387,13 @@
GrPixelConfig dataConfig, const GrMipLevel texels[], int mipLevelCount,
GrMipMapsStatus* mipMapsStatus = nullptr);
+ // helper for onCreateCompressedTexture. Compressed textures are read-only so we
+ // only use this to populate a new texture.
+ bool uploadCompressedTexData(GrPixelConfig texConfig, int texWidth, int texHeight,
+ GrGLenum target, GrPixelConfig dataConfig,
+ const GrMipLevel texels[], int mipLevelCount,
+ GrMipMapsStatus* mipMapsStatus = nullptr);
+
bool createRenderTargetObjects(const GrSurfaceDesc&, const GrGLTextureInfo& texInfo,
GrGLRenderTarget::IDDesc*);
diff --git a/src/gpu/gl/GrGLTexture.cpp b/src/gpu/gl/GrGLTexture.cpp
index 06bf662..cbb6b5a 100644
--- a/src/gpu/gl/GrGLTexture.cpp
+++ b/src/gpu/gl/GrGLTexture.cpp
@@ -48,6 +48,9 @@
, INHERITED(gpu, desc, TextureTypeFromTarget(idDesc.fInfo.fTarget), mipMapsStatus) {
this->init(desc, idDesc);
this->registerWithCache(budgeted);
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ this->setReadOnly();
+ }
}
GrGLTexture::GrGLTexture(GrGLGpu* gpu, Wrapped, const GrSurfaceDesc& desc,
diff --git a/src/gpu/mtl/GrMtlGpu.mm b/src/gpu/mtl/GrMtlGpu.mm
index 132f455..68693b1 100644
--- a/src/gpu/mtl/GrMtlGpu.mm
+++ b/src/gpu/mtl/GrMtlGpu.mm
@@ -247,6 +247,10 @@
return nullptr;
}
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ return nullptr; // TODO: add compressed texture support
+ }
+
bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
// This TexDesc refers to the texture that will be read by the client. Thus even if msaa is
diff --git a/src/gpu/mtl/GrMtlUtil.mm b/src/gpu/mtl/GrMtlUtil.mm
index 6d80fbe..824bbf5 100644
--- a/src/gpu/mtl/GrMtlUtil.mm
+++ b/src/gpu/mtl/GrMtlUtil.mm
@@ -87,6 +87,13 @@
case kAlpha_half_as_Red_GrPixelConfig:
*format = MTLPixelFormatR16Float;
return true;
+ case kRGB_ETC1_GrPixelConfig:
+#ifdef SK_BUILD_FOR_IOS
+ *format = MTLPixelFormatETC2_RGB8;
+ return true;
+#else
+ return false;
+#endif
}
SK_ABORT("Unexpected config");
return false;
@@ -123,6 +130,10 @@
return kRGBA_half_GrPixelConfig;
case MTLPixelFormatR16Float:
return kAlpha_half_GrPixelConfig;
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatETC2_RGB8:
+ return kRGB_ETC1_GrPixelConfig;
+#endif
default:
return kUnknown_GrPixelConfig;
}
diff --git a/src/gpu/ops/GrCopySurfaceOp.cpp b/src/gpu/ops/GrCopySurfaceOp.cpp
index 4da757e..9e3a521 100644
--- a/src/gpu/ops/GrCopySurfaceOp.cpp
+++ b/src/gpu/ops/GrCopySurfaceOp.cpp
@@ -77,6 +77,9 @@
&clippedSrcRect, &clippedDstPoint)) {
return nullptr;
}
+ if (GrPixelConfigIsCompressed(dstProxy->config())) {
+ return nullptr;
+ }
GrOpMemoryPool* pool = context->contextPriv().opMemoryPool();
diff --git a/src/gpu/vk/GrVkCaps.cpp b/src/gpu/vk/GrVkCaps.cpp
index ba4d1a6..e115ea0 100644
--- a/src/gpu/vk/GrVkCaps.cpp
+++ b/src/gpu/vk/GrVkCaps.cpp
@@ -30,6 +30,7 @@
fDiscardRenderTargetSupport = true;
fReuseScratchTextures = true; //TODO: figure this out
fGpuTracingSupport = false; //TODO: figure this out
+ fCompressedTexSubImageSupport = true;
fOversizedStencilSupport = false; //TODO: figure this out
fInstanceAttribSupport = true;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 1bb1062..619394d 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -371,6 +371,7 @@
return false;
}
+ SkASSERT(!GrPixelConfigIsCompressed(vkTex->config()));
bool success = false;
bool linearTiling = vkTex->isLinearTiled();
if (linearTiling) {
@@ -401,6 +402,9 @@
bool GrVkGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer,
size_t bufferOffset, size_t rowBytes) {
+ // Can't transfer compressed data
+ SkASSERT(!GrPixelConfigIsCompressed(texture->config()));
+
// Vulkan only supports 4-byte aligned offsets
if (SkToBool(bufferOffset & 0x2)) {
return false;
@@ -512,6 +516,10 @@
SkASSERT(data);
SkASSERT(tex->isLinearTiled());
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(GrColorTypeToPixelConfig(dataColorType,
+ GrSRGBEncoded::kNo)));
+
SkDEBUGCODE(
SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
@@ -570,6 +578,10 @@
// first.
SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(GrColorTypeToPixelConfig(dataColorType,
+ GrSRGBEncoded::kNo)));
+
if (width == 0 || height == 0) {
return false;
}
@@ -751,6 +763,133 @@
return true;
}
+// It's probably possible to roll this into uploadTexDataOptimal,
+// but for now it's easier to maintain as a separate entity.
+bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
+ GrColorType dataColorType, const GrMipLevel texels[],
+ int mipLevelCount) {
+ SkASSERT(!tex->isLinearTiled());
+ // For now the assumption is that our rect is the entire texture.
+ // Compressed textures are read-only so this should be a reasonable assumption.
+ SkASSERT(0 == left && 0 == top && width == tex->width() && height == tex->height());
+
+ // We assume that if the texture has mip levels, we either upload to all the levels or just the
+ // first.
+ SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));
+
+ SkASSERT(GrPixelConfigIsCompressed(GrColorTypeToPixelConfig(dataColorType,
+ GrSRGBEncoded::kNo)));
+
+ if (width == 0 || height == 0) {
+ return false;
+ }
+
+ if (GrPixelConfigToColorType(tex->config()) != dataColorType) {
+ return false;
+ }
+
+ SkASSERT(this->caps()->isConfigTexturable(tex->config()));
+
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+ individualMipOffsets.push_back(0);
+ size_t combinedBufferSize = GrCompressedFormatDataSize(tex->config(), width, height);
+ int currentWidth = width;
+ int currentHeight = height;
+ if (!texels[0].fPixels) {
+ return false;
+ }
+
+ // We assume that the alignment for any compressed format is at least 4 bytes and so we don't
+ // need to worry about alignment issues. For example, each block in ETC1 is 8 bytes.
+ for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ currentWidth = SkTMax(1, currentWidth / 2);
+ currentHeight = SkTMax(1, currentHeight / 2);
+
+ if (texels[currentMipLevel].fPixels) {
+ const size_t dataSize = GrCompressedFormatDataSize(tex->config(), currentWidth,
+ currentHeight);
+ individualMipOffsets.push_back(combinedBufferSize);
+ combinedBufferSize += dataSize;
+ } else {
+ return false;
+ }
+ }
+ if (0 == combinedBufferSize) {
+ // We don't have any data to upload so fail (compressed textures are read-only).
+ return false;
+ }
+
+ // allocate buffer to hold our mip data
+ GrVkTransferBuffer* transferBuffer =
+ GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
+ if (!transferBuffer) {
+ return false;
+ }
+
+ int uploadLeft = left;
+ int uploadTop = top;
+ GrVkTexture* uploadTexture = tex;
+
+ char* buffer = (char*)transferBuffer->map();
+ SkTArray<VkBufferImageCopy> regions(mipLevelCount);
+
+ currentWidth = width;
+ currentHeight = height;
+ int layerHeight = uploadTexture->height();
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ if (texels[currentMipLevel].fPixels) {
+ // Again, we're assuming that our rect is the entire texture
+ SkASSERT(currentHeight == layerHeight);
+ SkASSERT(0 == uploadLeft && 0 == uploadTop);
+
+ const size_t dataSize = GrCompressedFormatDataSize(tex->config(), currentWidth,
+ currentHeight);
+
+ // copy data into the buffer, skipping the trailing bytes
+ char* dst = buffer + individualMipOffsets[currentMipLevel];
+ const char* src = (const char*)texels[currentMipLevel].fPixels;
+ memcpy(dst, src, dataSize);
+
+ VkBufferImageCopy& region = regions.push_back();
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
+ region.bufferRowLength = currentWidth;
+ region.bufferImageHeight = currentHeight;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
+ region.imageOffset = { uploadLeft, uploadTop, 0 };
+ region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
+ }
+ currentWidth = SkTMax(1, currentWidth / 2);
+ currentHeight = SkTMax(1, currentHeight / 2);
+ layerHeight = currentHeight;
+ }
+
+ // no need to flush non-coherent memory, unmap will do that for us
+ transferBuffer->unmap();
+
+ // Change layout of our target so it can be copied to
+ uploadTexture->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ transferBuffer,
+ uploadTexture,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ regions.count(),
+ regions.begin());
+ transferBuffer->unref();
+
+ if (1 == mipLevelCount) {
+ tex->texturePriv().markMipMapsDirty();
+ }
+
+ return true;
+}
+
////////////////////////////////////////////////////////////////////////////////
sk_sp<GrTexture> GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel texels[], int mipLevelCount) {
@@ -811,16 +950,24 @@
return nullptr;
}
+ bool isCompressed = GrPixelConfigIsCompressed(desc.fConfig);
auto colorType = GrPixelConfigToColorType(desc.fConfig);
if (mipLevelCount) {
- if (!this->uploadTexDataOptimal(tex.get(), 0, 0, desc.fWidth, desc.fHeight, colorType,
- texels, mipLevelCount)) {
+ bool success;
+ if (isCompressed) {
+ success = this->uploadTexDataCompressed(tex.get(), 0, 0, desc.fWidth, desc.fHeight,
+ colorType, texels, mipLevelCount);
+ } else {
+ success = this->uploadTexDataOptimal(tex.get(), 0, 0, desc.fWidth, desc.fHeight,
+ colorType, texels, mipLevelCount);
+ }
+ if (!success) {
tex->unref();
return nullptr;
}
}
- if (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) {
+ if (SkToBool(desc.fFlags & kPerformInitialClear_GrSurfaceFlag) && !isCompressed) {
VkClearColorValue zeroClearColor;
memset(&zeroClearColor, 0, sizeof(zeroClearColor));
VkImageSubresourceRange range;
@@ -1276,6 +1423,10 @@
SkTArray<size_t> individualMipOffsets(mipLevels);
individualMipOffsets.push_back(0);
size_t combinedBufferSize = w * bpp * h;
+ if (GrPixelConfigIsCompressed(config)) {
+ combinedBufferSize = GrCompressedFormatDataSize(config, w, h);
+ bpp = 4; // we have at least this alignment, which will pass the code below
+ }
int currentWidth = w;
int currentHeight = h;
// The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
@@ -1287,7 +1438,12 @@
currentWidth = SkTMax(1, currentWidth / 2);
currentHeight = SkTMax(1, currentHeight / 2);
- const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ size_t trimmedSize;
+ if (GrPixelConfigIsCompressed(config)) {
+ trimmedSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight);
+ } else {
+ trimmedSize = currentWidth * bpp * currentHeight;
+ }
const size_t alignmentDiff = combinedBufferSize & alignmentMask;
if (alignmentDiff != 0) {
combinedBufferSize += alignmentMask - alignmentDiff + 1;
@@ -1329,10 +1485,19 @@
currentHeight = h;
for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
SkASSERT(0 == currentMipLevel || !srcData);
- size_t currentRowBytes = bpp * currentWidth;
size_t bufferOffset = individualMipOffsets[currentMipLevel];
- if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset, srcRowBytes,
- currentRowBytes, trimRowBytes, currentHeight)) {
+ bool result;
+ if (GrPixelConfigIsCompressed(config)) {
+ size_t levelSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight);
+ size_t currentRowBytes = levelSize / currentHeight;
+ result = copy_testing_data(this, srcData, bufferAlloc, bufferOffset, currentRowBytes,
+ currentRowBytes, currentRowBytes, currentHeight);
+ } else {
+ size_t currentRowBytes = bpp * currentWidth;
+ result = copy_testing_data(this, srcData, bufferAlloc, bufferOffset, srcRowBytes,
+ currentRowBytes, trimRowBytes, currentHeight);
+ }
+ if (!result) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
@@ -2209,4 +2374,3 @@
this->resourceProvider().storePipelineCacheData();
}
}
-
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index acd814c..6358560 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -247,7 +247,9 @@
GrColorType colorType, const void* data, size_t rowBytes);
bool uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
GrColorType colorType, const GrMipLevel texels[], int mipLevelCount);
-
+ bool uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
+ GrColorType dataColorType, const GrMipLevel texels[],
+ int mipLevelCount);
void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
const SkIPoint& dstPoint);
diff --git a/src/gpu/vk/GrVkTexture.cpp b/src/gpu/vk/GrVkTexture.cpp
index fe9723d..802bbcd 100644
--- a/src/gpu/vk/GrVkTexture.cpp
+++ b/src/gpu/vk/GrVkTexture.cpp
@@ -31,6 +31,9 @@
, fTextureView(view) {
SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == info.fLevelCount));
this->registerWithCache(budgeted);
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ this->setReadOnly();
+ }
}
GrVkTexture::GrVkTexture(GrVkGpu* gpu,
diff --git a/src/gpu/vk/GrVkUtil.cpp b/src/gpu/vk/GrVkUtil.cpp
index 90798c9..6524bb6 100644
--- a/src/gpu/vk/GrVkUtil.cpp
+++ b/src/gpu/vk/GrVkUtil.cpp
@@ -69,6 +69,10 @@
case kRGBA_half_GrPixelConfig:
*format = VK_FORMAT_R16G16B16A16_SFLOAT;
return true;
+ case kRGB_ETC1_GrPixelConfig:
+ // converting to ETC2 which is a superset of ETC1
+ *format = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ return true;
case kAlpha_half_GrPixelConfig: // fall through
case kAlpha_half_as_Red_GrPixelConfig:
*format = VK_FORMAT_R16_SFLOAT;
@@ -106,6 +110,8 @@
kAlpha_8_as_Red_GrPixelConfig == config ||
kGray_8_GrPixelConfig == config ||
kGray_8_as_Red_GrPixelConfig == config;
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ return kRGB_ETC1_GrPixelConfig == config;
case VK_FORMAT_R32G32B32A32_SFLOAT:
return kRGBA_float_GrPixelConfig == config;
case VK_FORMAT_R32G32_SFLOAT: