Remove GrBackendObject from tests (except image_from_yuv_textures)
This sets the stage for landing https://skia-review.googlesource.com/c/skia/+/82823 (Update SkImage::MakeFromYUVTexturesCopy to GrBackendTexture)
Change-Id: I2c62f23dc447b9ad55c0b5f06bcd6d7ca0ec4d2b
Reviewed-on: https://skia-review.googlesource.com/83920
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index cf91dbe..c020158 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -453,17 +453,31 @@
/** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
only to be used for testing (particularly for testing the methods that import an externally
created texture into Skia. Must be matched with a call to deleteTestingOnlyTexture(). */
- virtual GrBackendObject createTestingOnlyBackendTexture(
+ virtual GrBackendObject createTestingOnlyBackendObject(
void* pixels, int w, int h,
GrPixelConfig config,
bool isRenderTarget = false,
GrMipMapped mipMapped = GrMipMapped::kNo) = 0;
- /** Check a handle represents an actual texture in the backend API that has not been freed. */
- virtual bool isTestingOnlyBackendTexture(GrBackendObject) const = 0;
/** If ownership of the backend texture has been transferred pass true for abandonTexture. This
will do any necessary cleanup of the handle without freeing the texture in the backend
API. */
- virtual void deleteTestingOnlyBackendTexture(GrBackendObject,
+ virtual void deleteTestingOnlyBackendObject(GrBackendObject,
+ bool abandonTexture = false) = 0;
+
+ /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
+ only to be used for testing (particularly for testing the methods that import an externally
+ created texture into Skia). Must be matched with a call to deleteTestingOnlyTexture(). */
+ virtual GrBackendTexture createTestingOnlyBackendTexture(
+ void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) = 0;
+ /** Check a handle represents an actual texture in the backend API that has not been freed. */
+ virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;
+ /** If ownership of the backend texture has been transferred pass true for abandonTexture. This
+ will do any necessary cleanup of the handle without freeing the texture in the backend
+ API. */
+ virtual void deleteTestingOnlyBackendTexture(GrBackendTexture*,
bool abandonTexture = false) = 0;
// width and height may be larger than rt (if underlying API allows it).
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index ddb80b4..c114c6c 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -4387,9 +4387,9 @@
}
}
-GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
- GrPixelConfig config, bool /*isRT*/,
- GrMipMapped mipMapped) {
+GrBackendObject GrGLGpu::createTestingOnlyBackendObject(void* pixels, int w, int h,
+ GrPixelConfig config, bool /*isRT*/,
+ GrMipMapped mipMapped) {
if (!this->caps()->isConfigTexturable(config)) {
return reinterpret_cast<GrBackendObject>(nullptr);
}
@@ -4450,16 +4450,7 @@
return reinterpret_cast<GrBackendObject>(info.release());
}
-bool GrGLGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
- GrGLuint texID = reinterpret_cast<const GrGLTextureInfo*>(id)->fID;
-
- GrGLboolean result;
- GL_CALL_RET(result, IsTexture(texID));
-
- return (GR_GL_TRUE == result);
-}
-
-void GrGLGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) {
+void GrGLGpu::deleteTestingOnlyBackendObject(GrBackendObject id, bool abandonTexture) {
std::unique_ptr<const GrGLTextureInfo> info(reinterpret_cast<const GrGLTextureInfo*>(id));
GrGLuint texID = info->fID;
@@ -4468,6 +4459,94 @@
}
}
+GrBackendTexture GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config, bool /*isRT*/,
+ GrMipMapped mipMapped) {
+ if (!this->caps()->isConfigTexturable(config)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ // Currently we don't support uploading pixel data when mipped.
+ if (pixels && GrMipMapped::kYes == mipMapped) {
+ return GrBackendTexture(); // invalid
+ }
+
+ GrGLTextureInfo info;
+ info.fTarget = GR_GL_TEXTURE_2D;
+ info.fID = 0;
+ GL_CALL(GenTextures(1, &info.fID));
+ GL_CALL(ActiveTexture(GR_GL_TEXTURE0));
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
+ GL_CALL(BindTexture(info.fTarget, info.fID));
+ fHWBoundTextureUniqueIDs[0].makeInvalid();
+ GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST));
+ GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST));
+ GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
+ GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
+
+ GrGLenum internalFormat;
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+
+ if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
+ &externalType)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ this->unbindCpuToGpuXferBuffer();
+
+ // Figure out the number of mip levels.
+ int mipLevels = 1;
+ if (GrMipMapped::kYes == mipMapped) {
+ mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
+ }
+
+ size_t bpp = GrBytesPerPixel(config);
+ size_t baseLayerSize = bpp * w * h;
+ SkAutoMalloc defaultStorage(baseLayerSize);
+ if (!pixels) {
+ // Fill in the texture with all zeros so we don't have random garbage
+ pixels = defaultStorage.get();
+ memset(pixels, 0, baseLayerSize);
+ }
+
+ int width = w;
+ int height = h;
+ for (int i = 0; i < mipLevels; ++i) {
+ GL_CALL(TexImage2D(info.fTarget, i, internalFormat, width, height, 0, externalFormat,
+ externalType, pixels));
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+ }
+
+ return GrBackendTexture(w, h, config, mipMapped, info);
+}
+
+bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(kOpenGL_GrBackend == tex.backend());
+
+ const GrGLTextureInfo* info = tex.getGLTextureInfo();
+ if (!info) {
+ return false;
+ }
+
+ GrGLboolean result;
+ GL_CALL_RET(result, IsTexture(info->fID));
+
+ return (GR_GL_TRUE == result);
+}
+
+void GrGLGpu::deleteTestingOnlyBackendTexture(GrBackendTexture* tex, bool abandonTexture) {
+ SkASSERT(kOpenGL_GrBackend == tex->backend());
+
+ const GrGLTextureInfo* info = tex->getGLTextureInfo();
+ if (info && !abandonTexture) {
+ GrGLuint texID = info->fID;
+
+ GL_CALL(DeleteTextures(1, &texID));
+ }
+}
+
void GrGLGpu::resetShaderCacheForTesting() const {
fProgramCache->abandon();
}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 3d9533d..42897ab 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -161,12 +161,18 @@
int width,
int height) override;
- GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
- GrPixelConfig config,
- bool isRenderTarget,
- GrMipMapped mipMapped) override;
- bool isTestingOnlyBackendTexture(GrBackendObject) const override;
- void deleteTestingOnlyBackendTexture(GrBackendObject, bool abandonTexture) override;
+ GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) override;
+ void deleteTestingOnlyBackendObject(GrBackendObject, bool abandonTexture) override;
+
+ GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) override;
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;
void resetShaderCacheForTesting() const override;
diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp
index 5969f2f..4080019 100644
--- a/src/gpu/mock/GrMockGpu.cpp
+++ b/src/gpu/mock/GrMockGpu.cpp
@@ -90,22 +90,46 @@
return new GrMockStencilAttachment(this, width, height, kBits, rt->numColorSamples());
}
-GrBackendObject GrMockGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
- GrPixelConfig config, bool isRT,
- GrMipMapped) {
+GrBackendObject GrMockGpu::createTestingOnlyBackendObject(void* pixels, int w, int h,
+ GrPixelConfig config, bool isRT,
+ GrMipMapped) {
auto info = new GrMockTextureInfo;
info->fID = NextExternalTextureID();
fOutstandingTestingOnlyTextureIDs.add(info->fID);
return reinterpret_cast<GrBackendObject>(info);
}
-bool GrMockGpu::isTestingOnlyBackendTexture(GrBackendObject object) const {
- return fOutstandingTestingOnlyTextureIDs.contains(
- reinterpret_cast<const GrMockTextureInfo*>(object)->fID);
-}
-
-void GrMockGpu::deleteTestingOnlyBackendTexture(GrBackendObject object, bool abandonTexture) {
+void GrMockGpu::deleteTestingOnlyBackendObject(GrBackendObject object, bool abandonTexture) {
auto info = reinterpret_cast<const GrMockTextureInfo*>(object);
fOutstandingTestingOnlyTextureIDs.remove(info->fID);
delete info;
}
+
+GrBackendTexture GrMockGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config, bool isRT,
+ GrMipMapped) {
+ GrMockTextureInfo info;
+ info.fID = NextExternalTextureID();
+ fOutstandingTestingOnlyTextureIDs.add(info.fID);
+ return GrBackendTexture(w, h, config, info);
+}
+
+bool GrMockGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(kMock_GrBackend == tex.backend());
+
+ const GrMockTextureInfo* info = tex.getMockTextureInfo();
+ if (!info) {
+ return false;
+ }
+
+ return fOutstandingTestingOnlyTextureIDs.contains(info->fID);
+}
+
+void GrMockGpu::deleteTestingOnlyBackendTexture(GrBackendTexture* tex, bool abandonTexture) {
+ SkASSERT(kMock_GrBackend == tex->backend());
+
+ const GrMockTextureInfo* info = tex->getMockTextureInfo();
+ if (info) {
+ fOutstandingTestingOnlyTextureIDs.remove(info->fID);
+ }
+}
diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h
index 2a86a4e..aaa465b 100644
--- a/src/gpu/mock/GrMockGpu.h
+++ b/src/gpu/mock/GrMockGpu.h
@@ -129,12 +129,14 @@
int height) override;
void clearStencil(GrRenderTarget*, int clearValue) override {}
- GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h, GrPixelConfig,
+ GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h, GrPixelConfig,
+ bool isRT, GrMipMapped) override;
+ void deleteTestingOnlyBackendObject(GrBackendObject, bool abandonTexture) override;
+
+ GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h, GrPixelConfig,
bool isRT, GrMipMapped) override;
-
- bool isTestingOnlyBackendTexture(GrBackendObject) const override;
-
- void deleteTestingOnlyBackendTexture(GrBackendObject, bool abandonTexture) override;
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;
static int NextInternalTextureID();
static int NextExternalTextureID();
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index 4dde825..6d30d1b 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -141,13 +141,20 @@
void clearStencil(GrRenderTarget* target, int clearValue) override {}
- GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
- GrPixelConfig config, bool isRT,
- GrMipMapped) override {
+ GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h,
+ GrPixelConfig config, bool isRT,
+ GrMipMapped) override {
return 0;
}
- bool isTestingOnlyBackendTexture(GrBackendObject ) const override { return false; }
- void deleteTestingOnlyBackendTexture(GrBackendObject, bool abandonTexture) override {}
+ void deleteTestingOnlyBackendObject(GrBackendObject, bool abandonTexture = false) override {}
+
+ GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config, bool isRT,
+ GrMipMapped) override {
+ return GrBackendTexture();
+ }
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override { return false; }
+ void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandon = false) override {}
sk_sp<GrMtlCaps> fMtlCaps;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 0a64234..85ddd08 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -1173,10 +1173,10 @@
return true;
}
-GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
- GrPixelConfig config,
- bool isRenderTarget,
- GrMipMapped mipMapped) {
+GrBackendObject GrVkGpu::createTestingOnlyBackendObject(void* srcData, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) {
VkFormat pixelFormat;
if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
@@ -1508,8 +1508,357 @@
return (GrBackendObject)info;
}
-bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
- const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);
+void GrVkGpu::deleteTestingOnlyBackendObject(GrBackendObject id, bool abandon) {
+ GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
+ if (backend) {
+ if (!abandon) {
+ // something in the command buffer may still be using this, so force submit
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkImage::DestroyImageInfo(this, backend);
+ }
+ delete backend;
+ }
+}
+
+GrBackendTexture GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) {
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ bool linearTiling = false;
+ if (!fVkCaps->isConfigTexturable(config)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ // Currently we don't support uploading pixel data when mipped.
+ if (srcData && GrMipMapped::kYes == mipMapped) {
+ return GrBackendTexture(); // invalid
+ }
+
+ if (fVkCaps->isConfigTexturableLinearly(config) &&
+ (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false)) &&
+ GrMipMapped::kNo == mipMapped) {
+ linearTiling = true;
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ if (isRenderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ VkImage image = VK_NULL_HANDLE;
+ GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0, 0 };
+
+ VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
+ ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ // Figure out the number of mip levels.
+ uint32_t mipLevels = 1;
+ if (GrMipMapped::kYes == mipMapped) {
+ mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
+ }
+
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkImageCreateFlags
+ VK_IMAGE_TYPE_2D, // VkImageType
+ pixelFormat, // VkFormat
+ { (uint32_t) w, (uint32_t) h, 1 }, // VkExtent3D
+ mipLevels, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageTiling, // VkImageTiling
+ usageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));
+
+ if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
+ VK_CALL(DestroyImage(this->device(), image, nullptr));
+ return GrBackendTexture(); // invalid
+ }
+
+ // We need to declare these early so that we can delete them at the end outside of the if block.
+ GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
+ VkBuffer buffer = VK_NULL_HANDLE;
+
+ VkResult err;
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ nullptr, // pNext
+ fCmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
+ if (err) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ return GrBackendTexture(); // invalid
+ }
+
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
+ SkASSERT(!err);
+
+ size_t bpp = GrBytesPerPixel(config);
+ size_t rowCopyBytes = bpp * w;
+ if (linearTiling) {
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+
+ VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));
+
+ if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes,
+ static_cast<size_t>(layout.rowPitch), h)) {
+ GrVkMemory::FreeImageMemory(this, true, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+ } else {
+ SkASSERT(w && h);
+
+ SkTArray<size_t> individualMipOffsets(mipLevels);
+ individualMipOffsets.push_back(0);
+ size_t combinedBufferSize = w * bpp * h;
+ int currentWidth = w;
+ int currentHeight = h;
+ // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
+ // config. This works with the assumption that the bytes in pixel config is always a power
+ // of 2.
+ SkASSERT((bpp & (bpp - 1)) == 0);
+ const size_t alignmentMask = 0x3 | (bpp - 1);
+ for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) {
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+
+ const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ const size_t alignmentDiff = combinedBufferSize & alignmentMask;
+ if (alignmentDiff != 0) {
+ combinedBufferSize += alignmentMask - alignmentDiff + 1;
+ }
+ individualMipOffsets.push_back(combinedBufferSize);
+ combinedBufferSize += trimmedSize;
+ }
+
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = combinedBufferSize;
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+ err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
+
+ if (err) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
+ true, &bufferAlloc)) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+
+ currentWidth = w;
+ currentHeight = h;
+ for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
+ SkASSERT(0 == currentMipLevel || !srcData);
+ size_t currentRowBytes = bpp * currentWidth;
+ size_t bufferOffset = individualMipOffsets[currentMipLevel];
+ if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset,
+ currentRowBytes, currentRowBytes, currentHeight)) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+
+ // Set image layout and add barrier
+ VkImageMemoryBarrier barrier;
+ memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.oldLayout = initialLayout;
+ barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = image;
+ barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
+
+ VK_CALL(CmdPipelineBarrier(cmdBuffer,
+ GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ 0,
+ 0, nullptr,
+ 0, nullptr,
+ 1, &barrier));
+ initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ SkTArray<VkBufferImageCopy> regions(mipLevels);
+
+ currentWidth = w;
+ currentHeight = h;
+ for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
+ // Submit copy command
+ VkBufferImageCopy& region = regions.push_back();
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = individualMipOffsets[currentMipLevel];
+ region.bufferRowLength = currentWidth;
+ region.bufferImageHeight = currentHeight;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { 0, 0, 0 };
+ region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+
+ VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(),
+ regions.begin()));
+ }
+ // Change Image layout to shader read since if we use this texture as a borrowed textures within
+ // Ganesh we require that its layout be set to that
+ VkImageMemoryBarrier barrier;
+ memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ barrier.oldLayout = initialLayout;
+ barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = image;
+ barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
+
+ VK_CALL(CmdPipelineBarrier(cmdBuffer,
+ GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ 0,
+ 0, nullptr,
+ 0, nullptr,
+ 1, &barrier));
+
+ // End CommandBuffer
+ err = VK_CALL(EndCommandBuffer(cmdBuffer));
+ SkASSERT(!err);
+
+ // Create Fence for queue
+ VkFence fence;
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+
+ err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
+ SkASSERT(!err);
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.pWaitDstStageMask = 0;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &cmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
+ SkASSERT(!err);
+
+ err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ VK_CALL(DestroyFence(fDevice, fence, nullptr));
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SK_ABORT("failing");
+ }
+ SkASSERT(!err);
+
+ // Clean up transfer resources
+ if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ }
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ VK_CALL(DestroyFence(fDevice, fence, nullptr));
+
+
+ GrVkImageInfo info;
+ info.fImage = image;
+ info.fAlloc = alloc;
+ info.fImageTiling = imageTiling;
+ info.fImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ info.fFormat = pixelFormat;
+ info.fLevelCount = mipLevels;
+
+ return GrBackendTexture(w, h, info);
+}
+
+bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(kVulkan_GrBackend == tex.fBackend);
+
+ const GrVkImageInfo* backend = tex.getVkImageInfo();
if (backend && backend->fImage && backend->fAlloc.fMemory) {
VkMemoryRequirements req;
@@ -1525,15 +1874,15 @@
return false;
}
-void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
- GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
- if (backend) {
- if (!abandon) {
- // something in the command buffer may still be using this, so force submit
- this->submitCommandBuffer(kForce_SyncQueue);
- GrVkImage::DestroyImageInfo(this, backend);
- }
- delete backend;
+void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendTexture* tex, bool abandon) {
+ SkASSERT(kVulkan_GrBackend == tex->fBackend);
+
+ const GrVkImageInfo* info = tex->getVkImageInfo();
+
+ if (info && !abandon) {
+ // something in the command buffer may still be using this, so force submit
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(info));
}
}
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 75c7cb8..dd45ed1 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -85,12 +85,18 @@
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
- GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
- GrPixelConfig config,
- bool isRenderTarget,
- GrMipMapped) override;
- bool isTestingOnlyBackendTexture(GrBackendObject id) const override;
- void deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) override;
+ GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped) override;
+ void deleteTestingOnlyBackendObject(GrBackendObject id, bool abandonTexture) override;
+
+ GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped) override;
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;
GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
int width,