Remove GrBackendObject from tests (except image_from_yuv_textures)
This sets the stage for landing https://skia-review.googlesource.com/c/skia/+/82823 (Update SkImage::MakeFromYUVTexturesCopy to GrBackendTexture)
Change-Id: I2c62f23dc447b9ad55c0b5f06bcd6d7ca0ec4d2b
Reviewed-on: https://skia-review.googlesource.com/83920
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 0a64234..85ddd08 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -1173,10 +1173,10 @@
return true;
}
-GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
- GrPixelConfig config,
- bool isRenderTarget,
- GrMipMapped mipMapped) {
+GrBackendObject GrVkGpu::createTestingOnlyBackendObject(void* srcData, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) {
VkFormat pixelFormat;
if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
@@ -1508,8 +1508,357 @@
return (GrBackendObject)info;
}
-bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
- const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);
+void GrVkGpu::deleteTestingOnlyBackendObject(GrBackendObject id, bool abandon) {
+ GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
+ if (backend) {
+ if (!abandon) {
+ // something in the command buffer may still be using this, so force submit
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkImage::DestroyImageInfo(this, backend);
+ }
+ delete backend;
+ }
+}
+
+GrBackendTexture GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget,
+ GrMipMapped mipMapped) {
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ bool linearTiling = false;
+ if (!fVkCaps->isConfigTexturable(config)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ // Currently we don't support uploading pixel data when mipped.
+ if (srcData && GrMipMapped::kYes == mipMapped) {
+ return GrBackendTexture(); // invalid
+ }
+
+ if (fVkCaps->isConfigTexturableLinearly(config) &&
+ (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false)) &&
+ GrMipMapped::kNo == mipMapped) {
+ linearTiling = true;
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ if (isRenderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ VkImage image = VK_NULL_HANDLE;
+ GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0, 0 };
+
+ VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
+ ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ // Figure out the number of mip levels.
+ uint32_t mipLevels = 1;
+ if (GrMipMapped::kYes == mipMapped) {
+ mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
+ }
+
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkImageCreateFlags
+ VK_IMAGE_TYPE_2D, // VkImageType
+ pixelFormat, // VkFormat
+ { (uint32_t) w, (uint32_t) h, 1 }, // VkExtent3D
+ mipLevels, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageTiling, // VkImageTiling
+ usageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));
+
+ if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
+ VK_CALL(DestroyImage(this->device(), image, nullptr));
+ return GrBackendTexture(); // invalid
+ }
+
+ // We need to declare these early so that we can delete them at the end outside of the if block.
+ GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
+ VkBuffer buffer = VK_NULL_HANDLE;
+
+ VkResult err;
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ nullptr, // pNext
+ fCmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
+ if (err) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ return GrBackendTexture(); // invalid
+ }
+
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
+ SkASSERT(!err);
+
+ size_t bpp = GrBytesPerPixel(config);
+ size_t rowCopyBytes = bpp * w;
+ if (linearTiling) {
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+
+ VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));
+
+ if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes,
+ static_cast<size_t>(layout.rowPitch), h)) {
+ GrVkMemory::FreeImageMemory(this, true, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+ } else {
+ SkASSERT(w && h);
+
+ SkTArray<size_t> individualMipOffsets(mipLevels);
+ individualMipOffsets.push_back(0);
+ size_t combinedBufferSize = w * bpp * h;
+ int currentWidth = w;
+ int currentHeight = h;
+ // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
+ // config. This works with the assumption that the bytes in pixel config is always a power
+ // of 2.
+ SkASSERT((bpp & (bpp - 1)) == 0);
+ const size_t alignmentMask = 0x3 | (bpp - 1);
+ for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) {
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+
+ const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ const size_t alignmentDiff = combinedBufferSize & alignmentMask;
+ if (alignmentDiff != 0) {
+ combinedBufferSize += alignmentMask - alignmentDiff + 1;
+ }
+ individualMipOffsets.push_back(combinedBufferSize);
+ combinedBufferSize += trimmedSize;
+ }
+
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = combinedBufferSize;
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+ err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
+
+ if (err) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
+ true, &bufferAlloc)) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+
+ currentWidth = w;
+ currentHeight = h;
+ for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
+ SkASSERT(0 == currentMipLevel || !srcData);
+ size_t currentRowBytes = bpp * currentWidth;
+ size_t bufferOffset = individualMipOffsets[currentMipLevel];
+ if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset,
+ currentRowBytes, currentRowBytes, currentHeight)) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ return GrBackendTexture(); // invalid
+ }
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+
+ // Set image layout and add barrier
+ VkImageMemoryBarrier barrier;
+ memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.oldLayout = initialLayout;
+ barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = image;
+ barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
+
+ VK_CALL(CmdPipelineBarrier(cmdBuffer,
+ GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ 0,
+ 0, nullptr,
+ 0, nullptr,
+ 1, &barrier));
+ initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ SkTArray<VkBufferImageCopy> regions(mipLevels);
+
+ currentWidth = w;
+ currentHeight = h;
+ for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
+ // Submit copy command
+ VkBufferImageCopy& region = regions.push_back();
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = individualMipOffsets[currentMipLevel];
+ region.bufferRowLength = currentWidth;
+ region.bufferImageHeight = currentHeight;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { 0, 0, 0 };
+ region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+
+ VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(),
+ regions.begin()));
+ }
+ // Change Image layout to shader read since if we use this texture as a borrowed textures within
+ // Ganesh we require that its layout be set to that
+ VkImageMemoryBarrier barrier;
+ memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ barrier.oldLayout = initialLayout;
+ barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = image;
+ barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
+
+ VK_CALL(CmdPipelineBarrier(cmdBuffer,
+ GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ 0,
+ 0, nullptr,
+ 0, nullptr,
+ 1, &barrier));
+
+ // End CommandBuffer
+ err = VK_CALL(EndCommandBuffer(cmdBuffer));
+ SkASSERT(!err);
+
+ // Create Fence for queue
+ VkFence fence;
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+
+ err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
+ SkASSERT(!err);
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.pWaitDstStageMask = 0;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &cmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
+ SkASSERT(!err);
+
+ err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ GrVkMemory::FreeImageMemory(this, false, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ VK_CALL(DestroyFence(fDevice, fence, nullptr));
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SK_ABORT("failing");
+ }
+ SkASSERT(!err);
+
+ // Clean up transfer resources
+ if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ }
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ VK_CALL(DestroyFence(fDevice, fence, nullptr));
+
+
+ GrVkImageInfo info;
+ info.fImage = image;
+ info.fAlloc = alloc;
+ info.fImageTiling = imageTiling;
+ info.fImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ info.fFormat = pixelFormat;
+ info.fLevelCount = mipLevels;
+
+ return GrBackendTexture(w, h, info);
+}
+
+bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(kVulkan_GrBackend == tex.fBackend);
+
+ const GrVkImageInfo* backend = tex.getVkImageInfo();
if (backend && backend->fImage && backend->fAlloc.fMemory) {
VkMemoryRequirements req;
@@ -1525,15 +1874,15 @@
return false;
}
-void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
- GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
- if (backend) {
- if (!abandon) {
- // something in the command buffer may still be using this, so force submit
- this->submitCommandBuffer(kForce_SyncQueue);
- GrVkImage::DestroyImageInfo(this, backend);
- }
- delete backend;
+void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendTexture* tex, bool abandon) {
+ SkASSERT(kVulkan_GrBackend == tex->fBackend);
+
+ const GrVkImageInfo* info = tex->getVkImageInfo();
+
+ if (info && !abandon) {
+ // something in the command buffer may still be using this, so force submit
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(info));
}
}