Roll external/skia 6f59443c9..1c2052363 (8 commits)
https://skia.googlesource.com/skia.git/+log/6f59443c9..1c2052363
2019-01-31 reed@google.com fix tiler to handle large float rects
2019-01-31 brianosman@google.com Update tracing docs slightly
2019-01-31 skia-autoroll@skia-public.iam.gserviceaccount.com Roll skia/third_party/skcms cd2260c9f528..0c0f6dee2779 (1 commits)
2019-01-31 skia-autoroll@skia-public.iam.gserviceaccount.com Roll third_party/externals/swiftshader 5ebd2c0fafca..ebe5f7fad064 (1 commits)
2019-01-31 rmistry@google.com [cq.cfg] Remove cq.cfg.
2019-01-31 kjlubick@google.com Make cmake copy_directory instead of just files
2019-01-31 reed@google.com change measureText impl to first convert to glyphs
2019-01-31 egdaniel@google.com Add hardware buffer unit test from old skqp to skia.
The AutoRoll server is located here: https://autoroll-internal.skia.org/r/android-master-autoroll
Documentation for the AutoRoller is here:
https://skia.googlesource.com/buildbot/+/master/autoroll/README.md
If the roll is causing failures, please contact the current sheriff, who should
be CC'd on the roll, and stop the roller if necessary.
Test: Presubmit checks will test this change.
Change-Id: Iceb3a2b6fbabad2ab920e992e3703a1e8f9d1af0
Exempt-From-Owner-Approval: The autoroll bot does not require owner approval.
diff --git a/Android.bp b/Android.bp
index 6fa7de2..16c16ca 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1759,6 +1759,7 @@
"tests/VerticesTest.cpp",
"tests/VkBackendSurfaceTest.cpp",
"tests/VkDrawableTest.cpp",
+ "tests/VkHardwareBufferTest.cpp",
"tests/VkMakeCopyPipelineTest.cpp",
"tests/VkWrapTests.cpp",
"tests/VptrTest.cpp",
diff --git a/DEPS b/DEPS
index 48221a5..53a301d 100644
--- a/DEPS
+++ b/DEPS
@@ -29,7 +29,7 @@
"third_party/externals/sfntly" : "https://chromium.googlesource.com/external/github.com/googlei18n/sfntly.git@b55ff303ea2f9e26702b514cf6a3196a2e3e2974",
"third_party/externals/spirv-headers" : "https://skia.googlesource.com/external/github.com/KhronosGroup/SPIRV-Headers.git@661ad91124e6af2272afd00f804d8aa276e17107",
"third_party/externals/spirv-tools" : "https://skia.googlesource.com/external/github.com/KhronosGroup/SPIRV-Tools.git@e9e4393b1c5aad7553c05782acefbe32b42644bd",
- "third_party/externals/swiftshader" : "https://swiftshader.googlesource.com/SwiftShader@5ebd2c0fafca2f0d570ca25605092b5c7aaadd42",
+ "third_party/externals/swiftshader" : "https://swiftshader.googlesource.com/SwiftShader@ebe5f7fad06476b2828271977ee0d56ee45385ac",
#"third_party/externals/v8" : "https://chromium.googlesource.com/v8/v8.git@5f1ae66d5634e43563b2d25ea652dfb94c31a3b4",
"third_party/externals/wuffs" : "https://skia.googlesource.com/external/github.com/google/wuffs.git@fda3c4c9863d9f9fcec58ae66508c4621fc71ea5",
"third_party/externals/zlib" : "https://chromium.googlesource.com/chromium/src/third_party/zlib@47af7c547f8551bd25424e56354a2ae1e9062859",
diff --git a/gn/gn_to_cmake.py b/gn/gn_to_cmake.py
index 7f2f364..7768064 100644
--- a/gn/gn_to_cmake.py
+++ b/gn/gn_to_cmake.py
@@ -372,7 +372,7 @@
out.write('\n')
for src, dst in zip(inputs, outputs):
- out.write(' COMMAND ${CMAKE_COMMAND} -E copy "')
+ out.write(' COMMAND ${CMAKE_COMMAND} -E copy_directory "')
out.write(CMakeStringEscape(project.GetAbsolutePath(src)))
out.write('" "')
out.write(CMakeStringEscape(dst))
diff --git a/gn/tests.gni b/gn/tests.gni
index 79cea4b..4d3a2cb 100644
--- a/gn/tests.gni
+++ b/gn/tests.gni
@@ -284,6 +284,7 @@
"$_tests/VerticesTest.cpp",
"$_tests/VkBackendSurfaceTest.cpp",
"$_tests/VkDrawableTest.cpp",
+ "$_tests/VkHardwareBufferTest.cpp",
"$_tests/VkMakeCopyPipelineTest.cpp",
"$_tests/VkWrapTests.cpp",
"$_tests/VptrTest.cpp",
diff --git a/include/core/SkFont.h b/include/core/SkFont.h
index fda4e06..cea3dd3 100644
--- a/include/core/SkFont.h
+++ b/include/core/SkFont.h
@@ -546,6 +546,9 @@
void glyphsToUnichars(const SkGlyphID glyphs[], int count, SkUnichar text[]) const;
+ SkScalar legacy_measureText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkRect* bounds, const SkPaint* paint) const;
+
friend class GrTextBlob;
friend class SkCanonicalizeFont;
friend class SkFontPriv;
diff --git a/infra/branch-config/README.md b/infra/branch-config/README.md
deleted file mode 100644
index c036d61..0000000
--- a/infra/branch-config/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This directory contains configuration files for infra services.
diff --git a/infra/branch-config/cq.cfg b/infra/branch-config/cq.cfg
deleted file mode 100644
index b61e24d..0000000
--- a/infra/branch-config/cq.cfg
+++ /dev/null
@@ -1,81 +0,0 @@
-# Commit Queue configuration file. The documentation of the format can be found
-# at http://luci-config.appspot.com/schemas/projects/refs:cq.cfg.
-
-version: 1
-cq_status_url: "https://chromium-cq-status.appspot.com"
-git_repo_url: "https://skia.googlesource.com/skia/"
-gerrit {}
-
-commit_burst_delay: 300
-max_commit_burst: 2
-
-verifiers {
- gerrit_cq_ability {
- committer_list: "project-skia-committers"
- dry_run_access_list: "project-skia-tryjob-access"
- }
-
- tree_status {
- tree_status_url: "https://skia-tree-status.appspot.com"
- }
-
- try_job {
- buckets {
- name: "luci.chromium.try"
- # At time of writing, this is the fastest Chrome compile bot on average.
- builders { name: "mac_chromium_compile_dbg_ng" }
- }
- buckets {
- name: "skia.primary"
- builders { name: "Build-Debian9-Clang-arm-Debug-Android" }
- builders { name: "Build-Debian9-Clang-arm-Debug-Chromecast" }
- builders { name: "Build-Debian9-Clang-arm-Release-Android_API26" }
- builders { name: "Build-Debian9-Clang-arm64-Debug-Android" }
- builders { name: "Build-Debian9-Clang-cf_x86_phone-eng-Android_Framework" }
- builders { name: "Build-Debian9-Clang-host-sdk-Android_Framework" }
- builders { name: "Build-Debian9-Clang-x86_64-Debug" }
- builders { name: "Build-Debian9-Clang-x86_64-Debug-Tidy" }
- builders { name: "Build-Debian9-GCC-x86_64-Debug-NoGPU" }
- builders { name: "Build-Debian9-GCC-x86_64-Release" }
- builders { name: "Build-Mac-Clang-arm64-Debug-iOS" }
- builders { name: "Build-Mac-Clang-x86_64-Debug-Metal" }
- builders { name: "Build-Mac-Clang-x86_64-Release" }
- builders { name: "Build-Win-Clang-x86-Debug" }
- builders { name: "Build-Win-Clang-x86_64-Release-Vulkan" }
- builders { name: "Build-Win-MSVC-x86_64-Release-Vulkan" }
- builders { name: "Housekeeper-OnDemand-Presubmit" }
- builders { name: "Housekeeper-PerCommit-InfraTests" }
- builders { name: "Perf-Debian9-Clang-GCE-CPU-AVX2-x86-Debug-All" }
- builders { name: "Perf-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-ASAN" }
- builders { name: "Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Release-All-Android" }
- builders { name: "Test-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Release-All-Android_Vulkan" }
- builders { name: "Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All" }
- builders { name: "Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-BonusConfigs" }
- builders { name: "Test-Debian9-EMCC-GCE-CPU-AVX2-asmjs-Release-All-PathKit" }
- builders { name: "Test-Debian9-EMCC-GCE-CPU-AVX2-wasm-Release-All-PathKit" }
- builders { name: "Test-Debian9-EMCC-GCE-GPU-AVX2-wasm-Release-All-CanvasKit" }
- builders { name: "Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-ASAN" }
- builders { name: "Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-Vulkan" }
- builders { name: "Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Release-All" }
- builders { name: "Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL1" }
- builders { name: "Test-Win10-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-All" }
- builders { name: "Test-Win2016-Clang-GCE-CPU-AVX2-x86_64-Release-All" }
- builders { name: "Test-iOS-Clang-iPhone7-GPU-PowerVRGT7600-arm64-Debug-All" }
- builders {
- name: "Test-Debian9-Clang-NUC7i5BNK-GPU-IntelIris640-x86_64-Debug-All"
- experiment_percentage: 100
- }
- builders {
- name: "Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-ANGLE"
- experiment_percentage: 100
- }
- }
- try_job_retry_config {
- try_job_retry_quota: 1
- global_retry_quota: 2
- failure_retry_weight: 2
- transient_failure_retry_weight: 1
- timeout_retry_weight: 2
- }
- }
-}
diff --git a/site/dev/tools/tracing.md b/site/dev/tools/tracing.md
index 60595bc..b4d2c76 100644
--- a/site/dev/tools/tracing.md
+++ b/site/dev/tools/tracing.md
@@ -12,6 +12,32 @@
For standalone builds, Skia's tools (DM, nanobench, and Viewer) are capable of tracing execution
in three ways, controlled by the `--trace` command line argument.
+Standalone Tracing
+------------------
+
+Most arguments to `--trace` will be interpreted as a filename (the two exceptions are described
+below), and trace events will be written to that file in JSON format, suitable for viewing with
+[chrome://tracing](chrome://tracing).
+
+<!--?prettify lang=sh?-->
+
+ # Run DM on several GMs to get tracing data
+ out/Release/dm --config gl --match bleed --trace gl_bleed_gms.json
+
+This creates a file `gl_bleed_gms.json` in the current directory. There are limitations in Chrome's
+tracing tool that prevent loading a file larger than 256 MB. To stay under that limit (and avoid
+clutter and slowdown in the interface), it's best to run a small number of tests/benchmarks when
+tracing. Once you have generated a file in this way, go to
+[chrome://tracing](chrome://tracing), click Load:
+
+![Load Button](tracing_load.png)
+
+... then select the JSON file. The data will be loaded and can be navigated/inspected using the
+tracing tools. Tip: press '?' for a help screen explaining the available keyboard and mouse
+controls.
+
+![Tracing interface](tracing.png)
+
Android ATrace
--------------
@@ -56,24 +82,28 @@
...
~~~
-Chrome Tracing
---------------
+Adding More Trace Events
+------------------------
-Any other argument to `--trace` will be interpreted as a filename, and trace events will be written
-to that file in JSON format, suitable for viewing with [chrome://tracing](chrome://tracing).
+Adding more trace events involves using a set of `TRACE_` macros. The simplest example, to record
+the time spent in a function or other scope, is:
-<!--?prettify lang=sh?-->
+~~~
+#include <SkTraceEvent.h>
+...
+void doSomething() {
+ // Add an event for the duration of the current function (or other scope)
+ // "skia" is a category name, for filtering events while recording
+ // TRACE_FUNC is the event name, and expands to the name of the current function
+ TRACE_EVENT0("skia", TRACE_FUNC);
- # Run DM on several GMs to get tracing data
- out/Release/dm --config gl --match bleed --trace gl_bleed_gms.json
+ if (doExtraWork) {
+ TRACE_EVENT0("skia", "ExtraWorkBeingDone");
+ ...
+ }
+}
+~~~
-This creates a file `gl_bleed_gms.json` in the current directory. Go to
-[chrome://tracing](chrome://tracing), click Load:
-
-![Load Button](tracing_load.png)
-
-... then select the JSON file. The data will be loaded and can be navigated/inspected using the
-tracing tools. Tip: press '?' for a help screen explaining the available keyboard and mouse
-controls.
-
-![Tracing interface](tracing.png)
+For more examples, including other kinds of trace events and attaching parameters to events, see
+the comments in
+[SkTraceEventCommon.h](https://cs.chromium.org/chromium/src/third_party/skia/src/core/SkTraceEventCommon.h).
\ No newline at end of file
diff --git a/src/core/SkBitmapDevice.cpp b/src/core/SkBitmapDevice.cpp
index 17dd92d..9fffbb5 100644
--- a/src/core/SkBitmapDevice.cpp
+++ b/src/core/SkBitmapDevice.cpp
@@ -77,10 +77,20 @@
fNeedsTiling = clipR.right() > kMaxDim || clipR.bottom() > kMaxDim;
if (fNeedsTiling) {
if (bounds) {
- SkRect devBounds;
- dev->ctm().mapRect(&devBounds, *bounds);
- if (devBounds.intersect(SkRect::Make(clipR))) {
- fSrcBounds = devBounds.roundOut();
+ // Make sure we round first, and then intersect. We can't rely on promoting the
+ // clipR to floats (and then intersecting with devBounds) since promoting
+ // int --> float can make the float larger than the int.
+            // rounding(out) first runs the risk of clamping if the float is larger than intmax
+ // but our roundOut() is saturating, which is fine for this use case
+ //
+ // e.g. the older version of this code did this:
+ // devBounds = mapRect(bounds);
+ // if (devBounds.intersect(SkRect::Make(clipR))) {
+ // fSrcBounds = devBounds.roundOut();
+ // The problem being that the promotion of clipR to SkRect was unreliable
+ //
+ fSrcBounds = dev->ctm().mapRect(*bounds).roundOut();
+ if (fSrcBounds.intersect(clipR)) {
// Check again, now that we have computed srcbounds.
fNeedsTiling = fSrcBounds.right() > kMaxDim || fSrcBounds.bottom() > kMaxDim;
} else {
diff --git a/src/core/SkFont.cpp b/src/core/SkFont.cpp
index 80b589b..c9f8e50 100644
--- a/src/core/SkFont.cpp
+++ b/src/core/SkFont.cpp
@@ -321,8 +321,8 @@
SkIntToScalar(g.fTop + g.fHeight));
}
-SkScalar SkFont::measureText(const void* textD, size_t length, SkTextEncoding encoding,
- SkRect* bounds, const SkPaint* paint) const {
+SkScalar SkFont::legacy_measureText(const void* textD, size_t length, SkTextEncoding encoding,
+ SkRect* bounds, const SkPaint* paint) const {
if (length == 0) {
if (bounds) {
bounds->setEmpty();
@@ -368,6 +368,63 @@
return width;
}
+SkScalar SkFont::measureText(const void* text, size_t length, SkTextEncoding encoding,
+ SkRect* bounds, const SkPaint* paint) const {
+ SkCanonicalizeFont canon(*this, paint);
+ const SkFont& font = canon.getFont();
+ const SkScalar scale = canon.getScale();
+
+ SkAutoToGlyphs atg(font, text, length, encoding);
+ const int count = atg.count();
+ if (count == 0) {
+ if (bounds) {
+ bounds->setEmpty();
+ }
+ return 0;
+ }
+ const uint16_t* glyphs = atg.glyphs();
+
+ auto cache = SkStrikeCache::FindOrCreateStrikeWithNoDeviceExclusive(font, canon.getPaint());
+
+ SkScalar width = 0;
+ if (bounds) {
+ const SkGlyph* g = &cache->getGlyphIDMetrics(glyphs[0]);
+ set_bounds(*g, bounds);
+ width = g->fAdvanceX;
+ for (int i = 1; i < count; ++i) {
+ g = &cache->getGlyphIDMetrics(glyphs[i]);
+ join_bounds_x(*g, bounds, width);
+ width += g->fAdvanceX;
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ width += cache->getGlyphIDAdvance(glyphs[i]).fAdvanceX;
+ }
+ }
+
+ if (scale) {
+ width *= scale;
+ if (bounds) {
+ bounds->fLeft *= scale;
+ bounds->fTop *= scale;
+ bounds->fRight *= scale;
+ bounds->fBottom *= scale;
+ }
+ }
+
+#ifdef SK_DEBUG
+ {
+ SkRect b2;
+ SkScalar w2 = this->legacy_measureText(text, length, encoding, &b2, paint);
+ SkASSERT(width == w2);
+ if (bounds) {
+ SkASSERT(*bounds == b2);
+ }
+ }
+#endif
+ return width;
+}
+
static SkRect make_bounds(const SkGlyph& g, SkScalar scale) {
return {
g.fLeft * scale,
diff --git a/tests/RectTest.cpp b/tests/RectTest.cpp
index 2594c53..43301ce 100644
--- a/tests/RectTest.cpp
+++ b/tests/RectTest.cpp
@@ -160,3 +160,22 @@
REPORTER_ASSERT(reporter, !SkScalarIsFinite(r.height()));
}
+#include "SkSurface.h"
+
+// Before the fix, this sequence would trigger a release_assert in the Tiler
+// in SkBitmapDevice.cpp
+DEF_TEST(big_tiled_rect_crbug_927075, reporter) {
+ const int w = 67108863;
+ const int h = 1;
+ const auto info = SkImageInfo::MakeN32Premul(w, h);
+
+ auto surf = SkSurface::MakeRaster(info);
+ auto canvas = surf->getCanvas();
+
+ const SkRect r = { 257, 213, 67109120, 214 };
+ SkPaint paint;
+ paint.setAntiAlias(true);
+
+ canvas->translate(-r.fLeft, -r.fTop);
+ canvas->drawRect(r, paint);
+}
diff --git a/tests/VkHardwareBufferTest.cpp b/tests/VkHardwareBufferTest.cpp
new file mode 100644
index 0000000..7c9117b
--- /dev/null
+++ b/tests/VkHardwareBufferTest.cpp
@@ -0,0 +1,1323 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This is a GPU-backend specific test. It relies on static initializers to work
+
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU && defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26 && defined(SK_VULKAN)
+
+#include "GrBackendSemaphore.h"
+#include "GrContext.h"
+#include "GrContextFactory.h"
+#include "GrContextPriv.h"
+#include "GrGpu.h"
+#include "GrProxyProvider.h"
+#include "SkAutoMalloc.h"
+#include "SkCanvas.h"
+#include "SkGr.h"
+#include "SkImage.h"
+#include "SkSurface.h"
+#include "Test.h"
+#include "../tools/gpu/vk/VkTestUtils.h"
+#include "gl/GrGLDefines.h"
+#include "gl/GrGLUtil.h"
+#include "vk/GrVkBackendContext.h"
+#include "vk/GrVkExtensions.h"
+
+#include <android/hardware_buffer.h>
+#include <cinttypes>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+
+static const int DEV_W = 16, DEV_H = 16;
+
+class BaseTestHelper {
+public:
+ virtual ~BaseTestHelper() {}
+
+ virtual bool init(skiatest::Reporter* reporter) = 0;
+
+ virtual void cleanup() = 0;
+ virtual void releaseImage() = 0;
+
+ virtual sk_sp<SkImage> importHardwareBufferForRead(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) = 0;
+ virtual sk_sp<SkSurface> importHardwareBufferForWrite(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) = 0;
+
+ virtual void doClientSync() = 0;
+ virtual bool flushSurfaceAndSignalSemaphore(skiatest::Reporter* reporter, sk_sp<SkSurface>) = 0;
+ virtual bool importAndWaitOnSemaphore(skiatest::Reporter* reporter, int fdHandle,
+ sk_sp<SkSurface>) = 0;
+
+ virtual void makeCurrent() = 0;
+
+ virtual GrContext* grContext() = 0;
+
+ int getFdHandle() { return fFdHandle; }
+
+protected:
+ BaseTestHelper() {}
+
+ int fFdHandle = 0;
+};
+
+class EGLTestHelper : public BaseTestHelper {
+public:
+ EGLTestHelper(const GrContextOptions& options) : fFactory(options) {}
+
+ ~EGLTestHelper() override {}
+
+ void releaseImage() override {
+ this->makeCurrent();
+ if (!fGLCtx) {
+ return;
+ }
+ if (EGL_NO_IMAGE_KHR != fImage) {
+ fGLCtx->destroyEGLImage(fImage);
+ fImage = EGL_NO_IMAGE_KHR;
+ }
+ if (fTexID) {
+ GR_GL_CALL(fGLCtx->gl(), DeleteTextures(1, &fTexID));
+ fTexID = 0;
+ }
+ }
+
+ void cleanup() override {
+ this->releaseImage();
+ }
+
+ bool init(skiatest::Reporter* reporter) override;
+
+ sk_sp<SkImage> importHardwareBufferForRead(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) override;
+ sk_sp<SkSurface> importHardwareBufferForWrite(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) override;
+
+ void doClientSync() override;
+ bool flushSurfaceAndSignalSemaphore(skiatest::Reporter* reporter, sk_sp<SkSurface>) override;
+ bool importAndWaitOnSemaphore(skiatest::Reporter* reporter, int fdHandle,
+ sk_sp<SkSurface>) override;
+
+ void makeCurrent() override { fGLCtx->makeCurrent(); }
+
+ GrContext* grContext() override { return fGrContext; }
+
+private:
+ bool importHardwareBuffer(skiatest::Reporter* reporter, AHardwareBuffer* buffer);
+
+ typedef EGLClientBuffer (*EGLGetNativeClientBufferANDROIDProc)(const struct AHardwareBuffer*);
+ typedef EGLImageKHR (*EGLCreateImageKHRProc)(EGLDisplay, EGLContext, EGLenum, EGLClientBuffer,
+ const EGLint*);
+ typedef void (*EGLImageTargetTexture2DOESProc)(EGLenum, void*);
+ EGLGetNativeClientBufferANDROIDProc fEGLGetNativeClientBufferANDROID;
+ EGLCreateImageKHRProc fEGLCreateImageKHR;
+ EGLImageTargetTexture2DOESProc fEGLImageTargetTexture2DOES;
+
+ PFNEGLCREATESYNCKHRPROC fEGLCreateSyncKHR;
+ PFNEGLWAITSYNCKHRPROC fEGLWaitSyncKHR;
+ PFNEGLGETSYNCATTRIBKHRPROC fEGLGetSyncAttribKHR;
+ PFNEGLDUPNATIVEFENCEFDANDROIDPROC fEGLDupNativeFenceFDANDROID;
+ PFNEGLDESTROYSYNCKHRPROC fEGLDestroySyncKHR;
+
+ EGLImageKHR fImage = EGL_NO_IMAGE_KHR;
+ GrGLuint fTexID = 0;
+
+ sk_gpu_test::GrContextFactory fFactory;
+ sk_gpu_test::ContextInfo fGLESContextInfo;
+
+ sk_gpu_test::GLTestContext* fGLCtx = nullptr;
+ GrContext* fGrContext = nullptr;
+};
+
+bool EGLTestHelper::init(skiatest::Reporter* reporter) {
+ fGLESContextInfo = fFactory.getContextInfo(sk_gpu_test::GrContextFactory::kGLES_ContextType);
+ fGrContext = fGLESContextInfo.grContext();
+ fGLCtx = fGLESContextInfo.glContext();
+ if (!fGrContext || !fGLCtx) {
+ return false;
+ }
+
+ if (kGLES_GrGLStandard != fGLCtx->gl()->fStandard) {
+ return false;
+ }
+
+ // Confirm we have egl and the needed extensions
+ if (!fGLCtx->gl()->hasExtension("EGL_KHR_image") ||
+ !fGLCtx->gl()->hasExtension("EGL_ANDROID_get_native_client_buffer") ||
+ !fGLCtx->gl()->hasExtension("GL_OES_EGL_image_external") ||
+ !fGLCtx->gl()->hasExtension("GL_OES_EGL_image") ||
+ !fGLCtx->gl()->hasExtension("EGL_KHR_fence_sync")) {
+ return false;
+ }
+
+ fEGLGetNativeClientBufferANDROID =
+ (EGLGetNativeClientBufferANDROIDProc) eglGetProcAddress("eglGetNativeClientBufferANDROID");
+ if (!fEGLGetNativeClientBufferANDROID) {
+ ERRORF(reporter, "Failed to get the eglGetNativeClientBufferAndroid proc");
+ return false;
+ }
+
+ fEGLCreateImageKHR = (EGLCreateImageKHRProc) eglGetProcAddress("eglCreateImageKHR");
+ if (!fEGLCreateImageKHR) {
+ ERRORF(reporter, "Failed to get the proc eglCreateImageKHR");
+ return false;
+ }
+
+ fEGLImageTargetTexture2DOES =
+ (EGLImageTargetTexture2DOESProc) eglGetProcAddress("glEGLImageTargetTexture2DOES");
+ if (!fEGLImageTargetTexture2DOES) {
+ ERRORF(reporter, "Failed to get the proc EGLImageTargetTexture2DOES");
+ return false;
+ }
+
+ fEGLCreateSyncKHR = (PFNEGLCREATESYNCKHRPROC) eglGetProcAddress("eglCreateSyncKHR");
+ if (!fEGLCreateSyncKHR) {
+ ERRORF(reporter, "Failed to get the proc eglCreateSyncKHR");
+ return false;
+
+ }
+ fEGLWaitSyncKHR = (PFNEGLWAITSYNCKHRPROC) eglGetProcAddress("eglWaitSyncKHR");
+ if (!fEGLWaitSyncKHR) {
+ ERRORF(reporter, "Failed to get the proc eglWaitSyncKHR");
+ return false;
+
+ }
+ fEGLGetSyncAttribKHR = (PFNEGLGETSYNCATTRIBKHRPROC) eglGetProcAddress("eglGetSyncAttribKHR");
+ if (!fEGLGetSyncAttribKHR) {
+ ERRORF(reporter, "Failed to get the proc eglGetSyncAttribKHR");
+ return false;
+
+ }
+ fEGLDupNativeFenceFDANDROID =
+ (PFNEGLDUPNATIVEFENCEFDANDROIDPROC) eglGetProcAddress("eglDupNativeFenceFDANDROID");
+ if (!fEGLDupNativeFenceFDANDROID) {
+ ERRORF(reporter, "Failed to get the proc eglDupNativeFenceFDANDROID");
+ return false;
+
+ }
+ fEGLDestroySyncKHR = (PFNEGLDESTROYSYNCKHRPROC) eglGetProcAddress("eglDestroySyncKHR");
+ if (!fEGLDestroySyncKHR) {
+ ERRORF(reporter, "Failed to get the proc eglDestroySyncKHR");
+ return false;
+
+ }
+
+ return true;
+}
+
+bool EGLTestHelper::importHardwareBuffer(skiatest::Reporter* reporter, AHardwareBuffer* buffer) {
+ GrGLClearErr(fGLCtx->gl());
+
+ EGLClientBuffer eglClientBuffer = fEGLGetNativeClientBufferANDROID(buffer);
+ EGLint eglAttribs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
+ EGL_NONE };
+ EGLDisplay eglDisplay = eglGetCurrentDisplay();
+ fImage = fEGLCreateImageKHR(eglDisplay, EGL_NO_CONTEXT,
+ EGL_NATIVE_BUFFER_ANDROID,
+ eglClientBuffer, eglAttribs);
+ if (EGL_NO_IMAGE_KHR == fImage) {
+ SkDebugf("Could not create EGL image, err = (%#x)\n", (int) eglGetError() );
+ return false;
+ }
+
+ GR_GL_CALL(fGLCtx->gl(), GenTextures(1, &fTexID));
+ if (!fTexID) {
+ ERRORF(reporter, "Failed to create GL Texture");
+ return false;
+ }
+ GR_GL_CALL_NOERRCHECK(fGLCtx->gl(), BindTexture(GR_GL_TEXTURE_2D, fTexID));
+ if (GR_GL_GET_ERROR(fGLCtx->gl()) != GR_GL_NO_ERROR) {
+ ERRORF(reporter, "Failed to bind GL Texture");
+ return false;
+ }
+
+ fEGLImageTargetTexture2DOES(GL_TEXTURE_2D, fImage);
+ GLenum status = GL_NO_ERROR;
+ if ((status = glGetError()) != GL_NO_ERROR) {
+ ERRORF(reporter, "EGLImageTargetTexture2DOES failed (%#x)", (int) status);
+ return false;
+ }
+
+ fGrContext->resetContext(kTextureBinding_GrGLBackendState);
+ return true;
+}
+
+sk_sp<SkImage> EGLTestHelper::importHardwareBufferForRead(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) {
+ if (!this->importHardwareBuffer(reporter, buffer)) {
+ return nullptr;
+ }
+ GrGLTextureInfo textureInfo;
+ textureInfo.fTarget = GR_GL_TEXTURE_2D;
+ textureInfo.fID = fTexID;
+ textureInfo.fFormat = GR_GL_RGBA8;
+
+ GrBackendTexture backendTex(DEV_W, DEV_H, GrMipMapped::kNo, textureInfo);
+ REPORTER_ASSERT(reporter, backendTex.isValid());
+
+ sk_sp<SkImage> image = SkImage::MakeFromTexture(fGrContext,
+ backendTex,
+ kTopLeft_GrSurfaceOrigin,
+ kRGBA_8888_SkColorType,
+ kPremul_SkAlphaType,
+ nullptr);
+
+ if (!image) {
+ ERRORF(reporter, "Failed to make wrapped GL SkImage");
+ return nullptr;
+ }
+
+ return image;
+}
+
+sk_sp<SkSurface> EGLTestHelper::importHardwareBufferForWrite(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) {
+ if (!this->importHardwareBuffer(reporter, buffer)) {
+ return nullptr;
+ }
+ GrGLTextureInfo textureInfo;
+ textureInfo.fTarget = GR_GL_TEXTURE_2D;
+ textureInfo.fID = fTexID;
+ textureInfo.fFormat = GR_GL_RGBA8;
+
+ GrBackendTexture backendTex(DEV_W, DEV_H, GrMipMapped::kNo, textureInfo);
+ REPORTER_ASSERT(reporter, backendTex.isValid());
+
+ sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(fGrContext,
+ backendTex,
+ kTopLeft_GrSurfaceOrigin,
+ 0,
+ kRGBA_8888_SkColorType,
+ nullptr, nullptr);
+
+ if (!surface) {
+ ERRORF(reporter, "Failed to make wrapped GL SkSurface");
+ return nullptr;
+ }
+
+ return surface;
+}
+
+bool EGLTestHelper::flushSurfaceAndSignalSemaphore(skiatest::Reporter* reporter,
+ sk_sp<SkSurface> surface) {
+ EGLDisplay eglDisplay = eglGetCurrentDisplay();
+ EGLSyncKHR eglsync = fEGLCreateSyncKHR(eglDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
+ if (EGL_NO_SYNC_KHR == eglsync) {
+ ERRORF(reporter, "Failed to create EGLSync for EGL_SYNC_NATIVE_FENCE_ANDROID\n");
+ return false;
+ }
+
+ surface->flush();
+ GR_GL_CALL(fGLCtx->gl(), Flush());
+ fFdHandle = fEGLDupNativeFenceFDANDROID(eglDisplay, eglsync);
+
+ EGLint result = fEGLDestroySyncKHR(eglDisplay, eglsync);
+ if (EGL_TRUE != result) {
+ ERRORF(reporter, "Failed to delete EGLSync, error: %d\n", result);
+ return false;
+ }
+
+ return true;
+}
+
+bool EGLTestHelper::importAndWaitOnSemaphore(skiatest::Reporter* reporter, int fdHandle,
+ sk_sp<SkSurface> surface) {
+ EGLDisplay eglDisplay = eglGetCurrentDisplay();
+ EGLint attr[] = {
+ EGL_SYNC_NATIVE_FENCE_FD_ANDROID, fdHandle,
+ EGL_NONE
+ };
+ EGLSyncKHR eglsync = fEGLCreateSyncKHR(eglDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, attr);
+ if (EGL_NO_SYNC_KHR == eglsync) {
+ ERRORF(reporter,
+ "Failed to create EGLSync when importing EGL_SYNC_NATIVE_FENCE_FD_ANDROID\n");
+ return false;
+ }
+ EGLint result = fEGLWaitSyncKHR(eglDisplay, eglsync, 0);
+ if (EGL_TRUE != result) {
+ ERRORF(reporter, "Failed called to eglWaitSyncKHR, error: %d\n", result);
+ // Don't return false yet, try to delete the sync first
+ }
+ result = fEGLDestroySyncKHR(eglDisplay, eglsync);
+ if (EGL_TRUE != result) {
+ ERRORF(reporter, "Failed to delete EGLSync, error: %d\n", result);
+ return false;
+ }
+ return true;
+}
+
+void EGLTestHelper::doClientSync() {
+ sk_gpu_test::FenceSync* fenceSync = fGLCtx->fenceSync();
+ sk_gpu_test::PlatformFence fence = fenceSync->insertFence();
+ fenceSync->waitFence(fence);
+ fenceSync->deleteFence(fence);
+}
+
+#define DECLARE_VK_PROC(name) PFN_vk##name fVk##name
+
+#define ACQUIRE_INST_VK_PROC(name) \
+ fVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, fBackendContext.fInstance,\
+ VK_NULL_HANDLE)); \
+ if (fVk##name == nullptr) { \
+ ERRORF(reporter, "Function ptr for vk%s could not be acquired\n", #name); \
+ return false; \
+ }
+
+#define ACQUIRE_DEVICE_VK_PROC(name) \
+ fVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, VK_NULL_HANDLE, fDevice)); \
+ if (fVk##name == nullptr) { \
+ ERRORF(reporter, "Function ptr for vk%s could not be acquired\n", #name); \
+ return false; \
+ }
+
+class VulkanTestHelper : public BaseTestHelper {
+public:
+ VulkanTestHelper() {}
+
+ ~VulkanTestHelper() override {}
+
+ void releaseImage() override {
+ if (VK_NULL_HANDLE == fDevice) {
+ return;
+ }
+ if (fImage != VK_NULL_HANDLE) {
+ fVkDestroyImage(fDevice, fImage, nullptr);
+ fImage = VK_NULL_HANDLE;
+ }
+
+ if (fMemory != VK_NULL_HANDLE) {
+ fVkFreeMemory(fDevice, fMemory, nullptr);
+ fMemory = VK_NULL_HANDLE;
+ }
+ }
+ void cleanup() override {
+ this->releaseImage();
+
+ fGrContext.reset();
+ fBackendContext.fMemoryAllocator.reset();
+ if (fDevice != VK_NULL_HANDLE) {
+ fVkDeviceWaitIdle(fDevice);
+ fVkDestroyDevice(fDevice, nullptr);
+ fDevice = VK_NULL_HANDLE;
+ }
+#ifdef SK_ENABLE_VK_LAYERS
+ if (fDebugCallback != VK_NULL_HANDLE) {
+ fDestroyDebugCallback(fBackendContext.fInstance, fDebugCallback, nullptr);
+ }
+#endif
+ if (fBackendContext.fInstance != VK_NULL_HANDLE) {
+ fVkDestroyInstance(fBackendContext.fInstance, nullptr);
+ fBackendContext.fInstance = VK_NULL_HANDLE;
+ }
+
+ delete fExtensions;
+
+ sk_gpu_test::FreeVulkanFeaturesStructs(fFeatures);
+ delete fFeatures;
+ }
+
+ bool init(skiatest::Reporter* reporter) override;
+
+ void doClientSync() override {
+ if (!fGrContext) {
+ return;
+ }
+
+ fGrContext->contextPriv().getGpu()->testingOnly_flushGpuAndSync();
+ }
+
+ bool flushSurfaceAndSignalSemaphore(skiatest::Reporter* reporter, sk_sp<SkSurface>) override;
+ bool importAndWaitOnSemaphore(skiatest::Reporter* reporter, int fdHandle,
+ sk_sp<SkSurface>) override;
+
+ sk_sp<SkImage> importHardwareBufferForRead(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) override;
+
+ sk_sp<SkSurface> importHardwareBufferForWrite(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) override;
+
+ void makeCurrent() override {}
+
+ GrContext* grContext() override { return fGrContext.get(); }
+
+private:
+ bool checkOptimalHardwareBuffer(skiatest::Reporter* reporter);
+
+ bool importHardwareBuffer(skiatest::Reporter* reporter, AHardwareBuffer* buffer, bool forWrite,
+ GrVkImageInfo* outImageInfo);
+
+ bool setupSemaphoreForSignaling(skiatest::Reporter* reporter, GrBackendSemaphore*);
+ bool exportSemaphore(skiatest::Reporter* reporter, const GrBackendSemaphore&);
+
+ DECLARE_VK_PROC(DestroyInstance);
+ DECLARE_VK_PROC(DeviceWaitIdle);
+ DECLARE_VK_PROC(DestroyDevice);
+
+ DECLARE_VK_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+ DECLARE_VK_PROC(GetPhysicalDeviceImageFormatProperties2);
+ DECLARE_VK_PROC(GetPhysicalDeviceMemoryProperties2);
+
+ DECLARE_VK_PROC(GetAndroidHardwareBufferPropertiesANDROID);
+
+ DECLARE_VK_PROC(CreateImage);
+ DECLARE_VK_PROC(GetImageMemoryRequirements2);
+ DECLARE_VK_PROC(DestroyImage);
+
+ DECLARE_VK_PROC(AllocateMemory);
+ DECLARE_VK_PROC(BindImageMemory2);
+ DECLARE_VK_PROC(FreeMemory);
+
+ DECLARE_VK_PROC(CreateSemaphore);
+ DECLARE_VK_PROC(GetSemaphoreFdKHR);
+ DECLARE_VK_PROC(ImportSemaphoreFdKHR);
+ DECLARE_VK_PROC(DestroySemaphore);
+
+ VkImage fImage = VK_NULL_HANDLE;
+ VkDeviceMemory fMemory = VK_NULL_HANDLE;
+
+ GrVkExtensions* fExtensions = nullptr;
+ VkPhysicalDeviceFeatures2* fFeatures = nullptr;
+ VkDebugReportCallbackEXT fDebugCallback = VK_NULL_HANDLE;
+ PFN_vkDestroyDebugReportCallbackEXT fDestroyDebugCallback = nullptr;
+
+ VkDevice fDevice = VK_NULL_HANDLE;
+
+ GrVkBackendContext fBackendContext;
+ sk_sp<GrContext> fGrContext;
+};
+
+// Loads the Vulkan library, builds a GrVkBackendContext, checks that the
+// extensions required for AHardwareBuffer import and sync-fd semaphore
+// sharing are present, acquires the instance/device proc addresses the test
+// needs, and creates the GrContext. Returns false to skip the test if any
+// requirement is unavailable.
+bool VulkanTestHelper::init(skiatest::Reporter* reporter) {
+ PFN_vkGetInstanceProcAddr instProc;
+ PFN_vkGetDeviceProcAddr devProc;
+ if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
+ return false;
+ }
+ // Prefer device-level entry points when a device is supplied; fall back
+ // to instance-level lookup otherwise.
+ auto getProc = [&instProc, &devProc](const char* proc_name,
+ VkInstance instance, VkDevice device) {
+ if (device != VK_NULL_HANDLE) {
+ return devProc(device, proc_name);
+ }
+ return instProc(instance, proc_name);
+ };
+
+ // NOTE(review): heap allocations with no visible delete on the early
+ // return paths below — confirm the helper's cleanup path frees these.
+ fExtensions = new GrVkExtensions();
+ fFeatures = new VkPhysicalDeviceFeatures2;
+ memset(fFeatures, 0, sizeof(VkPhysicalDeviceFeatures2));
+ fFeatures->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ fFeatures->pNext = nullptr;
+
+ fBackendContext.fInstance = VK_NULL_HANDLE;
+ fBackendContext.fDevice = VK_NULL_HANDLE;
+
+ if (!sk_gpu_test::CreateVkBackendContext(getProc, &fBackendContext, fExtensions,
+ fFeatures, &fDebugCallback)) {
+ return false;
+ }
+ fDevice = fBackendContext.fDevice;
+
+ if (fDebugCallback != VK_NULL_HANDLE) {
+ fDestroyDebugCallback = (PFN_vkDestroyDebugReportCallbackEXT) instProc(
+ fBackendContext.fInstance, "vkDestroyDebugReportCallbackEXT");
+ }
+
+ ACQUIRE_INST_VK_PROC(DestroyInstance);
+ ACQUIRE_INST_VK_PROC(DeviceWaitIdle);
+ ACQUIRE_INST_VK_PROC(DestroyDevice);
+
+ // AHardwareBuffer import requires v2 of the Android external-memory
+ // extension; semaphore sharing requires sync-fd export/import.
+ if (!fExtensions->hasExtension(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
+ 2)) {
+ return false;
+ }
+ if (!fExtensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
+ return false;
+ }
+ if (!fExtensions->hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
+ return false;
+ }
+ if (!fExtensions->hasExtension(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME, 1)) {
+ // NOTE(review): this check is disabled — the test proceeds even
+ // without VK_EXT_queue_family_foreign. Confirm that is intentional.
+ // return false;
+ }
+
+ ACQUIRE_INST_VK_PROC(GetPhysicalDeviceMemoryProperties2);
+ ACQUIRE_INST_VK_PROC(GetPhysicalDeviceImageFormatProperties2);
+ ACQUIRE_INST_VK_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+
+ ACQUIRE_DEVICE_VK_PROC(GetAndroidHardwareBufferPropertiesANDROID);
+
+ ACQUIRE_DEVICE_VK_PROC(CreateImage);
+ ACQUIRE_DEVICE_VK_PROC(GetImageMemoryRequirements2);
+ ACQUIRE_DEVICE_VK_PROC(DestroyImage);
+
+ ACQUIRE_DEVICE_VK_PROC(AllocateMemory);
+ ACQUIRE_DEVICE_VK_PROC(BindImageMemory2);
+ ACQUIRE_DEVICE_VK_PROC(FreeMemory);
+
+ ACQUIRE_DEVICE_VK_PROC(CreateSemaphore);
+ ACQUIRE_DEVICE_VK_PROC(GetSemaphoreFdKHR);
+ ACQUIRE_DEVICE_VK_PROC(ImportSemaphoreFdKHR);
+ ACQUIRE_DEVICE_VK_PROC(DestroySemaphore);
+
+ fGrContext = GrContext::MakeVulkan(fBackendContext);
+ REPORTER_ASSERT(reporter, fGrContext.get());
+ if (!fGrContext) {
+ return false;
+ }
+
+ return this->checkOptimalHardwareBuffer(reporter);
+}
+
+// Queries the physical device (via a pNext chain of external-image-format
+// structs) to confirm that an optimally-tiled RGBA8 image imported from an
+// AHardwareBuffer supports the sizes, memory features, and usage bits this
+// test relies on. Returns false if the query itself fails.
+bool VulkanTestHelper::checkOptimalHardwareBuffer(skiatest::Reporter* reporter) {
+ VkResult err;
+
+ VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo;
+ externalImageFormatInfo.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
+ externalImageFormatInfo.pNext = nullptr;
+ externalImageFormatInfo.handleType =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
+ // NOTE(review): debug leftover below — consider removing.
+ //externalImageFormatInfo.handType = 0x80;
+
+ // We will create the hardware buffer with gpu sampled so these usages should all be valid
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ VkPhysicalDeviceImageFormatInfo2 imageFormatInfo;
+ imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+ imageFormatInfo.pNext = &externalImageFormatInfo;
+ imageFormatInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+ imageFormatInfo.type = VK_IMAGE_TYPE_2D;
+ imageFormatInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageFormatInfo.usage = usageFlags;
+ imageFormatInfo.flags = 0;
+
+ // Output chain: hwbUsage hangs off externalImgFormatProps, which hangs
+ // off imgFormProps; all three are filled by the query below.
+ VkAndroidHardwareBufferUsageANDROID hwbUsage;
+ hwbUsage.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID;
+ hwbUsage.pNext = nullptr;
+
+ VkExternalImageFormatProperties externalImgFormatProps;
+ externalImgFormatProps.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
+ externalImgFormatProps.pNext = &hwbUsage;
+
+ VkImageFormatProperties2 imgFormProps;
+ imgFormProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+ imgFormProps.pNext = &externalImgFormatProps;
+
+ err = fVkGetPhysicalDeviceImageFormatProperties2(fBackendContext.fPhysicalDevice,
+ &imageFormatInfo, &imgFormProps);
+ if (VK_SUCCESS != err) {
+ // NOTE(review): typo in message ("Properites").
+ ERRORF(reporter, "vkGetPhysicalDeviceImageFormatProperites failed, err: %d", err);
+ return false;
+ }
+
+ const VkImageFormatProperties& imageFormatProperties = imgFormProps.imageFormatProperties;
+ REPORTER_ASSERT(reporter, DEV_W <= imageFormatProperties.maxExtent.width);
+ REPORTER_ASSERT(reporter, DEV_H <= imageFormatProperties.maxExtent.height);
+
+ const VkExternalMemoryProperties& externalImageFormatProps =
+ externalImgFormatProps.externalMemoryProperties;
+ REPORTER_ASSERT(reporter, SkToBool(VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT &
+ externalImageFormatProps.externalMemoryFeatures));
+ REPORTER_ASSERT(reporter, SkToBool(VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT &
+ externalImageFormatProps.externalMemoryFeatures));
+
+ REPORTER_ASSERT(reporter, SkToBool(AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE &
+ hwbUsage.androidHardwareBufferUsage));
+
+ return true;
+}
+
+// Imports `buffer` into Vulkan: queries the AHB's format/memory properties,
+// creates a matching VkImage with external-memory create info, allocates a
+// dedicated VkDeviceMemory that imports the AHB, binds it, and fills
+// *outImageInfo for wrapping as a Gr backend texture. `forWrite` adds the
+// color-attachment usage/feature requirements. Returns false on any failure;
+// fImage/fMemory are stored on the helper for later cleanup.
+bool VulkanTestHelper::importHardwareBuffer(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer,
+ bool forWrite,
+ GrVkImageInfo* outImageInfo) {
+ VkResult err;
+
+ VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps;
+ hwbFormatProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
+ hwbFormatProps.pNext = nullptr;
+
+ VkAndroidHardwareBufferPropertiesANDROID hwbProps;
+ hwbProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
+ hwbProps.pNext = &hwbFormatProps;
+
+ err = fVkGetAndroidHardwareBufferPropertiesANDROID(fDevice, buffer, &hwbProps);
+ if (VK_SUCCESS != err) {
+ // NOTE(review): typo in message ("Andoird").
+ ERRORF(reporter, "GetAndroidHardwareBufferPropertiesAndoird failed, err: %d", err);
+ return false;
+ }
+
+ // The AHB was allocated as R8G8B8A8 with GPU-sampled usage, so the driver
+ // must report a matching format with sample/transfer features.
+ REPORTER_ASSERT(reporter, VK_FORMAT_R8G8B8A8_UNORM == hwbFormatProps.format);
+ REPORTER_ASSERT(reporter,
+ SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & hwbFormatProps.formatFeatures) &&
+ SkToBool(VK_FORMAT_FEATURE_TRANSFER_SRC_BIT & hwbFormatProps.formatFeatures) &&
+ SkToBool(VK_FORMAT_FEATURE_TRANSFER_DST_BIT & hwbFormatProps.formatFeatures));
+ if (forWrite) {
+ REPORTER_ASSERT(reporter,
+ SkToBool(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT & hwbFormatProps.formatFeatures));
+
+ }
+
+ // Only fall back to the driver's opaque externalFormat when Vulkan has no
+ // equivalent VkFormat for the buffer.
+ bool useExternalFormat = VK_FORMAT_UNDEFINED == hwbFormatProps.format;
+ const VkExternalFormatANDROID externalFormatInfo {
+ VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID, // sType
+ nullptr, // pNext
+ useExternalFormat ? hwbFormatProps.externalFormat : 0, // externalFormat
+ };
+
+ // NOTE(review): the commented-out alternates below look like debug
+ // leftovers — consider removing.
+ const VkExternalMemoryImageCreateInfo externalMemoryImageInfo {
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, // sType
+ &externalFormatInfo, // pNext
+ //nullptr, // pNext
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, // handleTypes
+ //0x80, // handleTypes
+ };
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ if (forWrite) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ &externalMemoryImageInfo, // pNext
+ 0, // VkImageCreateFlags
+ VK_IMAGE_TYPE_2D, // VkImageType
+ hwbFormatProps.format, // VkFormat
+ { DEV_W, DEV_H, 1 }, // VkExtent3D
+ 1, // mipLevels
+ 1, // arrayLayers
+ VK_SAMPLE_COUNT_1_BIT, // samples
+ VK_IMAGE_TILING_OPTIMAL, // VkImageTiling
+ usageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
+ };
+
+ err = fVkCreateImage(fDevice, &imageCreateInfo, nullptr, &fImage);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "Create Image failed, err: %d", err);
+ return false;
+ }
+
+ // AHB imports require a dedicated allocation; verify the driver agrees.
+ VkImageMemoryRequirementsInfo2 memReqsInfo;
+ memReqsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
+ memReqsInfo.pNext = nullptr;
+ memReqsInfo.image = fImage;
+
+ VkMemoryDedicatedRequirements dedicatedMemReqs;
+ dedicatedMemReqs.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
+ dedicatedMemReqs.pNext = nullptr;
+
+ VkMemoryRequirements2 memReqs;
+ memReqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
+ memReqs.pNext = &dedicatedMemReqs;
+
+ fVkGetImageMemoryRequirements2(fDevice, &memReqsInfo, &memReqs);
+ REPORTER_ASSERT(reporter, VK_TRUE == dedicatedMemReqs.requiresDedicatedAllocation);
+
+ // Pick the first device-local memory type permitted by the AHB's
+ // memoryTypeBits mask.
+ VkPhysicalDeviceMemoryProperties2 phyDevMemProps;
+ phyDevMemProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
+ phyDevMemProps.pNext = nullptr;
+
+ uint32_t typeIndex = 0;
+ uint32_t heapIndex = 0;
+ bool foundHeap = false;
+ fVkGetPhysicalDeviceMemoryProperties2(fBackendContext.fPhysicalDevice, &phyDevMemProps);
+ uint32_t memTypeCnt = phyDevMemProps.memoryProperties.memoryTypeCount;
+ for (uint32_t i = 0; i < memTypeCnt && !foundHeap; ++i) {
+ if (hwbProps.memoryTypeBits & (1 << i)) {
+ const VkPhysicalDeviceMemoryProperties& pdmp = phyDevMemProps.memoryProperties;
+ uint32_t supportedFlags = pdmp.memoryTypes[i].propertyFlags &
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ if (supportedFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
+ typeIndex = i;
+ heapIndex = pdmp.memoryTypes[i].heapIndex;
+ foundHeap = true;
+ }
+ }
+ }
+ if (!foundHeap) {
+ ERRORF(reporter, "Failed to find valid heap for imported memory");
+ return false;
+ }
+
+ // Allocate dedicated memory that imports the AHardwareBuffer itself.
+ VkImportAndroidHardwareBufferInfoANDROID hwbImportInfo;
+ hwbImportInfo.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
+ hwbImportInfo.pNext = nullptr;
+ hwbImportInfo.buffer = buffer;
+
+ VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
+ dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
+ dedicatedAllocInfo.pNext = &hwbImportInfo;
+ dedicatedAllocInfo.image = fImage;
+ dedicatedAllocInfo.buffer = VK_NULL_HANDLE;
+
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ &dedicatedAllocInfo, // pNext
+ hwbProps.allocationSize, // allocationSize
+ typeIndex, // memoryTypeIndex
+ };
+
+ err = fVkAllocateMemory(fDevice, &allocInfo, nullptr, &fMemory);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "AllocateMemory failed for imported buffer, err: %d", err);
+ return false;
+ }
+
+ VkBindImageMemoryInfo bindImageInfo;
+ bindImageInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
+ bindImageInfo.pNext = nullptr;
+ bindImageInfo.image = fImage;
+ bindImageInfo.memory = fMemory;
+ bindImageInfo.memoryOffset = 0;
+
+ err = fVkBindImageMemory2(fDevice, 1, &bindImageInfo);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "BindImageMemory failed for imported buffer, err: %d", err);
+ return false;
+ }
+
+ // Hand the image off to Skia. The external queue family tells Skia the
+ // memory currently belongs to a foreign (non-Vulkan-queue) owner.
+ outImageInfo->fImage = fImage;
+ outImageInfo->fAlloc = GrVkAlloc(fMemory, 0, hwbProps.allocationSize, 0);
+ outImageInfo->fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ outImageInfo->fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ outImageInfo->fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+ outImageInfo->fLevelCount = 1;
+ outImageInfo->fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;
+ return true;
+}
+
+// Imports `buffer` read-only (no color-attachment usage) and wraps the
+// resulting VkImage as an SkImage for sampling. Returns nullptr on failure.
+sk_sp<SkImage> VulkanTestHelper::importHardwareBufferForRead(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) {
+ GrVkImageInfo imageInfo;
+ if (!this->importHardwareBuffer(reporter, buffer, false, &imageInfo)) {
+ return nullptr;
+ }
+
+ GrBackendTexture backendTex(DEV_W, DEV_H, imageInfo);
+
+ sk_sp<SkImage> wrappedImage = SkImage::MakeFromTexture(fGrContext.get(),
+ backendTex,
+ kTopLeft_GrSurfaceOrigin,
+ kRGBA_8888_SkColorType,
+ kPremul_SkAlphaType,
+ nullptr);
+
+ if (!wrappedImage.get()) {
+ ERRORF(reporter, "Failed to create wrapped Vulkan SkImage");
+ return nullptr;
+ }
+
+ return wrappedImage;
+}
+
+// Flushes (and drops) the surface, then submits a flush that signals a newly
+// created exportable semaphore and exports it as a sync fd (stored on the
+// helper) so the consumer side can wait on the rendering.
+bool VulkanTestHelper::flushSurfaceAndSignalSemaphore(skiatest::Reporter* reporter,
+ sk_sp<SkSurface> surface) {
+ surface->flush();
+ surface.reset();
+ GrBackendSemaphore semaphore;
+ if (!this->setupSemaphoreForSignaling(reporter, &semaphore)) {
+ return false;
+ }
+ GrSemaphoresSubmitted submitted = fGrContext->flushAndSignalSemaphores(1, &semaphore);
+ if (GrSemaphoresSubmitted::kNo == submitted) {
+ ERRORF(reporter, "Failing call to flushAndSignalSemaphores on SkSurface");
+ return false;
+ }
+ SkASSERT(semaphore.isInitialized());
+ if (!this->exportSemaphore(reporter, semaphore)) {
+ return false;
+ }
+ return true;
+}
+
+// Verifies the device can export/import sync-fd semaphores, then creates a
+// VkSemaphore with sync-fd export enabled and wraps it in *beSemaphore.
+// Returns false (with an error) if sync-fd handles are unsupported.
+bool VulkanTestHelper::setupSemaphoreForSignaling(skiatest::Reporter* reporter,
+ GrBackendSemaphore* beSemaphore) {
+ // Query supported info
+ VkPhysicalDeviceExternalSemaphoreInfo exSemInfo;
+ exSemInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO;
+ exSemInfo.pNext = nullptr;
+ exSemInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ VkExternalSemaphoreProperties exSemProps;
+ exSemProps.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES;
+ exSemProps.pNext = nullptr;
+
+ fVkGetPhysicalDeviceExternalSemaphoreProperties(fBackendContext.fPhysicalDevice, &exSemInfo,
+ &exSemProps);
+
+ if (!SkToBool(exSemProps.exportFromImportedHandleTypes &
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
+ ERRORF(reporter, "HANDLE_TYPE_SYNC_FD not listed as exportFromImportedHandleTypes");
+ return false;
+ }
+ if (!SkToBool(exSemProps.compatibleHandleTypes &
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
+ ERRORF(reporter, "HANDLE_TYPE_SYNC_FD not listed as compatibleHandleTypes");
+ return false;
+ }
+ if (!SkToBool(exSemProps.externalSemaphoreFeatures &
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT) ||
+ !SkToBool(exSemProps.externalSemaphoreFeatures &
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT)) {
+ ERRORF(reporter, "HANDLE_TYPE_SYNC_FD doesn't support export and import feature");
+ return false;
+ }
+
+ // Chain export info onto the create info so the semaphore's payload can
+ // later be exported as a sync fd.
+ VkExportSemaphoreCreateInfo exportInfo;
+ exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
+ exportInfo.pNext = nullptr;
+ exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ VkSemaphoreCreateInfo semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreInfo.pNext = &exportInfo;
+ semaphoreInfo.flags = 0;
+
+ VkSemaphore semaphore;
+ VkResult err = fVkCreateSemaphore(fDevice, &semaphoreInfo, nullptr, &semaphore);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "Failed to create signal semaphore, err: %d", err);
+ return false;
+ }
+ beSemaphore->initVulkan(semaphore);
+ return true;
+}
+
+// Exports the signaled semaphore's payload as a sync fd into fFdHandle, then
+// destroys the VkSemaphore (the fd now carries the sync state).
+bool VulkanTestHelper::exportSemaphore(skiatest::Reporter* reporter,
+ const GrBackendSemaphore& beSemaphore) {
+ VkSemaphore semaphore = beSemaphore.vkSemaphore();
+ if (VK_NULL_HANDLE == semaphore) {
+ ERRORF(reporter, "Invalid vulkan handle in export call");
+ return false;
+ }
+
+ VkSemaphoreGetFdInfoKHR getFdInfo;
+ getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+ getFdInfo.pNext = nullptr;
+ getFdInfo.semaphore = semaphore;
+ getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ VkResult err = fVkGetSemaphoreFdKHR(fDevice, &getFdInfo, &fFdHandle);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "Failed to export signal semaphore, err: %d", err);
+ return false;
+ }
+ fVkDestroySemaphore(fDevice, semaphore, nullptr);
+ return true;
+}
+
+// Creates a VkSemaphore, imports `fdHandle` (a sync fd exported by the
+// producer) into it with temporary-import semantics, and attaches it as a
+// wait semaphore on `surface`. On success the surface owns the semaphore;
+// on wait failure it is destroyed here.
+bool VulkanTestHelper::importAndWaitOnSemaphore(skiatest::Reporter* reporter, int fdHandle,
+ sk_sp<SkSurface> surface) {
+ VkSemaphoreCreateInfo semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreInfo.pNext = nullptr;
+ semaphoreInfo.flags = 0;
+
+ VkSemaphore semaphore;
+ VkResult err = fVkCreateSemaphore(fDevice, &semaphoreInfo, nullptr, &semaphore);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "Failed to create import semaphore, err: %d", err);
+ return false;
+ }
+
+ VkImportSemaphoreFdInfoKHR importInfo;
+ importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
+ importInfo.pNext = nullptr;
+ importInfo.semaphore = semaphore;
+ // Sync-fd imports are required to be temporary: the payload is consumed
+ // by the next wait.
+ importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
+ importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+ importInfo.fd = fdHandle;
+
+ err = fVkImportSemaphoreFdKHR(fDevice, &importInfo);
+ if (VK_SUCCESS != err) {
+ ERRORF(reporter, "Failed to import semaphore, err: %d", err);
+ return false;
+ }
+
+ GrBackendSemaphore beSemaphore;
+ beSemaphore.initVulkan(semaphore);
+ if (!surface->wait(1, &beSemaphore)) {
+ ERRORF(reporter, "Failed to add wait semaphore to surface");
+ fVkDestroySemaphore(fDevice, semaphore, nullptr);
+ return false;
+ }
+ return true;
+}
+
+// Imports `buffer` with color-attachment usage and wraps the resulting
+// VkImage as a renderable SkSurface. Returns nullptr on failure.
+sk_sp<SkSurface> VulkanTestHelper::importHardwareBufferForWrite(skiatest::Reporter* reporter,
+ AHardwareBuffer* buffer) {
+ GrVkImageInfo imageInfo;
+ if (!this->importHardwareBuffer(reporter, buffer, true, &imageInfo)) {
+ return nullptr;
+ }
+
+ GrBackendTexture backendTex(DEV_W, DEV_H, imageInfo);
+
+ sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(fGrContext.get(),
+ backendTex,
+ kTopLeft_GrSurfaceOrigin,
+ 0,
+ kRGBA_8888_SkColorType,
+ nullptr, nullptr);
+
+ if (!surface.get()) {
+ ERRORF(reporter, "Failed to create wrapped Vulkan SkSurface");
+ return nullptr;
+ }
+
+ return surface;
+}
+
+// Deterministic per-pixel test color: red/green encode x/y, blue is fixed.
+// NOTE(review): the switch below computes a varying alpha but the assignment
+// at the end unconditionally overwrites it with 0xff, making the switch dead
+// code — looks like debug leftover; confirm whether varying alpha was meant.
+static SkPMColor get_src_color(int x, int y) {
+ SkASSERT(x >= 0 && x < DEV_W);
+ SkASSERT(y >= 0 && y < DEV_H);
+
+ U8CPU r = x;
+ U8CPU g = y;
+ U8CPU b = 0xc;
+
+ U8CPU a = 0xff;
+ switch ((x+y) % 5) {
+ case 0:
+ a = 0xff;
+ break;
+ case 1:
+ a = 0x80;
+ break;
+ case 2:
+ a = 0xCC;
+ break;
+ case 4:
+ a = 0x01;
+ break;
+ case 3:
+ a = 0x00;
+ break;
+ }
+ a = 0xff;
+ return SkPremultiplyARGBInline(a, r, g, b);
+}
+
+// Builds (once, cached in a function-local static) a DEV_W x DEV_H N32
+// bitmap filled via get_src_color. Subsequent calls return a shallow copy
+// sharing the same pixels.
+static SkBitmap make_src_bitmap() {
+ static SkBitmap bmp;
+ if (bmp.isNull()) {
+ bmp.allocN32Pixels(DEV_W, DEV_H);
+ intptr_t pixels = reinterpret_cast<intptr_t>(bmp.getPixels());
+ for (int y = 0; y < DEV_H; ++y) {
+ for (int x = 0; x < DEV_W; ++x) {
+ // Address each pixel manually via rowBytes to respect padding.
+ SkPMColor* pixel = reinterpret_cast<SkPMColor*>(
+ pixels + y * bmp.rowBytes() + x * bmp.bytesPerPixel());
+ *pixel = get_src_color(x, y);
+ }
+ }
+ }
+ return bmp;
+}
+
+// Compares dstBitmap against srcBitmap pixel-by-pixel, reporting and
+// stopping at the first mismatch. Returns true iff all pixels match.
+static bool check_read(skiatest::Reporter* reporter, const SkBitmap& srcBitmap,
+ const SkBitmap& dstBitmap) {
+ bool result = true;
+ for (int y = 0; y < DEV_H && result; ++y) {
+ for (int x = 0; x < DEV_W && result; ++x) {
+ const uint32_t srcPixel = *srcBitmap.getAddr32(x, y);
+ const uint32_t dstPixel = *dstBitmap.getAddr32(x, y);
+ if (srcPixel != dstPixel) {
+ ERRORF(reporter, "Expected readback pixel (%d, %d) value 0x%08x, got 0x%08x.",
+ x, y, srcPixel, dstPixel);
+ result = false;
+ } /*else {
+ ERRORF(reporter, "Got good readback pixel (%d, %d) value 0x%08x, got 0x%08x.",
+ x, y, srcPixel, dstPixel);
+
+ }*/
+ }
+ }
+ return result;
+}
+
+// Releases test resources; all arguments may be null. Helpers clean up their
+// backend state, and the AHardwareBuffer reference is released last.
+static void cleanup_resources(BaseTestHelper* srcHelper, BaseTestHelper* dstHelper,
+ AHardwareBuffer* buffer) {
+ if (srcHelper) {
+ srcHelper->cleanup();
+ }
+ if (dstHelper) {
+ dstHelper->cleanup();
+ }
+ if (buffer) {
+ AHardwareBuffer_release(buffer);
+ }
+}
+
+// Which API writes the AHardwareBuffer contents in run_test. kCPU locks the
+// buffer and memcpys pixels directly; kEGL/kVulkan render into it via a
+// wrapped SkSurface.
+enum class SrcType {
+ kCPU,
+ kEGL,
+ kVulkan,
+};
+
+// Which API reads the AHardwareBuffer back (wraps it as an SkImage).
+enum class DstType {
+ kEGL,
+ kVulkan,
+};
+
+// Core cross-API sharing test: fill an AHardwareBuffer via `srcType`, then
+// wrap and sample it via `dstType` and verify the pixels round-trip. When
+// `shareSyncs` is true, synchronization happens through an exported/imported
+// sync-fd semaphore instead of a client-side sync. Any setup failure cleans
+// up and returns early (the test is effectively skipped, not failed, unless
+// an ERRORF was emitted).
+void run_test(skiatest::Reporter* reporter, const GrContextOptions& options,
+ SrcType srcType, DstType dstType, bool shareSyncs) {
+ if (SrcType::kCPU == srcType && shareSyncs) {
+ // We don't currently test this since we don't do any syncs in this case.
+ return;
+ }
+ std::unique_ptr<BaseTestHelper> srcHelper;
+ std::unique_ptr<BaseTestHelper> dstHelper;
+ AHardwareBuffer* buffer = nullptr;
+ if (SrcType::kVulkan == srcType) {
+ srcHelper.reset(new VulkanTestHelper());
+ } else if (SrcType::kEGL == srcType) {
+ srcHelper.reset(new EGLTestHelper(options));
+ }
+ if (srcHelper) {
+ if (!srcHelper->init(reporter)) {
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+ }
+
+ if (DstType::kVulkan == dstType) {
+ dstHelper.reset(new VulkanTestHelper());
+ } else {
+ SkASSERT(DstType::kEGL == dstType);
+ dstHelper.reset(new EGLTestHelper(options));
+ }
+ if (dstHelper) {
+ if (!dstHelper->init(reporter)) {
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Setup SkBitmaps
+ ///////////////////////////////////////////////////////////////////////////
+
+ SkBitmap srcBitmap = make_src_bitmap();
+ SkBitmap dstBitmapSurface;
+ dstBitmapSurface.allocN32Pixels(DEV_W, DEV_H);
+ SkBitmap dstBitmapFinal;
+ dstBitmapFinal.allocN32Pixels(DEV_W, DEV_H);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Setup AHardwareBuffer
+ ///////////////////////////////////////////////////////////////////////////
+
+ // CPU sources need CPU-write access; GPU sources need color-output usage.
+ AHardwareBuffer_Desc hwbDesc;
+ hwbDesc.width = DEV_W;
+ hwbDesc.height = DEV_H;
+ hwbDesc.layers = 1;
+ if (SrcType::kCPU == srcType) {
+ hwbDesc.usage = AHARDWAREBUFFER_USAGE_CPU_READ_NEVER |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
+ } else {
+ hwbDesc.usage = AHARDWAREBUFFER_USAGE_CPU_READ_NEVER |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_NEVER |
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+ AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
+ }
+ hwbDesc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+ // The following three are not used in the allocate
+ hwbDesc.stride = 0;
+ hwbDesc.rfu0= 0;
+ hwbDesc.rfu1= 0;
+
+ if (int error = AHardwareBuffer_allocate(&hwbDesc, &buffer)) {
+ ERRORF(reporter, "Failed to allocated hardware buffer, error: %d", error);
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+
+ if (SrcType::kCPU == srcType) {
+ // Get actual desc for allocated buffer so we know the stride for uploading cpu data.
+ AHardwareBuffer_describe(buffer, &hwbDesc);
+
+ uint32_t* bufferAddr;
+ if (AHardwareBuffer_lock(buffer, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, -1, nullptr,
+ reinterpret_cast<void**>(&bufferAddr))) {
+ ERRORF(reporter, "Failed to lock hardware buffer");
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+
+ // Copy row by row, honoring the buffer's stride (in pixels for a
+ // uint32_t* pointer).
+ int bbp = srcBitmap.bytesPerPixel();
+ uint32_t* src = (uint32_t*)srcBitmap.getPixels();
+ uint32_t* dst = bufferAddr;
+ for (int y = 0; y < DEV_H; ++y) {
+ memcpy(dst, src, DEV_W * bbp);
+ src += DEV_W;
+ dst += hwbDesc.stride;
+ }
+
+ // Sanity-check the upload by reading the mapped memory back.
+ for (int y = 0; y < DEV_H; ++y) {
+ for (int x = 0; x < DEV_W; ++x) {
+ const uint32_t srcPixel = *srcBitmap.getAddr32(x, y);
+ uint32_t dstPixel = bufferAddr[y * hwbDesc.stride + x];
+ if (srcPixel != dstPixel) {
+ ERRORF(reporter, "CPU HWB Expected readpix (%d, %d) value 0x%08x, got 0x%08x.",
+ x, y, srcPixel, dstPixel);
+ }
+ }
+ }
+
+ AHardwareBuffer_unlock(buffer, nullptr);
+
+ } else {
+ // GPU source: draw the source bitmap into a surface wrapping the HWB.
+ srcHelper->makeCurrent();
+ sk_sp<SkSurface> surface = srcHelper->importHardwareBufferForWrite(reporter, buffer);
+
+ if (!surface) {
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+
+ sk_sp<SkImage> srcBmpImage = SkImage::MakeFromBitmap(srcBitmap);
+ surface->getCanvas()->drawImage(srcBmpImage, 0, 0);
+
+ // If we are testing sharing of syncs, don't do a read here since it forces sychronization
+ // to occur.
+ if (!shareSyncs) {
+ bool readResult = surface->readPixels(dstBitmapSurface, 0, 0);
+ if (!readResult) {
+ ERRORF(reporter, "Read Pixels on surface failed");
+ surface.reset();
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+ REPORTER_ASSERT(reporter, check_read(reporter, srcBitmap, dstBitmapSurface));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Cleanup GL/EGL and add syncs
+ ///////////////////////////////////////////////////////////////////////////
+
+ if (shareSyncs) {
+ if (!srcHelper->flushSurfaceAndSignalSemaphore(reporter, std::move(surface))) {
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+ } else {
+ surface.reset();
+ srcHelper->doClientSync();
+ srcHelper->releaseImage();
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Import the HWB into backend and draw it to a surface
+ ///////////////////////////////////////////////////////////////////////////
+
+ dstHelper->makeCurrent();
+ sk_sp<SkImage> wrappedImage = dstHelper->importHardwareBufferForRead(reporter, buffer);
+
+ if (!wrappedImage) {
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+
+ GrContext* grContext = dstHelper->grContext();
+
+ // Make SkSurface to render wrapped HWB into.
+ SkImageInfo imageInfo = SkImageInfo::Make(DEV_W, DEV_H, kRGBA_8888_SkColorType,
+ kPremul_SkAlphaType, nullptr);
+
+ sk_sp<SkSurface> dstSurf = SkSurface::MakeRenderTarget(grContext,
+ SkBudgeted::kNo, imageInfo, 0,
+ kTopLeft_GrSurfaceOrigin,
+ nullptr, false);
+ if (!dstSurf.get()) {
+ ERRORF(reporter, "Failed to create destination SkSurface");
+ wrappedImage.reset();
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+
+ // When sharing syncs, make the destination surface wait on the producer's
+ // exported semaphore before sampling the HWB.
+ if (shareSyncs) {
+ if (!dstHelper->importAndWaitOnSemaphore(reporter, srcHelper->getFdHandle(), dstSurf)) {
+ wrappedImage.reset();
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+ }
+ dstSurf->getCanvas()->drawImage(wrappedImage, 0, 0);
+
+ bool readResult = dstSurf->readPixels(dstBitmapFinal, 0, 0);
+ if (!readResult) {
+ ERRORF(reporter, "Read Pixels failed");
+ wrappedImage.reset();
+ dstSurf.reset();
+ dstHelper->doClientSync();
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+ return;
+ }
+
+ REPORTER_ASSERT(reporter, check_read(reporter, srcBitmap, dstBitmapFinal));
+
+ dstSurf.reset();
+ wrappedImage.reset();
+ dstHelper->doClientSync();
+ cleanup_resources(srcHelper.get(), dstHelper.get(), buffer);
+}
+
+// Test registrations: every (src, dst) API pairing, with the *_Syncs
+// variants additionally exercising sync-fd semaphore sharing (no CPU-source
+// sync variants — run_test skips that combination).
+DEF_GPUTEST(VulkanHardwareBuffer_CPU_Vulkan, reporter, options) {
+ run_test(reporter, options, SrcType::kCPU, DstType::kVulkan, false);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_EGL_Vulkan, reporter, options) {
+ run_test(reporter, options, SrcType::kEGL, DstType::kVulkan, false);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_Vulkan_Vulkan, reporter, options) {
+ run_test(reporter, options, SrcType::kVulkan, DstType::kVulkan, false);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_CPU_EGL, reporter, options) {
+ run_test(reporter, options, SrcType::kCPU, DstType::kEGL, false);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_EGL_EGL, reporter, options) {
+ run_test(reporter, options, SrcType::kEGL, DstType::kEGL, false);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_Vulkan_EGL, reporter, options) {
+ run_test(reporter, options, SrcType::kVulkan, DstType::kEGL, false);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_EGL_EGL_Syncs, reporter, options) {
+ run_test(reporter, options, SrcType::kEGL, DstType::kEGL, true);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_Vulkan_EGL_Syncs, reporter, options) {
+ run_test(reporter, options, SrcType::kVulkan, DstType::kEGL, true);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_EGL_Vulkan_Syncs, reporter, options) {
+ run_test(reporter, options, SrcType::kEGL, DstType::kVulkan, true);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_Vulkan_Vulkan_Syncs, reporter, options) {
+ run_test(reporter, options, SrcType::kVulkan, DstType::kVulkan, true);
+}
+
+#endif
+
diff --git a/third_party/skcms/version.sha1 b/third_party/skcms/version.sha1
index 2e80ce1..9bce9d7 100755
--- a/third_party/skcms/version.sha1
+++ b/third_party/skcms/version.sha1
@@ -1 +1 @@
-cd2260c9f5289b05d6b23797a291e54508026823
\ No newline at end of file
+0c0f6dee27794e35e513122f703b848709ec8bbd
\ No newline at end of file
diff --git a/tools/gpu/vk/VkTestContext.cpp b/tools/gpu/vk/VkTestContext.cpp
index e73835a..18c2393 100644
--- a/tools/gpu/vk/VkTestContext.cpp
+++ b/tools/gpu/vk/VkTestContext.cpp
@@ -180,6 +180,7 @@
features, &debugCallback)) {
sk_gpu_test::FreeVulkanFeaturesStructs(features);
delete features;
+ delete extensions;
return nullptr;
}
if (debugCallback != VK_NULL_HANDLE) {
@@ -226,8 +227,7 @@
grVkDestroyDevice(fVk.fDevice, nullptr);
#ifdef SK_ENABLE_VK_LAYERS
if (fDebugCallback != VK_NULL_HANDLE) {
- ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, fVk.fInstance);
- grVkDestroyDebugReportCallbackEXT(fVk.fInstance, fDebugCallback, nullptr);
+ fDestroyDebugReportCallbackEXT(fVk.fInstance, fDebugCallback, nullptr);
}
#endif
grVkDestroyInstance(fVk.fInstance, nullptr);