Add GrContext::oomed() and implement for GL and VK.

Surfaces to the client whether the GrContext has seen a GL_OUT_OF_MEMORY,
VK_ERROR_OUT_OF_HOST_MEMORY, or VK_ERROR_OUT_OF_DEVICE_MEMORY error.
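
A minimal client-side sketch of how a caller might use the new query
(hypothetical code; "context" is assumed to be a GrContext* the client
already owns):

    // After submitting work, poll for OOM. oomed() also resets the
    // internal flag, so each detected OOM is reported only once.
    context->flush();
    if (context->oomed()) {
        // Hypothetical response: free cached, unlocked GPU resources.
        context->freeGpuResources();
    }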

Bug: chromium:1093997
Change-Id: I8e9799a0f7d8a74df056629d7d1d07c0d0a0fe30
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/298216
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Brian Salomon <bsalomon@google.com>
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 9dab791..de7a174 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -167,6 +167,8 @@
     return false;
 }
 
+bool GrContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
+
 void GrContext::resetGLTextureBindings() {
     if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
         return;
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 9452250..8c75788 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -715,6 +715,14 @@
     return submitted;
 }
 
+bool GrGpu::checkAndResetOOMed() {
+    if (fOOMed) {
+        fOOMed = false;
+        return true;
+    }
+    return false;
+}
+
 void GrGpu::callSubmittedProcs(bool success) {
     for (int i = 0; i < fSubmittedProcs.count(); ++i) {
         fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 9ede5b3..d93c47f 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -389,6 +389,12 @@
     virtual void checkFinishProcs() = 0;
 
     /**
+     * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
+     * the internal OOM state to false. Otherwise, returns false.
+     */
+    bool checkAndResetOOMed();
+
+    /**
      *  Put this texture in a safe and known state for use across multiple GrContexts. Depending on
      *  the backend, this may return a GrSemaphore. If so, other contexts should wait on that
      *  semaphore before using this texture.
@@ -721,6 +727,8 @@
     void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                            uint32_t mipLevels = 1) const;
 
+    void setOOMed() { fOOMed = true; }
+
     typedef SkTInternalLList<GrStagingBuffer> StagingBufferList;
     const StagingBufferList& availableStagingBuffers() { return fAvailableStagingBuffers; }
     const StagingBufferList& activeStagingBuffers() { return fActiveStagingBuffers; }
@@ -884,6 +892,8 @@
     };
     SkSTArray<4, SubmittedProc> fSubmittedProcs;
 
+    bool fOOMed = false;
+
     friend class GrPathRendering;
     typedef SkRefCnt INHERITED;
 };
diff --git a/src/gpu/GrLegacyDirectContext.cpp b/src/gpu/GrLegacyDirectContext.cpp
index 07be4fb..4b027ea 100644
--- a/src/gpu/GrLegacyDirectContext.cpp
+++ b/src/gpu/GrLegacyDirectContext.cpp
@@ -31,6 +31,13 @@
 #include "src/gpu/dawn/GrDawnGpu.h"
 #endif
 
+#if GR_TEST_UTILS
+#   include "include/utils/SkRandom.h"
+#   if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+#       include <sanitizer/lsan_interface.h>
+#   endif
+#endif
+
 #ifdef SK_DISABLE_REDUCE_OPLIST_SPLITTING
 static const bool kDefaultReduceOpsTaskSplitting = false;
 #else
@@ -135,10 +142,50 @@
     return MakeGL(nullptr, defaultOptions);
 }
 
+#if GR_TEST_UTILS
+GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
+    // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
+    // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
+    // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
+    // on the thing it captures. So we leak the context.
+    struct GetErrorContext {
+        SkRandom fRandom;
+        GrGLFunction<GrGLGetErrorFn> fGetError;
+    };
+
+    auto errorContext = new GetErrorContext;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+    __lsan_ignore_object(errorContext);
+#endif
+
+    errorContext->fGetError = original;
+
+    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
+        GrGLenum error = errorContext->fGetError();
+        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
+            error = GR_GL_OUT_OF_MEMORY;
+        }
+        return error;
+    });
+}
+#endif
+
 sk_sp<GrContext> GrContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                    const GrContextOptions& options) {
     sk_sp<GrContext> context(new GrLegacyDirectContext(GrBackendApi::kOpenGL, options));
-
+#if GR_TEST_UTILS
+    if (options.fRandomGLOOM) {
+        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
+        copy->fFunctions.fGetError =
+                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
+#if GR_GL_CHECK_ERROR
+        // Suppress logging GL errors since we'll be synthetically generating them.
+        copy->suppressErrorLogging();
+#endif
+        glInterface = std::move(copy);
+    }
+#endif
     context->fGpu = GrGLGpu::Make(std::move(glInterface), options, context.get());
     if (!context->init()) {
         return nullptr;
diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp
index fc11731..0ac421d 100644
--- a/src/gpu/gl/GrGLBuffer.cpp
+++ b/src/gpu/gl/GrGLBuffer.cpp
@@ -14,15 +14,17 @@
 #define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)
 
-#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
-    #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
-    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
-    #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
-#else
-    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
-    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
-    #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
-#endif
+#define GL_ALLOC_CALL(call)                                            \
+    [&] {                                                              \
+        if (this->glGpu()->glCaps().skipErrorChecks()) {               \
+            GR_GL_CALL(this->glGpu()->glInterface(), call);            \
+            return static_cast<GrGLenum>(GR_GL_NO_ERROR);              \
+        } else {                                                       \
+            this->glGpu()->clearErrorsAndCheckForOOM();                \
+            GR_GL_CALL_NOERRCHECK(this->glGpu()->glInterface(), call); \
+            return this->glGpu()->getErrorAndCheckForOOM();            \
+        }                                                              \
+    }()
 
 #ifdef SK_DEBUG
 #define VALIDATE() this->validate()
@@ -109,13 +111,8 @@
     GL_CALL(GenBuffers(1, &fBufferID));
     if (fBufferID) {
         GrGLenum target = gpu->bindBuffer(fIntendedType, this);
-        CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
-        // make sure driver can allocate memory for this buffer
-        GL_ALLOC_CALL(gpu->glInterface(), BufferData(target,
-                                                     (GrGLsizeiptr) size,
-                                                     data,
-                                                     fUsage));
-        if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
+        GrGLenum error = GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)size, data, fUsage));
+        if (error != GR_GL_NO_ERROR) {
             GL_CALL(DeleteBuffers(1, &fBufferID));
             fBufferID = 0;
         } else {
@@ -182,7 +179,11 @@
             if (!readOnly) {
                 // Let driver know it can discard the old data
                 if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
-                    GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+                    GrGLenum error =
+                            GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
+                    if (error != GR_GL_NO_ERROR) {
+                        return;
+                    }
                 }
             }
             GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
@@ -192,7 +193,10 @@
             GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
             // Make sure the GL buffer size agrees with fDesc before mapping.
             if (fGLSizeInBytes != this->size()) {
-                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
+                if (error != GR_GL_NO_ERROR) {
+                    return;
+                }
             }
             GrGLbitfield access;
             if (readOnly) {
@@ -211,7 +215,10 @@
             GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
             // Make sure the GL buffer size agrees with fDesc before mapping.
             if (fGLSizeInBytes != this->size()) {
-                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
+                if (error != GR_GL_NO_ERROR) {
+                    return;
+                }
             }
             GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                   readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
@@ -266,7 +273,11 @@
 
     if (this->glCaps().useBufferDataNullHint()) {
         if (this->size() == srcSizeInBytes) {
-            GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
+            GrGLenum error =
+                    GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
+            if (error != GR_GL_NO_ERROR) {
+                return false;
+            }
         } else {
             // Before we call glBufferSubData we give the driver a hint using
             // glBufferData with nullptr. This makes the old buffer contents
@@ -275,7 +286,11 @@
             // assign a different allocation for the new contents to avoid
             // flushing the gpu past draws consuming the old contents.
             // TODO I think we actually want to try calling bufferData here
-            GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+            GrGLenum error =
+                    GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)this->size(), nullptr, fUsage));
+            if (error != GR_GL_NO_ERROR) {
+                return false;
+            }
             GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
         }
         fGLSizeInBytes = this->size();
@@ -283,7 +298,11 @@
         // Note that we're cheating on the size here. Currently no methods
         // allow a partial update that preserves contents of non-updated
         // portions of the buffer (map() does a glBufferData(..size, nullptr..))
-        GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage));
+        GrGLenum error =
+                GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
+        if (error != GR_GL_NO_ERROR) {
+            return false;
+        }
         fGLSizeInBytes = srcSizeInBytes;
     }
     VALIDATE();
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index b006011..7e54aec 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -50,9 +50,9 @@
             GR_GL_CALL(this->glInterface(), call);            \
             return static_cast<GrGLenum>(GR_GL_NO_ERROR);     \
         } else {                                              \
-            GrGLClearErr(this->glInterface());                \
+            this->clearErrorsAndCheckForOOM();                \
             GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \
-            return GR_GL_GET_ERROR(this->glInterface());      \
+            return this->getErrorAndCheckForOOM();            \
         }                                                     \
     }()
 
@@ -333,7 +333,11 @@
         , fStencilClearFBOID(0)
         , fFinishCallbacks(this) {
     SkASSERT(fGLContext);
-    GrGLClearErr(this->glInterface());
+    // Clear any pre-existing GL errors so they aren't misattributed to calls we make.
+    this->clearErrorsAndCheckForOOM();
+    // Toss out any pre-existing OOM that was hanging around before we got started.
+    this->checkAndResetOOMed();
+
     fCaps = sk_ref_sp(fGLContext->caps());
 
     fHWTextureUnitBindings.reset(this->numTextureUnits());
@@ -3862,6 +3866,9 @@
         // See if any previously inserted finish procs are good to go.
         fFinishCallbacks.check();
     }
+    if (!this->glCaps().skipErrorChecks()) {
+        this->clearErrorsAndCheckForOOM();
+    }
     return true;
 }
 
@@ -3959,6 +3966,23 @@
     fFinishCallbacks.check();
 }
 
+void GrGLGpu::clearErrorsAndCheckForOOM() {
+    while (this->getErrorAndCheckForOOM() != GR_GL_NO_ERROR) {}
+}
+
+GrGLenum GrGLGpu::getErrorAndCheckForOOM() {
+#if GR_GL_CHECK_ERROR
+    if (this->glInterface()->checkAndResetOOMed()) {
+        this->setOOMed();
+    }
+#endif
+    GrGLenum error = this->fGLContext->glInterface()->fFunctions.fGetError();
+    if (error == GR_GL_OUT_OF_MEMORY) {
+        this->setOOMed();
+    }
+    return error;
+}
+
 void GrGLGpu::deleteSync(GrGLsync sync) const {
     if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
         GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync));
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 64ba4e7..0a40333 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -169,6 +169,11 @@
 
     void checkFinishProcs() override;
 
+    // Calls glGetError() until no errors are reported. Also looks for OOMs.
+    void clearErrorsAndCheckForOOM();
+    // Calls glGetError() once and returns the result. Also looks for an OOM.
+    GrGLenum getErrorAndCheckForOOM();
+
     std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
 
     void deleteSync(GrGLsync) const;
diff --git a/src/gpu/gl/GrGLInterfaceAutogen.cpp b/src/gpu/gl/GrGLInterfaceAutogen.cpp
index 2a0dee8..007652c 100644
--- a/src/gpu/gl/GrGLInterfaceAutogen.cpp
+++ b/src/gpu/gl/GrGLInterfaceAutogen.cpp
@@ -19,6 +19,54 @@
     fStandard = kNone_GrGLStandard;
 }
 
+#if GR_GL_CHECK_ERROR
+static const char* get_error_string(GrGLenum err) {
+    switch (err) {
+        case GR_GL_NO_ERROR:
+            return "";
+        case GR_GL_INVALID_ENUM:
+            return "Invalid Enum";
+        case GR_GL_INVALID_VALUE:
+            return "Invalid Value";
+        case GR_GL_INVALID_OPERATION:
+            return "Invalid Operation";
+        case GR_GL_OUT_OF_MEMORY:
+            return "Out of Memory";
+        case GR_GL_CONTEXT_LOST:
+            return "Context Lost";
+    }
+    return "Unknown";
+}
+
+GrGLenum GrGLInterface::checkError(const char* location, const char* call) const {
+    GrGLenum error = fFunctions.fGetError();
+    if (error == GR_GL_OUT_OF_MEMORY) {
+        fOOMed = true;
+    }
+    if (error != GR_GL_NO_ERROR && !fSuppressErrorLogging) {
+        SkDebugf("---- glGetError 0x%x(%s)", error, get_error_string(error));
+        if (location) {
+            SkDebugf(" at\n\t%s", location);
+        }
+        if (call) {
+            SkDebugf("\n\t\t%s", call);
+        }
+        SkDebugf("\n");
+    }
+    return error;
+}
+
+bool GrGLInterface::checkAndResetOOMed() const {
+    if (fOOMed) {
+        fOOMed = false;
+        return true;
+    }
+    return false;
+}
+
+void GrGLInterface::suppressErrorLogging() { fSuppressErrorLogging = true; }
+#endif
+
 #define RETURN_FALSE_INTERFACE                                                 \
     SkDEBUGF("%s:%d GrGLInterface::validate() failed.\n", __FILE__, __LINE__); \
     return false
diff --git a/src/gpu/gl/GrGLUtil.cpp b/src/gpu/gl/GrGLUtil.cpp
index 5dadfbb..fc4781d 100644
--- a/src/gpu/gl/GrGLUtil.cpp
+++ b/src/gpu/gl/GrGLUtil.cpp
@@ -12,46 +12,6 @@
 #include "src/gpu/gl/GrGLUtil.h"
 #include <stdio.h>
 
-void GrGLClearErr(const GrGLInterface* gl) {
-    while (GR_GL_NO_ERROR != gl->fFunctions.fGetError()) {}
-}
-
-namespace {
-const char *get_error_string(uint32_t err) {
-    switch (err) {
-    case GR_GL_NO_ERROR:
-        return "";
-    case GR_GL_INVALID_ENUM:
-        return "Invalid Enum";
-    case GR_GL_INVALID_VALUE:
-        return "Invalid Value";
-    case GR_GL_INVALID_OPERATION:
-        return "Invalid Operation";
-    case GR_GL_OUT_OF_MEMORY:
-        return "Out of Memory";
-    case GR_GL_CONTEXT_LOST:
-        return "Context Lost";
-    }
-    return "Unknown";
-}
-}
-
-void GrGLCheckErr(const GrGLInterface* gl,
-                  const char* location,
-                  const char* call) {
-    uint32_t err = GR_GL_GET_ERROR(gl);
-    if (GR_GL_NO_ERROR != err) {
-        SkDebugf("---- glGetError 0x%x(%s)", err, get_error_string(err));
-        if (location) {
-            SkDebugf(" at\n\t%s", location);
-        }
-        if (call) {
-            SkDebugf("\n\t\t%s", call);
-        }
-        SkDebugf("\n");
-    }
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 
 #if GR_GL_LOG_CALLS
diff --git a/src/gpu/gl/GrGLUtil.h b/src/gpu/gl/GrGLUtil.h
index 550d98f..08435d0 100644
--- a/src/gpu/gl/GrGLUtil.h
+++ b/src/gpu/gl/GrGLUtil.h
@@ -257,23 +257,25 @@
                   const char* location,
                   const char* call);
 
-void GrGLClearErr(const GrGLInterface* gl);
-
 ////////////////////////////////////////////////////////////////////////////////
 
 /**
  * Macros for using GrGLInterface to make GL calls
  */
 
-// internal macro to conditionally call glGetError based on compile-time and
-// run-time flags.
+// Conditionally checks glGetError based on compile-time and run-time flags.
 #if GR_GL_CHECK_ERROR
     extern bool gCheckErrorGL;
-    #define GR_GL_CHECK_ERROR_IMPL(IFACE, X)                    \
-        if (gCheckErrorGL)                                      \
-            GrGLCheckErr(IFACE, GR_FILE_AND_LINE_STR, #X)
+#define GR_GL_CHECK_ERROR_IMPL(IFACE, X)                 \
+    do {                                                 \
+        if (gCheckErrorGL) {                             \
+            IFACE->checkError(GR_FILE_AND_LINE_STR, #X); \
+        }                                                \
+    } while (false)
 #else
-    #define GR_GL_CHECK_ERROR_IMPL(IFACE, X)
+#define GR_GL_CHECK_ERROR_IMPL(IFACE, X) \
+    do {                                 \
+    } while (false)
 #endif
 
 // internal macro to conditionally log the gl call using SkDebugf based on
@@ -316,9 +318,6 @@
         GR_GL_LOG_CALLS_IMPL(X);                                \
     } while (false)
 
-// call glGetError without doing a redundant error check or logging.
-#define GR_GL_GET_ERROR(IFACE) (IFACE)->fFunctions.fGetError()
-
 static constexpr GrGLFormat GrGLFormatFromGLEnum(GrGLenum glFormat) {
     switch (glFormat) {
         case GR_GL_RGBA8:                return GrGLFormat::kRGBA8;
diff --git a/src/gpu/gl/builders/GrGLProgramBuilder.cpp b/src/gpu/gl/builders/GrGLProgramBuilder.cpp
index aea6b49..4e0d6f3 100644
--- a/src/gpu/gl/builders/GrGLProgramBuilder.cpp
+++ b/src/gpu/gl/builders/GrGLProgramBuilder.cpp
@@ -275,11 +275,11 @@
                 if (!reader.isValid()) {
                     break;
                 }
-                GrGLClearErr(this->gpu()->glInterface());
+                this->gpu()->clearErrorsAndCheckForOOM();
                 GR_GL_CALL_NOERRCHECK(this->gpu()->glInterface(),
                                       ProgramBinary(programID, binaryFormat,
                                                     const_cast<void*>(binary), length));
-                if (GR_GL_GET_ERROR(this->gpu()->glInterface()) == GR_GL_NO_ERROR) {
+                if (this->gpu()->getErrorAndCheckForOOM() == GR_GL_NO_ERROR) {
                     if (checkLinked) {
                         cached = this->checkLinkStatus(programID, errorHandler, nullptr, nullptr);
                     }
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 61eb1f3..312792b 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -171,6 +171,8 @@
                          const SkIRect& bounds, bool forSecondaryCB);
     void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);
 
+    using GrGpu::setOOMed;
+
 private:
     enum SyncQueue {
         kForce_SyncQueue,
diff --git a/src/gpu/vk/GrVkUtil.h b/src/gpu/vk/GrVkUtil.h
index 37d7d91..ba24ea4 100644
--- a/src/gpu/vk/GrVkUtil.h
+++ b/src/gpu/vk/GrVkUtil.h
@@ -21,25 +21,31 @@
 // makes a Vk call on the interface
 #define GR_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X
 
-#define GR_VK_CALL_RESULT(GPU, RESULT, X)                             \
-    do {                                                              \
-    (RESULT) = GR_VK_CALL(GPU->vkInterface(), X);                     \
-    SkASSERT(VK_SUCCESS == RESULT || VK_ERROR_DEVICE_LOST == RESULT); \
-    if (RESULT != VK_SUCCESS && !GPU->isDeviceLost()) {               \
-        SkDebugf("Failed vulkan call. Error: %d\n", RESULT);          \
-    }                                                                 \
-    if (VK_ERROR_DEVICE_LOST == RESULT) {                             \
-        GPU->setDeviceLost();                                         \
-    }                                                                 \
-    } while(false)
+#define GR_VK_CALL_RESULT(GPU, RESULT, X)                                 \
+    do {                                                                  \
+        (RESULT) = GR_VK_CALL(GPU->vkInterface(), X);                     \
+        SkASSERT(VK_SUCCESS == RESULT || VK_ERROR_DEVICE_LOST == RESULT); \
+        if (RESULT != VK_SUCCESS && !GPU->isDeviceLost()) {               \
+            SkDebugf("Failed vulkan call. Error: %d," #X "\n", RESULT);   \
+        }                                                                 \
+        if (RESULT == VK_ERROR_DEVICE_LOST) {                             \
+            GPU->setDeviceLost();                                         \
+        } else if (RESULT == VK_ERROR_OUT_OF_HOST_MEMORY ||               \
+                   RESULT == VK_ERROR_OUT_OF_DEVICE_MEMORY) {             \
+            GPU->setOOMed();                                              \
+        }                                                                 \
+    } while (false)
 
-#define GR_VK_CALL_RESULT_NOCHECK(GPU, RESULT, X)                     \
-    do {                                                              \
-    (RESULT) = GR_VK_CALL(GPU->vkInterface(), X);                     \
-    if (VK_ERROR_DEVICE_LOST == RESULT) {                             \
-        GPU->setDeviceLost();                                         \
-    }                                                                 \
-    } while(false)
+#define GR_VK_CALL_RESULT_NOCHECK(GPU, RESULT, X)             \
+    do {                                                      \
+        (RESULT) = GR_VK_CALL(GPU->vkInterface(), X);         \
+        if (RESULT == VK_ERROR_DEVICE_LOST) {                 \
+            GPU->setDeviceLost();                             \
+        } else if (RESULT == VK_ERROR_OUT_OF_HOST_MEMORY ||   \
+                   RESULT == VK_ERROR_OUT_OF_DEVICE_MEMORY) { \
+            GPU->setOOMed();                                  \
+        }                                                     \
+    } while (false)
 
 // same as GR_VK_CALL but checks for success
 #define GR_VK_CALL_ERRCHECK(GPU, X)                                  \