Cleanup resource flags
Converts GrResourceProvider::Flags and GrResourceCache::ScratchFlags
to "enum class" and fixes a case where we were accidentally using the
wrong type of flag. Makes sure to allocate GrSWMaskHelper proxies with
kNoPendingIO.
Bug: skia:8351
Change-Id: Ibcaa26314a53d0cb31ae22915ab94ab0fc07e76d
Reviewed-on: https://skia-review.googlesource.com/157280
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index c47881c..1f16c69 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -373,7 +373,7 @@
auto resourceProvider = fGpu->getContext()->contextPriv().resourceProvider();
// Shouldn't have to use this flag (https://bug.skia.org/4156)
- static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
+ static const auto kFlags = GrResourceProvider::Flags::kNoPendingIO;
return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, kFlags);
}
diff --git a/src/gpu/GrOnFlushResourceProvider.cpp b/src/gpu/GrOnFlushResourceProvider.cpp
index 26b59f9..d63c5ac 100644
--- a/src/gpu/GrOnFlushResourceProvider.cpp
+++ b/src/gpu/GrOnFlushResourceProvider.cpp
@@ -115,7 +115,7 @@
auto resourceProvider = fDrawingMgr->getContext()->contextPriv().resourceProvider();
return sk_sp<GrBuffer>(resourceProvider->createBuffer(size, intendedType,
kDynamic_GrAccessPattern,
- GrResourceProvider::kNoPendingIO_Flag,
+ GrResourceProvider::Flags::kNoPendingIO,
data));
}
diff --git a/src/gpu/GrProxyProvider.cpp b/src/gpu/GrProxyProvider.cpp
index 9f9de96..c1f8fad 100644
--- a/src/gpu/GrProxyProvider.cpp
+++ b/src/gpu/GrProxyProvider.cpp
@@ -174,30 +174,12 @@
return result;
}
-sk_sp<GrTextureProxy> GrProxyProvider::createInstantiatedProxy(const GrSurfaceDesc& desc,
- GrSurfaceOrigin origin,
- SkBackingFit fit,
- SkBudgeted budgeted,
- GrSurfaceDescFlags descFlags) {
- sk_sp<GrTexture> tex;
-
- if (SkBackingFit::kApprox == fit) {
- tex = fResourceProvider->createApproxTexture(desc, descFlags);
- } else {
- tex = fResourceProvider->createTexture(desc, budgeted, descFlags);
- }
- if (!tex) {
- return nullptr;
- }
-
- return this->createWrapped(std::move(tex), origin);
-}
-
sk_sp<GrTextureProxy> GrProxyProvider::createTextureProxy(sk_sp<SkImage> srcImage,
GrSurfaceDescFlags descFlags,
int sampleCnt,
SkBudgeted budgeted,
- SkBackingFit fit) {
+ SkBackingFit fit,
+ GrInternalSurfaceFlags surfaceFlags) {
ASSERT_SINGLE_OWNER
SkASSERT(srcImage);
@@ -230,7 +212,6 @@
}
}
- GrInternalSurfaceFlags surfaceFlags = GrInternalSurfaceFlags::kNone;
if (SkToBool(descFlags & kRenderTarget_GrSurfaceFlag)) {
if (fCaps->usesMixedSamples() && sampleCnt > 1) {
surfaceFlags |= GrInternalSurfaceFlags::kMixedSampled;
@@ -248,7 +229,7 @@
desc.fConfig = config;
sk_sp<GrTextureProxy> proxy = this->createLazyProxy(
- [desc, budgeted, srcImage, fit](GrResourceProvider* resourceProvider) {
+ [desc, budgeted, srcImage, fit, surfaceFlags](GrResourceProvider* resourceProvider) {
if (!resourceProvider) {
// Nothing to clean up here. Once the proxy (and thus lambda) is deleted the ref
// on srcImage will be released.
@@ -258,7 +239,12 @@
SkAssertResult(srcImage->peekPixels(&pixMap));
GrMipLevel mipLevel = { pixMap.addr(), pixMap.rowBytes() };
- return resourceProvider->createTexture(desc, budgeted, fit, mipLevel);
+ auto resourceProviderFlags = GrResourceProvider::Flags::kNone;
+ if (surfaceFlags & GrInternalSurfaceFlags::kNoPendingIO) {
+ resourceProviderFlags |= GrResourceProvider::Flags::kNoPendingIO;
+ }
+ return resourceProvider->createTexture(desc, budgeted, fit, mipLevel,
+ resourceProviderFlags);
},
desc, kTopLeft_GrSurfaceOrigin, GrMipMapped::kNo, GrTextureType::k2D, surfaceFlags, fit,
budgeted);
diff --git a/src/gpu/GrProxyProvider.h b/src/gpu/GrProxyProvider.h
index df6903b..26dc6e9 100644
--- a/src/gpu/GrProxyProvider.h
+++ b/src/gpu/GrProxyProvider.h
@@ -61,25 +61,13 @@
sk_sp<GrTextureProxy> findOrCreateProxyByUniqueKey(const GrUniqueKey&, GrSurfaceOrigin);
/*
- * Create a texture proxy that is backed by an instantiated GrSurface. This is almost entirely
- * used by Skia's testing code.
- * DDL TODO: remove the remaining Skia-internal use of this method and make it truly
- * testing-only.
- */
- sk_sp<GrTextureProxy> createInstantiatedProxy(const GrSurfaceDesc&, GrSurfaceOrigin,
- SkBackingFit, SkBudgeted,
- GrSurfaceDescFlags = kNone_GrSurfaceFlags);
-
- /*
* Create an un-mipmapped texture proxy with data. The SkImage must be a raster backend image.
* Since the SkImage is ref counted, we simply take a ref on it to keep the data alive until we
* actually upload the data to the gpu.
*/
- sk_sp<GrTextureProxy> createTextureProxy(sk_sp<SkImage> srcImage,
- GrSurfaceDescFlags descFlags,
- int sampleCnt,
- SkBudgeted budgeted,
- SkBackingFit fit);
+ sk_sp<GrTextureProxy> createTextureProxy(
+ sk_sp<SkImage> srcImage, GrSurfaceDescFlags, int sampleCnt, SkBudgeted, SkBackingFit,
+ GrInternalSurfaceFlags = GrInternalSurfaceFlags::kNone);
/*
* Create a mipmapped texture proxy without any data.
@@ -244,8 +232,15 @@
*/
bool recordingDDL() const { return !SkToBool(fResourceProvider); }
+ /*
+ * Create a texture proxy that is backed by an instantiated GrSurface.
+ */
+ sk_sp<GrTextureProxy> testingOnly_createInstantiatedProxy(const GrSurfaceDesc&, GrSurfaceOrigin,
+ SkBackingFit, SkBudgeted);
+
private:
friend class GrAHardwareBufferImageGenerator; // for createWrapped
+ friend class GrResourceProvider; // for createWrapped
sk_sp<GrTextureProxy> createWrapped(sk_sp<GrTexture> tex, GrSurfaceOrigin origin);
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index e6f5994..26ec881 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -256,17 +256,17 @@
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
size_t resourceSize,
- uint32_t flags) {
+ ScratchFlags flags) {
SkASSERT(scratchKey.isValid());
GrGpuResource* resource;
- if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) {
+ if (flags & (ScratchFlags::kPreferNoPendingIO | ScratchFlags::kRequireNoPendingIO)) {
resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
if (resource) {
this->refAndMakeResourceMRU(resource);
this->validate();
return resource;
- } else if (flags & kRequireNoPendingIO_ScratchFlag) {
+ } else if (flags & ScratchFlags::kRequireNoPendingIO) {
return nullptr;
}
// We would prefer to consume more available VRAM rather than flushing
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index 3cc6665..1d3598e 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -119,19 +119,19 @@
*/
void releaseAll();
- enum {
+ enum class ScratchFlags {
+ kNone = 0,
/** Preferentially returns scratch resources with no pending IO. */
- kPreferNoPendingIO_ScratchFlag = 0x1,
+ kPreferNoPendingIO = 0x1,
/** Will not return any resources that match but have pending IO. */
- kRequireNoPendingIO_ScratchFlag = 0x2,
+ kRequireNoPendingIO = 0x2,
};
/**
* Find a resource that matches a scratch key.
*/
- GrGpuResource* findAndRefScratchResource(const GrScratchKey& scratchKey,
- size_t resourceSize,
- uint32_t flags);
+ GrGpuResource* findAndRefScratchResource(const GrScratchKey& scratchKey, size_t resourceSize,
+ ScratchFlags);
#ifdef SK_DEBUG
// This is not particularly fast and only used for validation, so debug only.
@@ -360,6 +360,8 @@
bool fPreferVRAMUseOverFlushes;
};
+GR_MAKE_BITFIELD_CLASS_OPS(GrResourceCache::ScratchFlags);
+
class GrResourceCache::ResourceAccess {
private:
ResourceAccess(GrResourceCache* cache) : fCache(cache) { }
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index 12bc5ab..5939101 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -80,7 +80,7 @@
}
sk_sp<GrTexture> GrResourceProvider::getExactScratch(const GrSurfaceDesc& desc,
- SkBudgeted budgeted, uint32_t flags) {
+ SkBudgeted budgeted, Flags flags) {
sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
if (tex && SkBudgeted::kNo == budgeted) {
tex->resourcePriv().makeUnbudgeted();
@@ -92,7 +92,8 @@
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
SkBudgeted budgeted,
SkBackingFit fit,
- const GrMipLevel& mipLevel) {
+ const GrMipLevel& mipLevel,
+ Flags flags) {
ASSERT_SINGLE_OWNER
if (this->isAbandoned()) {
@@ -112,10 +113,15 @@
SkColorType colorType;
if (GrPixelConfigToColorType(desc.fConfig, &colorType)) {
- // DDL TODO: remove this use of createInstantiatedProxy and convert it to a testing-only
- // method.
- sk_sp<GrTextureProxy> proxy = proxyProvider->createInstantiatedProxy(
- desc, kTopLeft_GrSurfaceOrigin, fit, budgeted);
+ sk_sp<GrTexture> tex = (SkBackingFit::kApprox == fit)
+ ? this->createApproxTexture(desc, flags)
+ : this->createTexture(desc, budgeted, flags);
+ if (!tex) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy = proxyProvider->createWrapped(std::move(tex),
+ kTopLeft_GrSurfaceOrigin);
if (!proxy) {
return nullptr;
}
@@ -134,7 +140,7 @@
}
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
- uint32_t flags) {
+ Flags flags) {
ASSERT_SINGLE_OWNER
if (this->isAbandoned()) {
return nullptr;
@@ -153,9 +159,9 @@
}
sk_sp<GrTexture> GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc,
- uint32_t flags) {
+ Flags flags) {
ASSERT_SINGLE_OWNER
- SkASSERT(0 == flags || kNoPendingIO_Flag == flags);
+ SkASSERT(Flags::kNone == flags || Flags::kNoPendingIO == flags);
if (this->isAbandoned()) {
return nullptr;
@@ -186,8 +192,7 @@
return fGpu->createTexture(*copyDesc, SkBudgeted::kYes);
}
-sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& desc,
- uint32_t flags) {
+sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& desc, Flags flags) {
ASSERT_SINGLE_OWNER
SkASSERT(!this->isAbandoned());
SkASSERT(fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo));
@@ -199,13 +204,13 @@
GrScratchKey key;
GrTexturePriv::ComputeScratchKey(desc, &key);
- uint32_t scratchFlags = 0;
- if (kNoPendingIO_Flag & flags) {
- scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+ auto scratchFlags = GrResourceCache::ScratchFlags::kNone;
+ if (Flags::kNoPendingIO & flags) {
+ scratchFlags |= GrResourceCache::ScratchFlags::kRequireNoPendingIO;
} else if (!(desc.fFlags & kRenderTarget_GrSurfaceFlag)) {
// If it is not a render target then it will most likely be populated by
// writePixels() which will trigger a flush if the texture has pending IO.
- scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+ scratchFlags |= GrResourceCache::ScratchFlags::kPreferNoPendingIO;
}
GrGpuResource* resource = fCache->findAndRefScratchResource(key,
GrSurface::WorstCaseSize(desc),
@@ -267,7 +272,7 @@
if (auto buffer = this->findByUniqueKey<GrBuffer>(key)) {
return std::move(buffer);
}
- if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, 0,
+ if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, Flags::kNone,
data)) {
// We shouldn't bin and/or cachestatic buffers.
SkASSERT(buffer->sizeInBytes() == size);
@@ -288,7 +293,7 @@
// This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
sk_sp<GrBuffer> buffer(this->createBuffer(bufferSize, kIndex_GrBufferType,
- kStatic_GrAccessPattern, kNoPendingIO_Flag));
+ kStatic_GrAccessPattern, Flags::kNoPendingIO));
if (!buffer) {
return nullptr;
}
@@ -336,7 +341,7 @@
}
GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
- GrAccessPattern accessPattern, uint32_t flags,
+ GrAccessPattern accessPattern, Flags flags,
const void* data) {
if (this->isAbandoned()) {
return nullptr;
@@ -344,7 +349,7 @@
if (kDynamic_GrAccessPattern != accessPattern) {
return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
}
- if (!(flags & kRequireGpuMemory_Flag) &&
+ if (!(flags & Flags::kRequireGpuMemory) &&
this->gpu()->caps()->preferClientSideDynamicBuffers() &&
GrBufferTypeIsVertexOrIndex(intendedType) &&
kDynamic_GrAccessPattern == accessPattern) {
@@ -357,11 +362,11 @@
GrScratchKey key;
GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
- uint32_t scratchFlags = 0;
- if (flags & kNoPendingIO_Flag) {
- scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+ auto scratchFlags = GrResourceCache::ScratchFlags::kNone;
+ if (flags & Flags::kNoPendingIO) {
+ scratchFlags = GrResourceCache::ScratchFlags::kRequireNoPendingIO;
} else {
- scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+ scratchFlags = GrResourceCache::ScratchFlags::kPreferNoPendingIO;
}
GrBuffer* buffer = static_cast<GrBuffer*>(
this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags));
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index d1c6aa3..491d67d 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -39,6 +39,24 @@
*/
class GrResourceProvider {
public:
+ /** These flags govern which scratch resources we are allowed to return */
+ enum class Flags {
+ kNone = 0x0,
+
+ /** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
+ * set when accessing resources during a GrOpList flush. This includes the execution of
+ * GrOp objects. The reason is that these memory operations are done immediately and
+ * will occur out of order WRT the operations being flushed.
+ * Make this automatic: https://bug.skia.org/4156
+ */
+ kNoPendingIO = 0x1,
+
+ /** Normally the caps may indicate a preference for client-side buffers. Set this flag when
+ * creating a buffer to guarantee it resides in GPU memory.
+ */
+ kRequireGpuMemory = 0x2,
+ };
+
GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*,
GrContextOptions::Enable explicitlyAllocateGPUResources);
@@ -61,18 +79,18 @@
* then result will be a render target. Format and sample count will always match the request.
* The contents of the texture are undefined.
*/
- sk_sp<GrTexture> createApproxTexture(const GrSurfaceDesc&, uint32_t flags);
+ sk_sp<GrTexture> createApproxTexture(const GrSurfaceDesc&, Flags);
/** Create an exact fit texture with no initial data to upload.
*/
- sk_sp<GrTexture> createTexture(const GrSurfaceDesc&, SkBudgeted, uint32_t flags = 0);
+ sk_sp<GrTexture> createTexture(const GrSurfaceDesc&, SkBudgeted, Flags = Flags::kNone);
sk_sp<GrTexture> createTexture(const GrSurfaceDesc&, SkBudgeted, const GrMipLevel texels[],
int mipLevelCount);
// Create a potentially loose fit texture with the provided data
sk_sp<GrTexture> createTexture(const GrSurfaceDesc&, SkBudgeted, SkBackingFit,
- const GrMipLevel&);
+ const GrMipLevel&, Flags);
///////////////////////////////////////////////////////////////////////////
// Wrapped Backend Surfaces
@@ -169,24 +187,6 @@
*/
sk_sp<GrPath> createPath(const SkPath&, const GrStyle&);
- /** These flags govern which scratch resources we are allowed to return */
- enum Flags {
- kNone_Flag = 0x0,
-
- /** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
- * set when accessing resources during a GrOpList flush. This includes the execution of
- * GrOp objects. The reason is that these memory operations are done immediately and
- * will occur out of order WRT the operations being flushed.
- * Make this automatic: https://bug.skia.org/4156
- */
- kNoPendingIO_Flag = 0x1,
-
- /** Normally the caps may indicate a preference for client-side buffers. Set this flag when
- * creating a buffer to guarantee it resides in GPU memory.
- */
- kRequireGpuMemory_Flag = 0x2,
- };
-
/**
* Returns a buffer.
*
@@ -198,7 +198,7 @@
*
* @return the buffer if successful, otherwise nullptr.
*/
- GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern, uint32_t flags,
+ GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern, Flags,
const void* data = nullptr);
@@ -258,13 +258,13 @@
// Attempts to find a resource in the cache that exactly matches the GrSurfaceDesc. Failing that
// it returns null. If non-null, the resulting texture is always budgeted.
- sk_sp<GrTexture> refScratchTexture(const GrSurfaceDesc&, uint32_t scratchTextureFlags);
+ sk_sp<GrTexture> refScratchTexture(const GrSurfaceDesc&, Flags);
/*
* Try to find an existing scratch texture that exactly matches 'desc'. If successful
* update the budgeting accordingly.
*/
- sk_sp<GrTexture> getExactScratch(const GrSurfaceDesc&, SkBudgeted, uint32_t flags);
+ sk_sp<GrTexture> getExactScratch(const GrSurfaceDesc&, SkBudgeted, Flags);
GrResourceCache* cache() { return fCache; }
const GrResourceCache* cache() const { return fCache; }
@@ -298,4 +298,6 @@
SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
};
+GR_MAKE_BITFIELD_CLASS_OPS(GrResourceProvider::Flags);
+
#endif
diff --git a/src/gpu/GrSWMaskHelper.cpp b/src/gpu/GrSWMaskHelper.cpp
index 0fbf9dc..852a040 100644
--- a/src/gpu/GrSWMaskHelper.cpp
+++ b/src/gpu/GrSWMaskHelper.cpp
@@ -106,7 +106,15 @@
return nullptr;
}
- return context->contextPriv().proxyProvider()->createTextureProxy(std::move(img),
- kNone_GrSurfaceFlags, 1,
- SkBudgeted::kYes, fit);
+ // TODO: http://skbug.com/8422: Although this fixes http://skbug.com/8351, it seems like these
+ // should just participate in the normal allocation process and not need the pending IO flag.
+ auto surfaceFlags = GrInternalSurfaceFlags::kNone;
+ if (!context->contextPriv().resourceProvider()) {
+ // In DDL mode, this texture proxy will be instantiated at flush time, therefore it cannot
+ // have pending IO.
+ surfaceFlags |= GrInternalSurfaceFlags::kNoPendingIO;
+ }
+
+ return context->contextPriv().proxyProvider()->createTextureProxy(
+ std::move(img), kNone_GrSurfaceFlags, 1, SkBudgeted::kYes, fit, surfaceFlags);
}
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index f14a436..d46480a 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -131,12 +131,12 @@
desc.fConfig = fConfig;
desc.fSampleCnt = sampleCnt;
- GrResourceProvider::Flags resourceProviderFlags = GrResourceProvider::kNone_Flag;
- if (fSurfaceFlags & GrInternalSurfaceFlags::kNoPendingIO ||
+ GrResourceProvider::Flags resourceProviderFlags = GrResourceProvider::Flags::kNone;
+ if ((fSurfaceFlags & GrInternalSurfaceFlags::kNoPendingIO) ||
resourceProvider->explicitlyAllocateGPUResources()) {
// The explicit resource allocator requires that any resources it pulls out of the
// cache have no pending IO.
- resourceProviderFlags = GrResourceProvider::kNoPendingIO_Flag;
+ resourceProviderFlags = GrResourceProvider::Flags::kNoPendingIO;
}
sk_sp<GrSurface> surface;
diff --git a/src/gpu/ops/GrDrawVerticesOp.cpp b/src/gpu/ops/GrDrawVerticesOp.cpp
index 04b537f..b4aa619 100644
--- a/src/gpu/ops/GrDrawVerticesOp.cpp
+++ b/src/gpu/ops/GrDrawVerticesOp.cpp
@@ -312,7 +312,7 @@
vertexBuffer.reset(rp->createBuffer(fVertexCount * vertexStride,
kVertex_GrBufferType,
kStatic_GrAccessPattern,
- 0));
+ GrResourceProvider::Flags::kNone));
void* verts = vertexBuffer ? vertexBuffer->map() : nullptr;
if (!verts) {
SkDebugf("Could not allocate vertices\n");
@@ -325,7 +325,7 @@
indexBuffer.reset(rp->createBuffer(fIndexCount * sizeof(uint16_t),
kIndex_GrBufferType,
kStatic_GrAccessPattern,
- 0));
+ GrResourceProvider::Flags::kNone));
indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr;
if (!indices) {
SkDebugf("Could not allocate indices\n");
diff --git a/src/gpu/ops/GrTessellatingPathRenderer.cpp b/src/gpu/ops/GrTessellatingPathRenderer.cpp
index f722868..75620d3 100644
--- a/src/gpu/ops/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/ops/GrTessellatingPathRenderer.cpp
@@ -78,7 +78,7 @@
void* lock(int vertexCount) override {
size_t size = vertexCount * stride();
fVertexBuffer.reset(fResourceProvider->createBuffer(
- size, kVertex_GrBufferType, kStatic_GrAccessPattern, 0));
+ size, kVertex_GrBufferType, kStatic_GrAccessPattern, GrResourceProvider::Flags::kNone));
if (!fVertexBuffer.get()) {
return nullptr;
}