Convert GrBufferType to enum class, rename, and remove dead values
Renamed to GrGpuBufferType in anticipation of splitting GrBuffer
into GrGpuBuffer and GrCpuBuffer types.
Two unused values in the enum, DrawIndirect and Texel, are removed.
Change-Id: Icb6b3da689adbd8e10495c10fd0470a6ee0120b5
Reviewed-on: https://skia-review.googlesource.com/c/189280
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrBuffer.cpp b/src/gpu/GrBuffer.cpp
index 25e7ab0..cec2556 100644
--- a/src/gpu/GrBuffer.cpp
+++ b/src/gpu/GrBuffer.cpp
@@ -9,8 +9,8 @@
#include "GrGpu.h"
#include "GrCaps.h"
-sk_sp<GrBuffer> GrBuffer::MakeCPUBacked(GrGpu* gpu, size_t sizeInBytes, GrBufferType intendedType,
- const void* data) {
+sk_sp<GrBuffer> GrBuffer::MakeCPUBacked(GrGpu* gpu, size_t sizeInBytes,
+ GrGpuBufferType intendedType, const void* data) {
SkASSERT(GrBufferTypeIsVertexOrIndex(intendedType));
void* cpuData;
if (gpu->caps()->mustClearUploadedBufferData()) {
@@ -24,7 +24,7 @@
return sk_sp<GrBuffer>(new GrBuffer(gpu, sizeInBytes, intendedType, cpuData));
}
-GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrBufferType type, void* cpuData)
+GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, void* cpuData)
: INHERITED(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
@@ -34,7 +34,7 @@
this->registerWithCache(SkBudgeted::kNo);
}
-GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrBufferType type, GrAccessPattern pattern)
+GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrAccessPattern pattern)
: INHERITED(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
@@ -44,14 +44,14 @@
// Subclass registers with cache.
}
-void GrBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrBufferType intendedType,
+void GrBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
// TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
// a chunk of memory we can use/reuse for any type of data. We really only need to
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
- builder[0] = intendedType;
+ builder[0] = SkToU32(intendedType);
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
builder[2] = (uint32_t)((uint64_t)size >> 32);
diff --git a/src/gpu/GrBuffer.h b/src/gpu/GrBuffer.h
index 8b0d551..128ec48 100644
--- a/src/gpu/GrBuffer.h
+++ b/src/gpu/GrBuffer.h
@@ -18,14 +18,14 @@
* Creates a client-side buffer.
*/
static SK_WARN_UNUSED_RESULT sk_sp<GrBuffer> MakeCPUBacked(GrGpu*, size_t sizeInBytes,
- GrBufferType,
+ GrGpuBufferType,
const void* data = nullptr);
/**
* Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
* "static" and "stream" patterns are disqualified by nature from being cached and reused.)
*/
- static void ComputeScratchKeyForDynamicVBO(size_t size, GrBufferType, GrScratchKey*);
+ static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*);
GrAccessPattern accessPattern() const { return fAccessPattern; }
size_t sizeInBytes() const { return fSizeInBytes; }
@@ -101,7 +101,7 @@
}
protected:
- GrBuffer(GrGpu*, size_t sizeInBytes, GrBufferType, GrAccessPattern);
+ GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern);
void* fMapPtr;
@@ -109,7 +109,7 @@
/**
* Internal constructor to make a CPU-backed buffer.
*/
- GrBuffer(GrGpu*, size_t sizeInBytes, GrBufferType, void* cpuData);
+ GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, void* cpuData);
virtual void onMap() { SkASSERT(this->isCPUBacked()); fMapPtr = fCPUData; }
virtual void onUnmap() { SkASSERT(this->isCPUBacked()); }
@@ -122,7 +122,7 @@
size_t fSizeInBytes;
GrAccessPattern fAccessPattern;
void* fCPUData;
- GrBufferType fIntendedType;
+ GrGpuBufferType fIntendedType;
typedef GrGpuResource INHERITED;
};
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index 67898c4..0d0e151 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -36,7 +36,7 @@
constexpr size_t GrBufferAllocPool::kDefaultBufferSize;
-GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrBufferType bufferType, void* initialBuffer)
+GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType, void* initialBuffer)
: fBlocks(8), fGpu(gpu), fBufferType(bufferType), fInitialCpuData(initialBuffer) {
if (fInitialCpuData) {
fCpuDataSize = kDefaultBufferSize;
@@ -375,7 +375,7 @@
////////////////////////////////////////////////////////////////////////////////
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, void* initialCpuBuffer)
- : GrBufferAllocPool(gpu, kVertex_GrBufferType, initialCpuBuffer) {}
+ : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, initialCpuBuffer) {}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
int vertexCount,
@@ -428,7 +428,7 @@
////////////////////////////////////////////////////////////////////////////////
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, void* initialCpuBuffer)
- : GrBufferAllocPool(gpu, kIndex_GrBufferType, initialCpuBuffer) {}
+ : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, initialCpuBuffer) {}
void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
int* startIndex) {
diff --git a/src/gpu/GrBufferAllocPool.h b/src/gpu/GrBufferAllocPool.h
index ca1a896..9453660 100644
--- a/src/gpu/GrBufferAllocPool.h
+++ b/src/gpu/GrBufferAllocPool.h
@@ -59,7 +59,7 @@
* This parameter can be used to avoid malloc/free when all
* usages can be satisfied with default-sized buffers.
*/
- GrBufferAllocPool(GrGpu* gpu, GrBufferType bufferType, void* initialBuffer);
+ GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType, void* initialBuffer);
virtual ~GrBufferAllocPool();
@@ -136,7 +136,7 @@
SkTArray<BufferBlock> fBlocks;
GrGpu* fGpu;
- GrBufferType fBufferType;
+ GrGpuBufferType fBufferType;
void* fInitialCpuData = nullptr;
void* fCpuData = nullptr;
size_t fCpuDataSize = 0;
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 1462bf1..f21e389 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -217,7 +217,7 @@
return nullptr;
}
-sk_sp<GrBuffer> GrGpu::createBuffer(size_t size, GrBufferType intendedType,
+sk_sp<GrBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
this->handleDirtyContext();
sk_sp<GrBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 64e1978..ed9be12 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -140,7 +140,7 @@
*
* @return the buffer if successful, otherwise nullptr.
*/
- sk_sp<GrBuffer> createBuffer(size_t size, GrBufferType intendedType,
+ sk_sp<GrBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data = nullptr);
/**
@@ -467,8 +467,8 @@
virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
const GrVkDrawableInfo&);
- virtual sk_sp<GrBuffer> onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
- const void* data) = 0;
+ virtual sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern, const void* data) = 0;
// overridden by backend-specific derived class to perform the surface read
virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height, GrColorType,
diff --git a/src/gpu/GrOnFlushResourceProvider.cpp b/src/gpu/GrOnFlushResourceProvider.cpp
index ced73c4..a2c56e3 100644
--- a/src/gpu/GrOnFlushResourceProvider.cpp
+++ b/src/gpu/GrOnFlushResourceProvider.cpp
@@ -73,7 +73,7 @@
return proxy->instantiate(resourceProvider);
}
-sk_sp<GrBuffer> GrOnFlushResourceProvider::makeBuffer(GrBufferType intendedType, size_t size,
+sk_sp<GrBuffer> GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size,
const void* data) {
auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider();
return sk_sp<GrBuffer>(resourceProvider->createBuffer(size, intendedType,
@@ -82,10 +82,8 @@
data));
}
-sk_sp<const GrBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer(GrBufferType intendedType,
- size_t size,
- const void* data,
- const GrUniqueKey& key) {
+sk_sp<const GrBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer(
+ GrGpuBufferType intendedType, size_t size, const void* data, const GrUniqueKey& key) {
auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider();
sk_sp<const GrBuffer> buffer = resourceProvider->findOrMakeStaticBuffer(intendedType, size,
data, key);
diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h
index f2ce74f..11caf39 100644
--- a/src/gpu/GrOnFlushResourceProvider.h
+++ b/src/gpu/GrOnFlushResourceProvider.h
@@ -86,10 +86,10 @@
bool instatiateProxy(GrSurfaceProxy*);
// Creates a GPU buffer with a "dynamic" access pattern.
- sk_sp<GrBuffer> makeBuffer(GrBufferType, size_t, const void* data = nullptr);
+ sk_sp<GrBuffer> makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr);
// Either finds and refs, or creates a static GPU buffer with the given data.
- sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrBufferType, size_t, const void* data,
+ sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data,
const GrUniqueKey&);
uint32_t contextID() const;
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index f590176..4ba7de4 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -285,7 +285,7 @@
: sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
}
-sk_sp<const GrBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrBufferType intendedType,
+sk_sp<const GrBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size,
const void* data,
const GrUniqueKey& key) {
@@ -312,7 +312,7 @@
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
// This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
- sk_sp<GrBuffer> buffer(this->createBuffer(bufferSize, kIndex_GrBufferType,
+ sk_sp<GrBuffer> buffer(this->createBuffer(bufferSize, GrGpuBufferType::kIndex,
kStatic_GrAccessPattern, Flags::kNone));
if (!buffer) {
return nullptr;
@@ -360,7 +360,7 @@
return this->gpu()->pathRendering()->createPath(path, style);
}
-sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
+sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, Flags flags,
const void* data) {
if (this->isAbandoned()) {
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index 6803ddd..9005a96 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -145,7 +145,7 @@
*
* @return The buffer if successful, otherwise nullptr.
*/
- sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrBufferType intendedType, size_t size,
+ sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
const void* data, const GrUniqueKey& key);
/**
@@ -205,7 +205,7 @@
*
* @return the buffer if successful, otherwise nullptr.
*/
- sk_sp<GrBuffer> createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern, Flags,
+ sk_sp<GrBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, Flags,
const void* data = nullptr);
/**
diff --git a/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp b/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp
index 59fe583..38b1435 100644
--- a/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp
+++ b/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp
@@ -449,19 +449,19 @@
case PrimitiveType::kTriangles:
case PrimitiveType::kWeightedTriangles: {
GR_DEFINE_STATIC_UNIQUE_KEY(gTriangleVertexBufferKey);
- fVSVertexBuffer = rp->findOrMakeStaticBuffer(kVertex_GrBufferType,
+ fVSVertexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kVertex,
sizeof(kTriangleVertices),
kTriangleVertices,
gTriangleVertexBufferKey);
GR_DEFINE_STATIC_UNIQUE_KEY(gTriangleIndexBufferKey);
if (caps.usePrimitiveRestart()) {
- fVSIndexBuffer = rp->findOrMakeStaticBuffer(kIndex_GrBufferType,
+ fVSIndexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
sizeof(kTriangleIndicesAsStrips),
kTriangleIndicesAsStrips,
gTriangleIndexBufferKey);
fVSNumIndicesPerInstance = SK_ARRAY_COUNT(kTriangleIndicesAsStrips);
} else {
- fVSIndexBuffer = rp->findOrMakeStaticBuffer(kIndex_GrBufferType,
+ fVSIndexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
sizeof(kTriangleIndicesAsTris),
kTriangleIndicesAsTris,
gTriangleIndexBufferKey);
@@ -474,18 +474,18 @@
case PrimitiveType::kCubics:
case PrimitiveType::kConics: {
GR_DEFINE_STATIC_UNIQUE_KEY(gCurveVertexBufferKey);
- fVSVertexBuffer = rp->findOrMakeStaticBuffer(kVertex_GrBufferType,
- sizeof(kCurveVertices), kCurveVertices,
- gCurveVertexBufferKey);
+ fVSVertexBuffer =
+ rp->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kCurveVertices),
+ kCurveVertices, gCurveVertexBufferKey);
GR_DEFINE_STATIC_UNIQUE_KEY(gCurveIndexBufferKey);
if (caps.usePrimitiveRestart()) {
- fVSIndexBuffer = rp->findOrMakeStaticBuffer(kIndex_GrBufferType,
+ fVSIndexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
sizeof(kCurveIndicesAsStrips),
kCurveIndicesAsStrips,
gCurveIndexBufferKey);
fVSNumIndicesPerInstance = SK_ARRAY_COUNT(kCurveIndicesAsStrips);
} else {
- fVSIndexBuffer = rp->findOrMakeStaticBuffer(kIndex_GrBufferType,
+ fVSIndexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
sizeof(kCurveIndicesAsTris),
kCurveIndicesAsTris,
gCurveIndexBufferKey);
diff --git a/src/gpu/ccpr/GrCCFiller.cpp b/src/gpu/ccpr/GrCCFiller.cpp
index a5e2e59..311eb95 100644
--- a/src/gpu/ccpr/GrCCFiller.cpp
+++ b/src/gpu/ccpr/GrCCFiller.cpp
@@ -326,8 +326,8 @@
fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;
- fInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
- quadEndIdx * sizeof(QuadPointInstance));
+ fInstanceBuffer =
+ onFlushRP->makeBuffer(GrGpuBufferType::kVertex, quadEndIdx * sizeof(QuadPointInstance));
if (!fInstanceBuffer) {
SkDebugf("WARNING: failed to allocate CCPR fill instance buffer.\n");
return false;
diff --git a/src/gpu/ccpr/GrCCPathProcessor.cpp b/src/gpu/ccpr/GrCCPathProcessor.cpp
index 81c57dd..e397516 100644
--- a/src/gpu/ccpr/GrCCPathProcessor.cpp
+++ b/src/gpu/ccpr/GrCCPathProcessor.cpp
@@ -36,7 +36,7 @@
sk_sp<const GrBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) {
GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
- return onFlushRP->findOrMakeStaticBuffer(kVertex_GrBufferType, sizeof(kOctoEdgeNorms),
+ return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms),
kOctoEdgeNorms, gVertexBufferKey);
}
@@ -67,11 +67,13 @@
sk_sp<const GrBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) {
GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
if (onFlushRP->caps()->usePrimitiveRestart()) {
- return onFlushRP->findOrMakeStaticBuffer(kIndex_GrBufferType, sizeof(kOctoIndicesAsStrips),
- kOctoIndicesAsStrips, gIndexBufferKey);
+ return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
+ sizeof(kOctoIndicesAsStrips), kOctoIndicesAsStrips,
+ gIndexBufferKey);
} else {
- return onFlushRP->findOrMakeStaticBuffer(kIndex_GrBufferType, sizeof(kOctoIndicesAsTris),
- kOctoIndicesAsTris, gIndexBufferKey);
+ return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
+ sizeof(kOctoIndicesAsTris), kOctoIndicesAsTris,
+ gIndexBufferKey);
}
}
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index 54e395c..b1aa3fa 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -147,9 +147,9 @@
GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
const GrCCPerFlushResourceSpecs& specs)
- // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
- // (See transform_path_pts below.)
- // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
+ // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
+ // (See transform_path_pts below.)
+ // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
: fLocalDevPtsBuffer(SkTMax(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
, fFiller(specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
@@ -165,7 +165,7 @@
specs.fRenderedAtlasSpecs, onFlushRP->caps())
, fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
, fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
- , fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
+ , fInstanceBuffer(onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
inst_buffer_count(specs) * sizeof(PathInstance)))
, fNextCopyInstanceIdx(0)
, fNextPathInstanceIdx(specs.fNumCopiedPaths[kFillIdx] +
diff --git a/src/gpu/ccpr/GrCCStroker.cpp b/src/gpu/ccpr/GrCCStroker.cpp
index 011acb2..5293658 100644
--- a/src/gpu/ccpr/GrCCStroker.cpp
+++ b/src/gpu/ccpr/GrCCStroker.cpp
@@ -372,7 +372,7 @@
int endConicsIdx = stroker->fBaseInstances[1].fConics +
stroker->fInstanceCounts[1]->fConics;
- fInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
+ fInstanceBuffer = onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
endConicsIdx * sizeof(ConicInstance));
if (!fInstanceBuffer) {
SkDebugf("WARNING: failed to allocate CCPR stroke instance buffer.\n");
diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp
index fdfc45c..dd3e1c5 100644
--- a/src/gpu/gl/GrGLBuffer.cpp
+++ b/src/gpu/gl/GrGLBuffer.cpp
@@ -29,11 +29,11 @@
#define VALIDATE() do {} while(false)
#endif
-sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
+sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
if (gpu->glCaps().transferBufferType() == GrGLCaps::kNone_TransferBufferType &&
- (kXferCpuToGpu_GrBufferType == intendedType ||
- kXferGpuToCpu_GrBufferType == intendedType)) {
+ (GrGpuBufferType::kXferCpuToGpu == intendedType ||
+ GrGpuBufferType::kXferGpuToCpu == intendedType)) {
return nullptr;
}
@@ -48,57 +48,59 @@
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
-inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType,
+inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
GrAccessPattern accessPattern) {
- static const GrGLenum drawUsages[] = {
- DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
- GR_GL_STATIC_DRAW, // kStatic_GrAccessPattern
- GR_GL_STREAM_DRAW // kStream_GrAccessPattern
+ auto drawUsage = [](GrAccessPattern pattern) {
+ switch (pattern) {
+ case kDynamic_GrAccessPattern:
+ // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
+ return DYNAMIC_DRAW_PARAM;
+ case kStatic_GrAccessPattern:
+ return GR_GL_STATIC_DRAW;
+ case kStream_GrAccessPattern:
+ return GR_GL_STREAM_DRAW;
+ }
+ SK_ABORT("Unexpected access pattern");
+ return GR_GL_STATIC_DRAW;
};
- static const GrGLenum readUsages[] = {
- GR_GL_DYNAMIC_READ, // kDynamic_GrAccessPattern
- GR_GL_STATIC_READ, // kStatic_GrAccessPattern
- GR_GL_STREAM_READ // kStream_GrAccessPattern
+ auto readUsage = [](GrAccessPattern pattern) {
+ switch (pattern) {
+ case kDynamic_GrAccessPattern:
+ return GR_GL_DYNAMIC_READ;
+ case kStatic_GrAccessPattern:
+ return GR_GL_STATIC_READ;
+ case kStream_GrAccessPattern:
+ return GR_GL_STREAM_READ;
+ }
+ SK_ABORT("Unexpected access pattern");
+ return GR_GL_STATIC_READ;
};
- GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
- GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
- GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
- GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
- GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);
-
- static GrGLenum const* const usageTypes[] = {
- drawUsages, // kVertex_GrBufferType,
- drawUsages, // kIndex_GrBufferType,
- drawUsages, // kTexel_GrBufferType,
- drawUsages, // kDrawIndirect_GrBufferType,
- drawUsages, // kXferCpuToGpu_GrBufferType,
- readUsages // kXferGpuToCpu_GrBufferType,
+ auto usageType = [&drawUsage, &readUsage](GrGpuBufferType type, GrAccessPattern pattern) {
+ switch (type) {
+ case GrGpuBufferType::kVertex:
+ case GrGpuBufferType::kIndex:
+ case GrGpuBufferType::kXferCpuToGpu:
+ return drawUsage(pattern);
+ case GrGpuBufferType::kXferGpuToCpu:
+ return readUsage(pattern);
+ }
+ SK_ABORT("Unexpected gpu buffer type.");
+ return GR_GL_STATIC_DRAW;
};
- GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
- GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
- GR_STATIC_ASSERT(2 == kTexel_GrBufferType);
- GR_STATIC_ASSERT(3 == kDrawIndirect_GrBufferType);
- GR_STATIC_ASSERT(4 == kXferCpuToGpu_GrBufferType);
- GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType);
- GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount);
-
- SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType);
- SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);
-
- return usageTypes[bufferType][accessPattern];
+ return usageType(bufferType, accessPattern);
}
-GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
+GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data)
- : INHERITED(gpu, size, intendedType, accessPattern)
- , fIntendedType(intendedType)
- , fBufferID(0)
- , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern))
- , fGLSizeInBytes(0)
- , fHasAttachedToTexture(false) {
+ : INHERITED(gpu, size, intendedType, accessPattern)
+ , fIntendedType(intendedType)
+ , fBufferID(0)
+ , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern))
+ , fGLSizeInBytes(0)
+ , fHasAttachedToTexture(false) {
GL_CALL(GenBuffers(1, &fBufferID));
if (fBufferID) {
GrGLenum target = gpu->bindBuffer(fIntendedType, this);
@@ -165,7 +167,7 @@
SkASSERT(!this->isMapped());
// TODO: Make this a function parameter.
- bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType);
+ bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);
// Handling dirty context is done in the bindBuffer call
switch (this->glCaps().mapBufferType()) {
@@ -187,7 +189,7 @@
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
}
GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
- if (kXferCpuToGpu_GrBufferType != fIntendedType) {
+ if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
// TODO: Make this a function parameter.
writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
}
diff --git a/src/gpu/gl/GrGLBuffer.h b/src/gpu/gl/GrGLBuffer.h
index 889c4e9..18480ff 100644
--- a/src/gpu/gl/GrGLBuffer.h
+++ b/src/gpu/gl/GrGLBuffer.h
@@ -16,8 +16,8 @@
class GrGLBuffer : public GrBuffer {
public:
- static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern,
- const void* data = nullptr);
+ static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern, const void* data = nullptr);
~GrGLBuffer() override {
// either release or abandon should have been called by the owner of this object.
@@ -36,7 +36,8 @@
bool hasAttachedToTexture() const { return fHasAttachedToTexture; }
protected:
- GrGLBuffer(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern, const void* data);
+ GrGLBuffer(GrGLGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+ const void* data);
void onAbandon() override;
void onRelease() override;
@@ -55,11 +56,11 @@
void validate() const;
#endif
- GrBufferType fIntendedType;
- GrGLuint fBufferID;
- GrGLenum fUsage;
- size_t fGLSizeInBytes;
- bool fHasAttachedToTexture;
+ GrGpuBufferType fIntendedType;
+ GrGLuint fBufferID;
+ GrGLenum fUsage;
+ size_t fGLSizeInBytes;
+ bool fHasAttachedToTexture;
typedef GrBuffer INHERITED;
};
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 50b70fa..77a7972 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -343,23 +343,21 @@
fHWBoundTextureUniqueIDs.reset(this->caps()->shaderCaps()->maxFragmentSamplers());
- fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
- fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
- fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
- fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
- fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
- GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
- fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
- GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
+ GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
+ GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
} else {
- fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
- fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
}
- for (int i = 0; i < kGrBufferTypeCount; ++i) {
+ for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
fHWBufferState[i].invalidate();
}
- GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));
+ GR_STATIC_ASSERT(4 == SK_ARRAY_COUNT(fHWBufferState));
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
fPathRendering.reset(new GrGLPathRendering(this));
@@ -484,10 +482,8 @@
// just set this to the default for self-consistency.
GL_CALL(FrontFace(GR_GL_CCW));
- fHWBufferState[kTexel_GrBufferType].invalidate();
- fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
- fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
- fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();
if (kGL_GrGLStandard == this->glStandard()) {
#ifndef USE_NSIGHT
@@ -579,8 +575,8 @@
// Vertex
if (resetBits & kVertex_GrGLBackendState) {
fHWVertexArrayState.invalidate();
- fHWBufferState[kVertex_GrBufferType].invalidate();
- fHWBufferState[kIndex_GrBufferType].invalidate();
+ this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
+ this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
}
if (resetBits & kRenderTarget_GrGLBackendState) {
@@ -870,7 +866,7 @@
SkASSERT(!transferBuffer->isMapped());
SkASSERT(!transferBuffer->isCPUBacked());
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
- this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);
+ this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
SkDEBUGCODE(
SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
@@ -1150,12 +1146,11 @@
}
void GrGLGpu::unbindCpuToGpuXferBuffer() {
- auto& xferBufferState = fHWBufferState[kXferCpuToGpu_GrBufferType];
- if (!xferBufferState.fBoundBufferUniqueID.isInvalid()) {
- GL_CALL(BindBuffer(xferBufferState.fGLTarget, 0));
- xferBufferState.invalidate();
+ auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu);
+ if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) {
+ GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
+ xferBufferState->invalidate();
}
-
}
// TODO: Make this take a GrColorType instead of dataConfig. This requires updating GrGLCaps to
@@ -1852,11 +1847,7 @@
////////////////////////////////////////////////////////////////////////////////
-// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
-// objects are implemented as client-side-arrays on tile-deferred architectures.
-#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
-
-sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
+sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
}
@@ -2107,31 +2098,29 @@
}
}
-GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) {
+GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
this->handleDirtyContext();
// Index buffer state is tied to the vertex array.
- if (kIndex_GrBufferType == type) {
+ if (GrGpuBufferType::kIndex == type) {
this->bindVertexArray(0);
}
- SkASSERT(type >= 0 && type <= kLast_GrBufferType);
- auto& bufferState = fHWBufferState[type];
-
- if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) {
- if (buffer->isCPUBacked()) {
- if (!bufferState.fBufferZeroKnownBound) {
- GL_CALL(BindBuffer(bufferState.fGLTarget, 0));
- }
- } else {
- const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
- GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID()));
+ auto* bufferState = this->hwBufferState(type);
+ if (buffer->isCPUBacked()) {
+ if (!bufferState->fBufferZeroKnownBound) {
+ GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
+ bufferState->fBufferZeroKnownBound = true;
+ bufferState->fBoundBufferUniqueID.makeInvalid();
}
- bufferState.fBufferZeroKnownBound = buffer->isCPUBacked();
- bufferState.fBoundBufferUniqueID = buffer->uniqueID();
+ } else if (buffer->uniqueID() != bufferState->fBoundBufferUniqueID) {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
+ GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
+ bufferState->fBufferZeroKnownBound = false;
+ bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
}
- return bufferState.fGLTarget;
+ return bufferState->fGLTarget;
}
void GrGLGpu::disableScissor() {
if (kNo_TriState != fHWScissorSettings.fEnabled) {
@@ -3327,7 +3316,7 @@
1, 0,
1, 1
};
- fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), kVertex_GrBufferType,
+ fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
kStatic_GrAccessPattern, vdata);
}
if (!fCopyProgramArrayBuffer) {
@@ -3815,7 +3804,7 @@
1, 0,
1, 1
};
- fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), kVertex_GrBufferType,
+ fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
kStatic_GrAccessPattern, vdata);
}
if (!fMipmapProgramArrayBuffer) {
@@ -4213,7 +4202,7 @@
} else {
if (ibuf) {
// bindBuffer implicitly binds VAO 0 when binding an index buffer.
- gpu->bindBuffer(kIndex_GrBufferType, ibuf);
+ gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
} else {
this->setVertexArrayID(gpu, 0);
}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index fede5cc..5167b4a 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -74,7 +74,7 @@
// returns the GL target the buffer was bound to.
// When 'type' is kIndex_GrBufferType, this function will also implicitly bind the default VAO.
// If the caller wishes to bind an index buffer to a specific VAO, it can call glBind directly.
- GrGLenum bindBuffer(GrBufferType type, const GrBuffer*);
+ GrGLenum bindBuffer(GrGpuBufferType type, const GrBuffer*);
// The GrGLGpuRTCommandBuffer does not buffer up draws before submitting them to the gpu.
// Thus this is the implementation of the draw call for the corresponding passthrough function
@@ -187,7 +187,7 @@
sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel texels[], int mipLevelCount) override;
- sk_sp<GrBuffer> onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
+ sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data) override;
sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
@@ -557,7 +557,13 @@
fBoundBufferUniqueID.makeInvalid();
fBufferZeroKnownBound = false;
}
- } fHWBufferState[kGrBufferTypeCount];
+ } fHWBufferState[kGrGpuBufferTypeCount];
+
+ auto* hwBufferState(GrGpuBufferType type) {
+ unsigned typeAsUInt = static_cast<unsigned>(type);
+ SkASSERT(typeAsUInt < SK_ARRAY_COUNT(fHWBufferState));
+ return &fHWBufferState[typeAsUInt];
+ }
struct {
GrBlendEquation fEquation;
diff --git a/src/gpu/gl/GrGLVertexArray.cpp b/src/gpu/gl/GrGLVertexArray.cpp
index 062ca4f..6b12a3f 100644
--- a/src/gpu/gl/GrGLVertexArray.cpp
+++ b/src/gpu/gl/GrGLVertexArray.cpp
@@ -94,7 +94,7 @@
array->fGPUType != gpuType ||
array->fStride != stride ||
array->fOffset != offsetInBytes) {
- gpu->bindBuffer(kVertex_GrBufferType, vertexBuffer);
+ gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer);
const AttribLayout& layout = attrib_layout(cpuType);
const GrGLvoid* offsetAsPtr = reinterpret_cast<const GrGLvoid*>(offsetInBytes);
if (GrSLTypeIsFloatType(gpuType)) {
diff --git a/src/gpu/mock/GrMockBuffer.h b/src/gpu/mock/GrMockBuffer.h
index fa3e712..cf915ee 100644
--- a/src/gpu/mock/GrMockBuffer.h
+++ b/src/gpu/mock/GrMockBuffer.h
@@ -14,7 +14,7 @@
class GrMockBuffer : public GrBuffer {
public:
- GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrBufferType type,
+ GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern)
: INHERITED(gpu, sizeInBytes, type, accessPattern) {
this->registerWithCache(SkBudgeted::kYes);
diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp
index b221eed..4960fd0 100644
--- a/src/gpu/mock/GrMockGpu.cpp
+++ b/src/gpu/mock/GrMockGpu.cpp
@@ -183,7 +183,7 @@
new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc, rtInfo));
}
-sk_sp<GrBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrBufferType type,
+sk_sp<GrBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern, const void*) {
return sk_sp<GrBuffer>(new GrMockBuffer(this, sizeInBytes, type, accessPattern));
}
diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h
index 0a230d9..d574a5d 100644
--- a/src/gpu/mock/GrMockGpu.h
+++ b/src/gpu/mock/GrMockGpu.h
@@ -72,7 +72,7 @@
sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
int sampleCnt) override;
- sk_sp<GrBuffer> onCreateBuffer(size_t sizeInBytes, GrBufferType, GrAccessPattern,
+ sk_sp<GrBuffer> onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern,
const void*) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
diff --git a/src/gpu/mtl/GrMtlBuffer.h b/src/gpu/mtl/GrMtlBuffer.h
index e0c580b..97df71c 100644
--- a/src/gpu/mtl/GrMtlBuffer.h
+++ b/src/gpu/mtl/GrMtlBuffer.h
@@ -17,7 +17,7 @@
class GrMtlBuffer: public GrBuffer {
public:
- static sk_sp<GrMtlBuffer> Make(GrMtlGpu*, size_t size, GrBufferType intendedType,
+ static sk_sp<GrMtlBuffer> Make(GrMtlGpu*, size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data = nullptr);
~GrMtlBuffer() override;
@@ -25,7 +25,7 @@
id<MTLBuffer> mtlBuffer() const { return fMtlBuffer; }
protected:
- GrMtlBuffer(GrMtlGpu*, size_t size, GrBufferType intendedType, GrAccessPattern);
+ GrMtlBuffer(GrMtlGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern);
void onAbandon() override;
void onRelease() override;
@@ -44,7 +44,7 @@
void validate() const;
#endif
- GrBufferType fIntendedType;
+ GrGpuBufferType fIntendedType;
bool fIsDynamic;
id<MTLBuffer> fMtlBuffer;
id<MTLBuffer> fMappedBuffer;
diff --git a/src/gpu/mtl/GrMtlBuffer.mm b/src/gpu/mtl/GrMtlBuffer.mm
index 0e24da6..f6e2c48 100644
--- a/src/gpu/mtl/GrMtlBuffer.mm
+++ b/src/gpu/mtl/GrMtlBuffer.mm
@@ -16,11 +16,8 @@
#define VALIDATE() do {} while(false)
#endif
-sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu, size_t size, GrBufferType intendedType,
+sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
- // TODO: DrawIndirect buffers aren't actually supported yet because we don't have a way of
- // uploading data to them.
- SkASSERT(intendedType != kDrawIndirect_GrBufferType);
sk_sp<GrMtlBuffer> buffer(new GrMtlBuffer(gpu, size, intendedType, accessPattern));
if (data && !buffer->onUpdateData(data, size)) {
return nullptr;
@@ -28,7 +25,7 @@
return buffer;
}
-GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrBufferType intendedType,
+GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern)
: INHERITED(gpu, size, intendedType, accessPattern)
, fIntendedType(intendedType)
@@ -64,10 +61,6 @@
if (srcInBytes > fMtlBuffer.length) {
return false;
}
- if (fIntendedType == kDrawIndirect_GrBufferType) {
- // TODO: implement encoding data into argument (DrawIndirect) buffers.
- return false;
- }
VALIDATE();
this->internalMap(srcInBytes);
@@ -177,11 +170,10 @@
#ifdef SK_DEBUG
void GrMtlBuffer::validate() const {
SkASSERT(fMtlBuffer == nil ||
- fIntendedType == kVertex_GrBufferType ||
- fIntendedType == kIndex_GrBufferType ||
- fIntendedType == kXferCpuToGpu_GrBufferType ||
- fIntendedType == kXferGpuToCpu_GrBufferType);
-// fIntendedType == kDrawIndirect_GrBufferType not yet supported
+ fIntendedType == GrGpuBufferType::kVertex ||
+ fIntendedType == GrGpuBufferType::kIndex ||
+ fIntendedType == GrGpuBufferType::kXferCpuToGpu ||
+ fIntendedType == GrGpuBufferType::kXferGpuToCpu);
SkASSERT(fMappedBuffer == nil || fMtlBuffer == nil ||
fMappedBuffer.length <= fMtlBuffer.length);
SkASSERT(fIsDynamic == false); // TODO: implement synchronization to allow dynamic access.
diff --git a/src/gpu/mtl/GrMtlCopyManager.mm b/src/gpu/mtl/GrMtlCopyManager.mm
index 4a39452..2203c87 100644
--- a/src/gpu/mtl/GrMtlCopyManager.mm
+++ b/src/gpu/mtl/GrMtlCopyManager.mm
@@ -29,7 +29,7 @@
{1, 0},
{1, 1},
};
- sk_sp<GrMtlBuffer> mtlBuffer = GrMtlBuffer::Make(fGpu, sizeof(vdata), kVertex_GrBufferType,
+ sk_sp<GrMtlBuffer> mtlBuffer = GrMtlBuffer::Make(fGpu, sizeof(vdata), GrGpuBufferType::kVertex,
kStatic_GrAccessPattern, vdata);
fVertexAttributeBuffer = mtlBuffer->mtlBuffer();
}
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index ab6c46a..d82a30a 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -143,7 +143,7 @@
sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
int sampleCnt) override;
- sk_sp<GrBuffer> onCreateBuffer(size_t, GrBufferType, GrAccessPattern, const void*) override;
+ sk_sp<GrBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern, const void*) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override;
diff --git a/src/gpu/mtl/GrMtlGpu.mm b/src/gpu/mtl/GrMtlGpu.mm
index 2350e0a..833dfdb0 100644
--- a/src/gpu/mtl/GrMtlGpu.mm
+++ b/src/gpu/mtl/GrMtlGpu.mm
@@ -131,7 +131,7 @@
fCmdBuffer = [fQueue commandBuffer];
}
-sk_sp<GrBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrBufferType type,
+sk_sp<GrBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) {
return GrMtlBuffer::Make(this, size, type, accessPattern, data);
}
diff --git a/src/gpu/mtl/GrMtlPipelineStateBuilder.mm b/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
index 6582840..5168358 100644
--- a/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
+++ b/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
@@ -366,11 +366,11 @@
fUniformHandler.fUniforms,
GrMtlBuffer::Make(fGpu,
fUniformHandler.fCurrentGeometryUBOOffset,
- kVertex_GrBufferType,
+ GrGpuBufferType::kVertex,
kStatic_GrAccessPattern),
GrMtlBuffer::Make(fGpu,
fUniformHandler.fCurrentFragmentUBOOffset,
- kVertex_GrBufferType,
+ GrGpuBufferType::kVertex,
kStatic_GrAccessPattern),
(uint32_t)fUniformHandler.numSamplers(),
std::move(fGeometryProcessor),
diff --git a/src/gpu/ops/GrAAFillRRectOp.cpp b/src/gpu/ops/GrAAFillRRectOp.cpp
index d472fae..3954523 100644
--- a/src/gpu/ops/GrAAFillRRectOp.cpp
+++ b/src/gpu/ops/GrAAFillRRectOp.cpp
@@ -440,18 +440,16 @@
GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
- sk_sp<const GrBuffer> indexBuffer =
- flushState->resourceProvider()->findOrMakeStaticBuffer(
- kIndex_GrBufferType, sizeof(kIndexData), kIndexData, gIndexBufferKey);
+ sk_sp<const GrBuffer> indexBuffer = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kIndexData), kIndexData, gIndexBufferKey);
if (!indexBuffer) {
return;
}
GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
- sk_sp<const GrBuffer> vertexBuffer =
- flushState->resourceProvider()->findOrMakeStaticBuffer(
- kVertex_GrBufferType, sizeof(kVertexData), kVertexData, gVertexBufferKey);
+ sk_sp<const GrBuffer> vertexBuffer = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kVertex, sizeof(kVertexData), kVertexData, gVertexBufferKey);
if (!vertexBuffer) {
return;
}
diff --git a/src/gpu/ops/GrDrawVerticesOp.cpp b/src/gpu/ops/GrDrawVerticesOp.cpp
index bdcc20c..2b368fe 100644
--- a/src/gpu/ops/GrDrawVerticesOp.cpp
+++ b/src/gpu/ops/GrDrawVerticesOp.cpp
@@ -301,7 +301,7 @@
// Allocate vertex buffer.
size_t vertexStride = gp->vertexStride();
vertexBuffer = rp->createBuffer(fVertexCount * vertexStride,
- kVertex_GrBufferType,
+ GrGpuBufferType::kVertex,
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
void* verts = vertexBuffer ? vertexBuffer->map() : nullptr;
@@ -314,7 +314,7 @@
uint16_t* indices = nullptr;
if (this->isIndexed()) {
indexBuffer = rp->createBuffer(fIndexCount * sizeof(uint16_t),
- kIndex_GrBufferType,
+ GrGpuBufferType::kIndex,
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr;
diff --git a/src/gpu/ops/GrTessellatingPathRenderer.cpp b/src/gpu/ops/GrTessellatingPathRenderer.cpp
index 7dbfa6b..0314e4a 100644
--- a/src/gpu/ops/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/ops/GrTessellatingPathRenderer.cpp
@@ -77,9 +77,9 @@
}
void* lock(int vertexCount) override {
size_t size = vertexCount * stride();
- fVertexBuffer =
- fResourceProvider->createBuffer(size, kVertex_GrBufferType, kStatic_GrAccessPattern,
- GrResourceProvider::Flags::kNone);
+ fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex,
+ kStatic_GrAccessPattern,
+ GrResourceProvider::Flags::kNone);
if (!fVertexBuffer.get()) {
return nullptr;
}
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 28c7b2a..ad143ee 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -343,33 +343,30 @@
}
///////////////////////////////////////////////////////////////////////////////
-sk_sp<GrBuffer> GrVkGpu::onCreateBuffer(size_t size, GrBufferType type,
+sk_sp<GrBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) {
sk_sp<GrBuffer> buff;
switch (type) {
- case kVertex_GrBufferType:
+ case GrGpuBufferType::kVertex:
SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
kStatic_GrAccessPattern == accessPattern);
buff = GrVkVertexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
break;
- case kIndex_GrBufferType:
+ case GrGpuBufferType::kIndex:
SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
kStatic_GrAccessPattern == accessPattern);
buff = GrVkIndexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
break;
- case kXferCpuToGpu_GrBufferType:
+ case GrGpuBufferType::kXferCpuToGpu:
SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
kStream_GrAccessPattern == accessPattern);
buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyRead_Type);
break;
- case kXferGpuToCpu_GrBufferType:
+ case GrGpuBufferType::kXferGpuToCpu:
SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
kStream_GrAccessPattern == accessPattern);
buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyWrite_Type);
break;
- case kDrawIndirect_GrBufferType:
- SK_ABORT("DrawIndirect Buffers not supported in vulkan backend.");
- return nullptr;
default:
SK_ABORT("Unknown buffer type.");
return nullptr;
@@ -2152,7 +2149,7 @@
size_t imageRows = region.imageExtent.height;
auto transferBuffer = sk_sp<GrVkTransferBuffer>(
static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
- kXferGpuToCpu_GrBufferType,
+ GrGpuBufferType::kXferGpuToCpu,
kStream_GrAccessPattern)
.release()));
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 944126a..a52a982 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -198,7 +198,7 @@
sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
const GrVkDrawableInfo&) override;
- sk_sp<GrBuffer> onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
+ sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
const void* data) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
diff --git a/src/gpu/vk/GrVkIndexBuffer.cpp b/src/gpu/vk/GrVkIndexBuffer.cpp
index cf4fe98..ec2e11e 100644
--- a/src/gpu/vk/GrVkIndexBuffer.cpp
+++ b/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -10,9 +10,9 @@
GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
- : INHERITED(gpu, desc.fSizeInBytes, kIndex_GrBufferType,
- desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
- , GrVkBuffer(desc, bufferResource) {
+ : INHERITED(gpu, desc.fSizeInBytes, GrGpuBufferType::kIndex,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
this->registerWithCache(SkBudgeted::kYes);
}
diff --git a/src/gpu/vk/GrVkTransferBuffer.cpp b/src/gpu/vk/GrVkTransferBuffer.cpp
index cb98d2b..7ccf221 100644
--- a/src/gpu/vk/GrVkTransferBuffer.cpp
+++ b/src/gpu/vk/GrVkTransferBuffer.cpp
@@ -31,11 +31,11 @@
GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
- : INHERITED(gpu, desc.fSizeInBytes,
- kCopyRead_Type == desc.fType ?
- kXferCpuToGpu_GrBufferType : kXferGpuToCpu_GrBufferType,
- kStream_GrAccessPattern)
- , GrVkBuffer(desc, bufferResource) {
+ : INHERITED(gpu, desc.fSizeInBytes,
+ kCopyRead_Type == desc.fType ? GrGpuBufferType::kXferCpuToGpu
+ : GrGpuBufferType::kXferGpuToCpu,
+ kStream_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
this->registerWithCache(SkBudgeted::kYes);
}
diff --git a/src/gpu/vk/GrVkVertexBuffer.cpp b/src/gpu/vk/GrVkVertexBuffer.cpp
index d94f11d..f8e55a8 100644
--- a/src/gpu/vk/GrVkVertexBuffer.cpp
+++ b/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -10,9 +10,9 @@
GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
- : INHERITED(gpu, desc.fSizeInBytes, kVertex_GrBufferType,
- desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
- , GrVkBuffer(desc, bufferResource) {
+ : INHERITED(gpu, desc.fSizeInBytes, GrGpuBufferType::kVertex,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
this->registerWithCache(SkBudgeted::kYes);
}