Convert GrBufferType to enum class, rename, and remove dead values
Renamed to GrGpuBufferType in anticipation of splitting GrBuffer
into GrGpuBuffer and GrCpuBuffer types.
Two unused values in the enum, DrawIndirect and Texel, are removed.
Change-Id: Icb6b3da689adbd8e10495c10fd0470a6ee0120b5
Reviewed-on: https://skia-review.googlesource.com/c/189280
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp
index fdfc45c..dd3e1c5 100644
--- a/src/gpu/gl/GrGLBuffer.cpp
+++ b/src/gpu/gl/GrGLBuffer.cpp
@@ -29,11 +29,11 @@
#define VALIDATE() do {} while(false)
#endif
-sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
+sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
if (gpu->glCaps().transferBufferType() == GrGLCaps::kNone_TransferBufferType &&
- (kXferCpuToGpu_GrBufferType == intendedType ||
- kXferGpuToCpu_GrBufferType == intendedType)) {
+ (GrGpuBufferType::kXferCpuToGpu == intendedType ||
+ GrGpuBufferType::kXferGpuToCpu == intendedType)) {
return nullptr;
}
@@ -48,57 +48,59 @@
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
-inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType,
+inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
GrAccessPattern accessPattern) {
- static const GrGLenum drawUsages[] = {
- DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
- GR_GL_STATIC_DRAW, // kStatic_GrAccessPattern
- GR_GL_STREAM_DRAW // kStream_GrAccessPattern
+ auto drawUsage = [](GrAccessPattern pattern) {
+ switch (pattern) {
+ case kDynamic_GrAccessPattern:
+ // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
+ return DYNAMIC_DRAW_PARAM;
+ case kStatic_GrAccessPattern:
+ return GR_GL_STATIC_DRAW;
+ case kStream_GrAccessPattern:
+ return GR_GL_STREAM_DRAW;
+ }
+ SK_ABORT("Unexpected access pattern");
+ return GR_GL_STATIC_DRAW;
};
- static const GrGLenum readUsages[] = {
- GR_GL_DYNAMIC_READ, // kDynamic_GrAccessPattern
- GR_GL_STATIC_READ, // kStatic_GrAccessPattern
- GR_GL_STREAM_READ // kStream_GrAccessPattern
+ auto readUsage = [](GrAccessPattern pattern) {
+ switch (pattern) {
+ case kDynamic_GrAccessPattern:
+ return GR_GL_DYNAMIC_READ;
+ case kStatic_GrAccessPattern:
+ return GR_GL_STATIC_READ;
+ case kStream_GrAccessPattern:
+ return GR_GL_STREAM_READ;
+ }
+ SK_ABORT("Unexpected access pattern");
+ return GR_GL_STATIC_READ;
};
- GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
- GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
- GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
- GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
- GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);
-
- static GrGLenum const* const usageTypes[] = {
- drawUsages, // kVertex_GrBufferType,
- drawUsages, // kIndex_GrBufferType,
- drawUsages, // kTexel_GrBufferType,
- drawUsages, // kDrawIndirect_GrBufferType,
- drawUsages, // kXferCpuToGpu_GrBufferType,
- readUsages // kXferGpuToCpu_GrBufferType,
+ auto usageType = [&drawUsage, &readUsage](GrGpuBufferType type, GrAccessPattern pattern) {
+ switch (type) {
+ case GrGpuBufferType::kVertex:
+ case GrGpuBufferType::kIndex:
+ case GrGpuBufferType::kXferCpuToGpu:
+ return drawUsage(pattern);
+ case GrGpuBufferType::kXferGpuToCpu:
+ return readUsage(pattern);
+ }
+ SK_ABORT("Unexpected gpu buffer type.");
+ return GR_GL_STATIC_DRAW;
};
- GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
- GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
- GR_STATIC_ASSERT(2 == kTexel_GrBufferType);
- GR_STATIC_ASSERT(3 == kDrawIndirect_GrBufferType);
- GR_STATIC_ASSERT(4 == kXferCpuToGpu_GrBufferType);
- GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType);
- GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount);
-
- SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType);
- SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);
-
- return usageTypes[bufferType][accessPattern];
+ return usageType(bufferType, accessPattern);
}
-GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
+GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data)
- : INHERITED(gpu, size, intendedType, accessPattern)
- , fIntendedType(intendedType)
- , fBufferID(0)
- , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern))
- , fGLSizeInBytes(0)
- , fHasAttachedToTexture(false) {
+ : INHERITED(gpu, size, intendedType, accessPattern)
+ , fIntendedType(intendedType)
+ , fBufferID(0)
+ , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern))
+ , fGLSizeInBytes(0)
+ , fHasAttachedToTexture(false) {
GL_CALL(GenBuffers(1, &fBufferID));
if (fBufferID) {
GrGLenum target = gpu->bindBuffer(fIntendedType, this);
@@ -165,7 +167,7 @@
SkASSERT(!this->isMapped());
// TODO: Make this a function parameter.
- bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType);
+ bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);
// Handling dirty context is done in the bindBuffer call
switch (this->glCaps().mapBufferType()) {
@@ -187,7 +189,7 @@
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
}
GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
- if (kXferCpuToGpu_GrBufferType != fIntendedType) {
+ if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
// TODO: Make this a function parameter.
writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
}
diff --git a/src/gpu/gl/GrGLBuffer.h b/src/gpu/gl/GrGLBuffer.h
index 889c4e9..18480ff 100644
--- a/src/gpu/gl/GrGLBuffer.h
+++ b/src/gpu/gl/GrGLBuffer.h
@@ -16,8 +16,8 @@
class GrGLBuffer : public GrBuffer {
public:
- static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern,
- const void* data = nullptr);
+ static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern, const void* data = nullptr);
~GrGLBuffer() override {
// either release or abandon should have been called by the owner of this object.
@@ -36,7 +36,8 @@
bool hasAttachedToTexture() const { return fHasAttachedToTexture; }
protected:
- GrGLBuffer(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern, const void* data);
+ GrGLBuffer(GrGLGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+ const void* data);
void onAbandon() override;
void onRelease() override;
@@ -55,11 +56,11 @@
void validate() const;
#endif
- GrBufferType fIntendedType;
- GrGLuint fBufferID;
- GrGLenum fUsage;
- size_t fGLSizeInBytes;
- bool fHasAttachedToTexture;
+ GrGpuBufferType fIntendedType;
+ GrGLuint fBufferID;
+ GrGLenum fUsage;
+ size_t fGLSizeInBytes;
+ bool fHasAttachedToTexture;
typedef GrBuffer INHERITED;
};
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 50b70fa..77a7972 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -343,23 +343,21 @@
fHWBoundTextureUniqueIDs.reset(this->caps()->shaderCaps()->maxFragmentSamplers());
- fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
- fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
- fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
- fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
- fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
- GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
- fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
- GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
+ GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
+ GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
} else {
- fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
- fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
}
- for (int i = 0; i < kGrBufferTypeCount; ++i) {
+ for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
fHWBufferState[i].invalidate();
}
- GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));
+ GR_STATIC_ASSERT(4 == SK_ARRAY_COUNT(fHWBufferState));
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
fPathRendering.reset(new GrGLPathRendering(this));
@@ -484,10 +482,8 @@
// just set this to the default for self-consistency.
GL_CALL(FrontFace(GR_GL_CCW));
- fHWBufferState[kTexel_GrBufferType].invalidate();
- fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
- fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
- fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();
if (kGL_GrGLStandard == this->glStandard()) {
#ifndef USE_NSIGHT
@@ -579,8 +575,8 @@
// Vertex
if (resetBits & kVertex_GrGLBackendState) {
fHWVertexArrayState.invalidate();
- fHWBufferState[kVertex_GrBufferType].invalidate();
- fHWBufferState[kIndex_GrBufferType].invalidate();
+ this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
+ this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
}
if (resetBits & kRenderTarget_GrGLBackendState) {
@@ -870,7 +866,7 @@
SkASSERT(!transferBuffer->isMapped());
SkASSERT(!transferBuffer->isCPUBacked());
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
- this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);
+ this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
SkDEBUGCODE(
SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
@@ -1150,12 +1146,11 @@
}
void GrGLGpu::unbindCpuToGpuXferBuffer() {
- auto& xferBufferState = fHWBufferState[kXferCpuToGpu_GrBufferType];
- if (!xferBufferState.fBoundBufferUniqueID.isInvalid()) {
- GL_CALL(BindBuffer(xferBufferState.fGLTarget, 0));
- xferBufferState.invalidate();
+ auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu);
+ if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) {
+ GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
+ xferBufferState->invalidate();
}
-
}
// TODO: Make this take a GrColorType instead of dataConfig. This requires updating GrGLCaps to
@@ -1852,11 +1847,7 @@
////////////////////////////////////////////////////////////////////////////////
-// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
-// objects are implemented as client-side-arrays on tile-deferred architectures.
-#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
-
-sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
+sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
}
@@ -2107,31 +2098,29 @@
}
}
-GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) {
+GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
this->handleDirtyContext();
// Index buffer state is tied to the vertex array.
- if (kIndex_GrBufferType == type) {
+ if (GrGpuBufferType::kIndex == type) {
this->bindVertexArray(0);
}
- SkASSERT(type >= 0 && type <= kLast_GrBufferType);
- auto& bufferState = fHWBufferState[type];
-
- if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) {
- if (buffer->isCPUBacked()) {
- if (!bufferState.fBufferZeroKnownBound) {
- GL_CALL(BindBuffer(bufferState.fGLTarget, 0));
- }
- } else {
- const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
- GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID()));
+ auto* bufferState = this->hwBufferState(type);
+ if (buffer->isCPUBacked()) {
+ if (!bufferState->fBufferZeroKnownBound) {
+ GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
+ bufferState->fBufferZeroKnownBound = true;
+ bufferState->fBoundBufferUniqueID.makeInvalid();
}
- bufferState.fBufferZeroKnownBound = buffer->isCPUBacked();
- bufferState.fBoundBufferUniqueID = buffer->uniqueID();
+ } else if (buffer->uniqueID() != bufferState->fBoundBufferUniqueID) {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
+ GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
+ bufferState->fBufferZeroKnownBound = false;
+ bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
}
- return bufferState.fGLTarget;
+ return bufferState->fGLTarget;
}
void GrGLGpu::disableScissor() {
if (kNo_TriState != fHWScissorSettings.fEnabled) {
@@ -3327,7 +3316,7 @@
1, 0,
1, 1
};
- fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), kVertex_GrBufferType,
+ fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
kStatic_GrAccessPattern, vdata);
}
if (!fCopyProgramArrayBuffer) {
@@ -3815,7 +3804,7 @@
1, 0,
1, 1
};
- fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), kVertex_GrBufferType,
+ fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
kStatic_GrAccessPattern, vdata);
}
if (!fMipmapProgramArrayBuffer) {
@@ -4213,7 +4202,7 @@
} else {
if (ibuf) {
// bindBuffer implicitly binds VAO 0 when binding an index buffer.
- gpu->bindBuffer(kIndex_GrBufferType, ibuf);
+ gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
} else {
this->setVertexArrayID(gpu, 0);
}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index fede5cc..5167b4a 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -74,7 +74,7 @@
// returns the GL target the buffer was bound to.
// When 'type' is kIndex_GrBufferType, this function will also implicitly bind the default VAO.
// If the caller wishes to bind an index buffer to a specific VAO, it can call glBind directly.
- GrGLenum bindBuffer(GrBufferType type, const GrBuffer*);
+ GrGLenum bindBuffer(GrGpuBufferType type, const GrBuffer*);
// The GrGLGpuRTCommandBuffer does not buffer up draws before submitting them to the gpu.
// Thus this is the implementation of the draw call for the corresponding passthrough function
@@ -187,7 +187,7 @@
sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel texels[], int mipLevelCount) override;
- sk_sp<GrBuffer> onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
+ sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data) override;
sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
@@ -557,7 +557,13 @@
fBoundBufferUniqueID.makeInvalid();
fBufferZeroKnownBound = false;
}
- } fHWBufferState[kGrBufferTypeCount];
+ } fHWBufferState[kGrGpuBufferTypeCount];
+
+ auto* hwBufferState(GrGpuBufferType type) {
+ unsigned typeAsUInt = static_cast<unsigned>(type);
+ SkASSERT(typeAsUInt < SK_ARRAY_COUNT(fHWBufferState));
+ return &fHWBufferState[typeAsUInt];
+ }
struct {
GrBlendEquation fEquation;
diff --git a/src/gpu/gl/GrGLVertexArray.cpp b/src/gpu/gl/GrGLVertexArray.cpp
index 062ca4f..6b12a3f 100644
--- a/src/gpu/gl/GrGLVertexArray.cpp
+++ b/src/gpu/gl/GrGLVertexArray.cpp
@@ -94,7 +94,7 @@
array->fGPUType != gpuType ||
array->fStride != stride ||
array->fOffset != offsetInBytes) {
- gpu->bindBuffer(kVertex_GrBufferType, vertexBuffer);
+ gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer);
const AttribLayout& layout = attrib_layout(cpuType);
const GrGLvoid* offsetAsPtr = reinterpret_cast<const GrGLvoid*>(offsetInBytes);
if (GrSLTypeIsFloatType(gpuType)) {