Use different classes for client-side arrays and GPU buffer objects.
GrBuffer is a base class for GrGpuBuffer and GrCpuBuffer. GrGpuBuffer is a
GrGpuResource, while GrBuffer and GrCpuBuffer are not. This allows
GrCpuBuffers to exist outside of the GrGpuResourceCache.
Also removes flags from GrResourceProvider buffer factory function. The
only flag still in use was kRequireGpuMemory. Now CPU buffers are made
without using GrResourceProvider.
Change-Id: I82670d1316e28fd6331ca36b26c8c4ead33846f9
Reviewed-on: https://skia-review.googlesource.com/c/188823
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp
index dd3e1c5..4335409 100644
--- a/src/gpu/gl/GrGLBuffer.cpp
+++ b/src/gpu/gl/GrGLBuffer.cpp
@@ -176,8 +176,8 @@
case GrGLCaps::kMapBuffer_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Let driver know it can discard the old data
- if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->sizeInBytes()) {
- GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
}
GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break;
@@ -185,30 +185,30 @@
case GrGLCaps::kMapBufferRange_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping.
- if (fGLSizeInBytes != this->sizeInBytes()) {
- GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ if (fGLSizeInBytes != this->size()) {
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
}
GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
// TODO: Make this a function parameter.
writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
}
- GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->sizeInBytes(),
- readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
+ GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(),
+ readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
break;
}
case GrGLCaps::kChromium_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping.
- if (fGLSizeInBytes != this->sizeInBytes()) {
- GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ if (fGLSizeInBytes != this->size()) {
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
}
- GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->sizeInBytes(),
- readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
+ readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break;
}
}
- fGLSizeInBytes = this->sizeInBytes();
+ fGLSizeInBytes = this->size();
VALIDATE();
}
@@ -251,15 +251,15 @@
SkASSERT(!this->isMapped());
VALIDATE();
- if (srcSizeInBytes > this->sizeInBytes()) {
+ if (srcSizeInBytes > this->size()) {
return false;
}
- SkASSERT(srcSizeInBytes <= this->sizeInBytes());
+ SkASSERT(srcSizeInBytes <= this->size());
// bindbuffer handles dirty context
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
if (this->glCaps().useBufferDataNullHint()) {
- if (this->sizeInBytes() == srcSizeInBytes) {
+ if (this->size() == srcSizeInBytes) {
GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
} else {
// Before we call glBufferSubData we give the driver a hint using
@@ -269,10 +269,10 @@
// assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents.
// TODO I think we actually want to try calling bufferData here
- GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
}
- fGLSizeInBytes = this->sizeInBytes();
+ fGLSizeInBytes = this->size();
} else {
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
@@ -296,7 +296,7 @@
void GrGLBuffer::validate() const {
SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
- SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->sizeInBytes());
+ SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
}
#endif
diff --git a/src/gpu/gl/GrGLBuffer.h b/src/gpu/gl/GrGLBuffer.h
index 18480ff..76d902c 100644
--- a/src/gpu/gl/GrGLBuffer.h
+++ b/src/gpu/gl/GrGLBuffer.h
@@ -8,13 +8,13 @@
#ifndef GrGLBuffer_DEFINED
#define GrGLBuffer_DEFINED
-#include "GrBuffer.h"
+#include "GrGpuBuffer.h"
#include "gl/GrGLTypes.h"
class GrGLGpu;
class GrGLCaps;
-class GrGLBuffer : public GrBuffer {
+class GrGLBuffer : public GrGpuBuffer {
public:
static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data = nullptr);
@@ -28,7 +28,7 @@
/**
* Returns the actual size of the underlying GL buffer object. In certain cases we may make this
- * smaller than the size reported by GrBuffer.
+ * smaller than the size reported by GrGpuBuffer.
*/
size_t glSizeInBytes() const { return fGLSizeInBytes; }
@@ -62,7 +62,7 @@
size_t fGLSizeInBytes;
bool fHasAttachedToTexture;
- typedef GrBuffer INHERITED;
+ typedef GrGpuBuffer INHERITED;
};
#endif
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index a22df4f..ee743d9 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -8,6 +8,7 @@
#include "GrGLGpu.h"
#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
+#include "GrCpuBuffer.h"
#include "GrFixedClip.h"
#include "GrGLBuffer.h"
#include "GrGLGpuCommandBuffer.h"
@@ -842,8 +843,8 @@
}
bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
- GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
- size_t rowBytes) {
+ GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
+ size_t offset, size_t rowBytes) {
GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
GrPixelConfig texConfig = glTex->config();
SkASSERT(this->caps()->isConfigTexturable(texConfig));
@@ -864,7 +865,7 @@
GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
SkASSERT(!transferBuffer->isMapped());
- SkASSERT(!transferBuffer->isCPUBacked());
+ SkASSERT(!transferBuffer->isCpuBuffer());
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
@@ -1847,8 +1848,8 @@
////////////////////////////////////////////////////////////////////////////////
-sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
- GrAccessPattern accessPattern, const void* data) {
+sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
}
@@ -2063,7 +2064,8 @@
GrGLAttribArrayState* attribState;
if (indexBuffer) {
- SkASSERT(indexBuffer && !indexBuffer->isMapped());
+ SkASSERT(indexBuffer->isCpuBuffer() ||
+ !static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
} else {
attribState = fHWVertexArrayState.bindInternalVertexArray(this);
@@ -2073,9 +2075,10 @@
attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);
if (int vertexStride = fHWProgram->vertexStride()) {
- SkASSERT(vertexBuffer && !vertexBuffer->isMapped());
- size_t bufferOffset = vertexBuffer->baseOffset();
- bufferOffset += baseVertex * static_cast<size_t>(vertexStride);
+ SkASSERT(vertexBuffer);
+ SkASSERT(vertexBuffer->isCpuBuffer() ||
+ !static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
+ size_t bufferOffset = baseVertex * static_cast<size_t>(vertexStride);
for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
const auto& attrib = fHWProgram->vertexAttribute(i);
static constexpr int kDivisor = 0;
@@ -2084,9 +2087,10 @@
}
}
if (int instanceStride = fHWProgram->instanceStride()) {
- SkASSERT(instanceBuffer && !instanceBuffer->isMapped());
- size_t bufferOffset = instanceBuffer->baseOffset();
- bufferOffset += baseInstance * static_cast<size_t>(instanceStride);
+ SkASSERT(instanceBuffer);
+ SkASSERT(instanceBuffer->isCpuBuffer() ||
+ !static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
+ size_t bufferOffset = baseInstance * static_cast<size_t>(instanceStride);
int attribIdx = fHWProgram->numVertexAttributes();
for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
const auto& attrib = fHWProgram->instanceAttribute(i);
@@ -2107,13 +2111,14 @@
}
auto* bufferState = this->hwBufferState(type);
- if (buffer->isCPUBacked()) {
+ if (buffer->isCpuBuffer()) {
if (!bufferState->fBufferZeroKnownBound) {
GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
bufferState->fBufferZeroKnownBound = true;
bufferState->fBoundBufferUniqueID.makeInvalid();
}
- } else if (buffer->uniqueID() != bufferState->fBoundBufferUniqueID) {
+ } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
+ bufferState->fBoundBufferUniqueID) {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
bufferState->fBufferZeroKnownBound = false;
@@ -2608,21 +2613,29 @@
fStats.incNumDraws();
}
+static const GrGLvoid* element_ptr(const GrBuffer* indexBuffer, int baseIndex) {
+ size_t baseOffset = baseIndex * sizeof(uint16_t);
+ if (indexBuffer->isCpuBuffer()) {
+ return static_cast<const GrCpuBuffer*>(indexBuffer)->data() + baseOffset;
+ } else {
+ return reinterpret_cast<const GrGLvoid*>(baseOffset);
+ }
+}
+
void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer,
int indexCount, int baseIndex, uint16_t minIndexValue,
uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) {
const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
- GrGLvoid* const indices = reinterpret_cast<void*>(indexBuffer->baseOffset() +
- sizeof(uint16_t) * baseIndex);
+ const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart);
if (this->glCaps().drawRangeElementsSupport()) {
GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount,
- GR_GL_UNSIGNED_SHORT, indices));
+ GR_GL_UNSIGNED_SHORT, elementPtr));
} else {
- GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices));
+ GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr));
}
fStats.incNumDraws();
}
@@ -2649,13 +2662,12 @@
int instanceCount, int baseInstance,
GrPrimitiveRestart enablePrimitiveRestart) {
const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
- GrGLvoid* indices = reinterpret_cast<void*>(indexBuffer->baseOffset() +
- sizeof(uint16_t) * baseIndex);
+ const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
for (int i = 0; i < instanceCount; i += maxInstances) {
this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i,
enablePrimitiveRestart);
- GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices,
+ GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr,
SkTMin(instanceCount - i, maxInstances)));
fStats.incNumDraws();
}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 5167b4a..83d8bf2 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -187,8 +187,8 @@
sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel texels[], int mipLevelCount) override;
- sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
- const void* data) override;
+ sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+ const void* data) override;
sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
GrIOType) override;
@@ -233,7 +233,7 @@
const GrMipLevel texels[], int mipLevelCount) override;
bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
- GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
// Before calling any variation of TexImage, TexSubImage, etc..., call this to ensure that the
// PIXEL_UNPACK_BUFFER is unbound.
diff --git a/src/gpu/gl/GrGLVertexArray.cpp b/src/gpu/gl/GrGLVertexArray.cpp
index 6b12a3f..ab52b25 100644
--- a/src/gpu/gl/GrGLVertexArray.cpp
+++ b/src/gpu/gl/GrGLVertexArray.cpp
@@ -6,6 +6,7 @@
*/
#include "GrGLVertexArray.h"
+#include "GrCpuBuffer.h"
#include "GrGLBuffer.h"
#include "GrGLGpu.h"
@@ -89,14 +90,32 @@
SkASSERT(index >= 0 && index < fAttribArrayStates.count());
SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport());
AttribArrayState* array = &fAttribArrayStates[index];
- if (array->fVertexBufferUniqueID != vertexBuffer->uniqueID() ||
+ const char* offsetAsPtr;
+ bool bufferChanged = false;
+ if (vertexBuffer->isCpuBuffer()) {
+ if (!array->fUsingCpuBuffer) {
+ bufferChanged = true;
+ array->fUsingCpuBuffer = true;
+ }
+ offsetAsPtr = static_cast<const GrCpuBuffer*>(vertexBuffer)->data() + offsetInBytes;
+ } else {
+ auto gpuBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
+ if (array->fUsingCpuBuffer || array->fVertexBufferUniqueID != gpuBuffer->uniqueID()) {
+ bufferChanged = true;
+ array->fVertexBufferUniqueID = gpuBuffer->uniqueID();
+ }
+ offsetAsPtr = reinterpret_cast<const char*>(offsetInBytes);
+ }
+ if (bufferChanged ||
array->fCPUType != cpuType ||
array->fGPUType != gpuType ||
array->fStride != stride ||
- array->fOffset != offsetInBytes) {
+ array->fOffset != offsetAsPtr) {
+ // We always have to call this if we're going to change the array pointer. 'array' is
+ // tracking the last buffer used to setup attrib pointers, not the last buffer bound.
+ // GrGLGpu will avoid redundant binds.
gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer);
const AttribLayout& layout = attrib_layout(cpuType);
- const GrGLvoid* offsetAsPtr = reinterpret_cast<const GrGLvoid*>(offsetInBytes);
if (GrSLTypeIsFloatType(gpuType)) {
GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
layout.fCount,
@@ -113,11 +132,10 @@
stride,
offsetAsPtr));
}
- array->fVertexBufferUniqueID = vertexBuffer->uniqueID();
array->fCPUType = cpuType;
array->fGPUType = gpuType;
array->fStride = stride;
- array->fOffset = offsetInBytes;
+ array->fOffset = offsetAsPtr;
}
if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) {
SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect.
@@ -179,15 +197,19 @@
GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) {
GrGLAttribArrayState* state = this->bind(gpu);
- if (state && fIndexBufferUniqueID != ibuff->uniqueID()) {
- if (ibuff->isCPUBacked()) {
- GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
- } else {
+ if (!state) {
+ return nullptr;
+ }
+ if (ibuff->isCpuBuffer()) {
+ GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
+ } else {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
+ if (fIndexBufferUniqueID != glBuffer->uniqueID()) {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
- GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
- glBuffer->bufferID()));
+ GR_GL_CALL(gpu->glInterface(),
+ BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, glBuffer->bufferID()));
+ fIndexBufferUniqueID = glBuffer->uniqueID();
}
- fIndexBufferUniqueID = ibuff->uniqueID();
}
return state;
}
diff --git a/src/gpu/gl/GrGLVertexArray.h b/src/gpu/gl/GrGLVertexArray.h
index 93bd526..4e28e62 100644
--- a/src/gpu/gl/GrGLVertexArray.h
+++ b/src/gpu/gl/GrGLVertexArray.h
@@ -75,13 +75,15 @@
void invalidate() {
fVertexBufferUniqueID.makeInvalid();
fDivisor = kInvalidDivisor;
+ fUsingCpuBuffer = false;
}
GrGpuResource::UniqueID fVertexBufferUniqueID;
+ bool fUsingCpuBuffer;
GrVertexAttribType fCPUType;
GrSLType fGPUType;
GrGLsizei fStride;
- size_t fOffset;
+ const GrGLvoid* fOffset;
int fDivisor;
};