Rename from "(un)lock" to "(un)map" for geometry buffers.
This better reflects OpenGL terminology and is less overloaded ("lock" is used w.r.t. the resource cache).
R=robertphillips@google.com
Author: bsalomon@google.com
Review URL: https://codereview.chromium.org/275493004
git-svn-id: http://skia.googlecode.com/svn/trunk@14628 2bbb7eff-a529-9590-31e7-b0007b416f81
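
The rename is mechanical; no call site changes behavior. A minimal sketch of
the caller-side pattern under the new names (buffer creation elided; map(),
unmap(), isMapped(), and mapPtr() are the renamed entry points from the diff
below):

    // Write-only update of a GrGeometryBuffer, post-rename.
    void* ptr = buffer->map();      // may return NULL if the backend can't map
    if (NULL != ptr) {
        // ... write vertex/index data through ptr ...
        SkASSERT(buffer->isMapped());
        buffer->unmap();            // ptr is invalid from here on
    }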
diff --git a/src/gpu/GrAAHairLinePathRenderer.cpp b/src/gpu/GrAAHairLinePathRenderer.cpp
index 864fa68..0014bbe 100644
--- a/src/gpu/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/GrAAHairLinePathRenderer.cpp
@@ -50,7 +50,7 @@
kNumLineSegsInIdxBuffer;
static bool push_quad_index_data(GrIndexBuffer* qIdxBuffer) {
- uint16_t* data = (uint16_t*) qIdxBuffer->lock();
+ uint16_t* data = (uint16_t*) qIdxBuffer->map();
bool tempData = NULL == data;
if (tempData) {
data = SkNEW_ARRAY(uint16_t, kNumQuadsInIdxBuffer * kIdxsPerQuad);
@@ -86,13 +86,13 @@
delete[] data;
return ret;
} else {
- qIdxBuffer->unlock();
+ qIdxBuffer->unmap();
return true;
}
}
static bool push_line_index_data(GrIndexBuffer* lIdxBuffer) {
- uint16_t* data = (uint16_t*) lIdxBuffer->lock();
+ uint16_t* data = (uint16_t*) lIdxBuffer->map();
bool tempData = NULL == data;
if (tempData) {
data = SkNEW_ARRAY(uint16_t, kNumLineSegsInIdxBuffer * kIdxsPerLineSeg);
@@ -139,7 +139,7 @@
delete[] data;
return ret;
} else {
- lIdxBuffer->unlock();
+ lIdxBuffer->unmap();
return true;
}
}
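
Both helpers above share one fallback shape: try map(), and if the backend
cannot map the buffer, stage the indices in a heap array and upload them with
updateData(). A distilled sketch of that pattern (fill_indices is a
hypothetical stand-in for the quad/line-segment index-writing loops):

    static bool push_index_data(GrIndexBuffer* buffer, int count) {
        uint16_t* data = (uint16_t*) buffer->map();
        bool tempData = NULL == data;
        if (tempData) {
            // map() failed or is unsupported; fall back to CPU staging.
            data = SkNEW_ARRAY(uint16_t, count);
        }
        fill_indices(data, count);  // hypothetical: write the repeating index pattern
        if (tempData) {
            bool ret = buffer->updateData(data, count * sizeof(uint16_t));
            delete[] data;
            return ret;
        }
        buffer->unmap();
        return true;
    }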
diff --git a/src/gpu/GrAARectRenderer.cpp b/src/gpu/GrAARectRenderer.cpp
index eebda01..673cb5a 100644
--- a/src/gpu/GrAARectRenderer.cpp
+++ b/src/gpu/GrAARectRenderer.cpp
@@ -311,7 +311,7 @@
if (NULL == fAAFillRectIndexBuffer) {
fAAFillRectIndexBuffer = gpu->createIndexBuffer(kAAFillRectIndexBufferSize, false);
if (NULL != fAAFillRectIndexBuffer) {
- uint16_t* data = (uint16_t*) fAAFillRectIndexBuffer->lock();
+ uint16_t* data = (uint16_t*) fAAFillRectIndexBuffer->map();
bool useTempData = (NULL == data);
if (useTempData) {
data = SkNEW_ARRAY(uint16_t, kNumAAFillRectsInIndexBuffer * kIndicesPerAAFillRect);
@@ -331,7 +331,7 @@
}
SkDELETE_ARRAY(data);
} else {
- fAAFillRectIndexBuffer->unlock();
+ fAAFillRectIndexBuffer->unmap();
}
}
}
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index 7318cd0..03d43c9 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -56,8 +56,8 @@
VALIDATE();
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
- if (buffer->isLocked()) {
- buffer->unlock();
+ if (buffer->isMapped()) {
+ buffer->unmap();
}
}
while (!fBlocks.empty()) {
@@ -79,8 +79,8 @@
fBytesInUse = 0;
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
- if (buffer->isLocked()) {
- buffer->unlock();
+ if (buffer->isMapped()) {
+ buffer->unmap();
}
}
// fPreallocBuffersInUse will be decremented down to zero in the while loop
@@ -101,16 +101,16 @@
VALIDATE();
}
-void GrBufferAllocPool::unlock() {
+void GrBufferAllocPool::unmap() {
VALIDATE();
if (NULL != fBufferPtr) {
BufferBlock& block = fBlocks.back();
- if (block.fBuffer->isLocked()) {
- block.fBuffer->unlock();
+ if (block.fBuffer->isMapped()) {
+ block.fBuffer->unmap();
} else {
size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
- flushCpuData(fBlocks.back().fBuffer, flushSize);
+ this->flushCpuData(fBlocks.back().fBuffer, flushSize);
}
fBufferPtr = NULL;
}
@@ -121,18 +121,18 @@
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
if (NULL != fBufferPtr) {
SkASSERT(!fBlocks.empty());
- if (fBlocks.back().fBuffer->isLocked()) {
+ if (fBlocks.back().fBuffer->isMapped()) {
GrGeometryBuffer* buf = fBlocks.back().fBuffer;
- SkASSERT(buf->lockPtr() == fBufferPtr);
+ SkASSERT(buf->mapPtr() == fBufferPtr);
} else {
SkASSERT(fCpuData.get() == fBufferPtr);
}
} else {
- SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
+ SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
}
size_t bytesInUse = 0;
for (int i = 0; i < fBlocks.count() - 1; ++i) {
- SkASSERT(!fBlocks[i].fBuffer->isLocked());
+ SkASSERT(!fBlocks[i].fBuffer->isMapped());
}
for (int i = 0; i < fBlocks.count(); ++i) {
size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
@@ -236,9 +236,9 @@
bytes -= bytesUsed;
fBytesInUse -= bytesUsed;
- // if we locked a vb to satisfy the make space and we're releasing
- // beyond it, then unlock it.
- if (block.fBuffer->isLocked()) {
- block.fBuffer->unlock();
+ // if we mapped a vb to satisfy the makeSpace request and we're releasing
+ // beyond it, then unmap it.
+ if (block.fBuffer->isMapped()) {
+ block.fBuffer->unmap();
}
this->destroyBlock();
} else {
@@ -286,8 +286,8 @@
if (NULL != fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
- if (prev.fBuffer->isLocked()) {
- prev.fBuffer->unlock();
+ if (prev.fBuffer->isMapped()) {
+ prev.fBuffer->unmap();
} else {
flushCpuData(prev.fBuffer,
prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
@@ -297,22 +297,22 @@
SkASSERT(NULL == fBufferPtr);
- // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
- // Otherwise when buffer locking is supported:
- // a) If the frequently reset hint is set we only lock when the requested size meets a
+ // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
+ // Otherwise when buffer mapping is supported:
+ // a) If the frequently reset hint is set we only map when the requested size meets a
- // threshold (since we don't expect it is likely that we will see more vertex data)
+ // threshold (since it is unlikely that we will see more vertex data)
- // b) If the hint is not set we lock if the buffer size is greater than the threshold.
- bool attemptLock = block.fBuffer->isCPUBacked();
- if (!attemptLock && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
+ // b) If the hint is not set we map if the buffer size is greater than the threshold.
+ bool attemptMap = block.fBuffer->isCPUBacked();
+ if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
if (fFrequentResetHint) {
- attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
+ attemptMap = requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD;
} else {
- attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
+ attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
}
}
- if (attemptLock) {
- fBufferPtr = block.fBuffer->lock();
+ if (attemptMap) {
+ fBufferPtr = block.fBuffer->map();
}
if (NULL == fBufferPtr) {
@@ -337,7 +337,7 @@
--fPreallocBuffersInUse;
}
}
- SkASSERT(!block.fBuffer->isLocked());
+ SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
fBufferPtr = NULL;
@@ -346,17 +346,17 @@
void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
size_t flushSize) {
SkASSERT(NULL != buffer);
- SkASSERT(!buffer->isLocked());
+ SkASSERT(!buffer->isMapped());
SkASSERT(fCpuData.get() == fBufferPtr);
SkASSERT(flushSize <= buffer->gpuMemorySize());
VALIDATE(true);
if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
- flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
- void* data = buffer->lock();
+ flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
+ void* data = buffer->map();
if (NULL != data) {
memcpy(data, fBufferPtr, flushSize);
- buffer->unlock();
+ buffer->unmap();
return;
}
}
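
Restating the pool's post-rename mapping policy as a standalone predicate may
help reviewers; this is a sketch using only names that appear in the hunks
above (should_map itself is hypothetical):

    static bool should_map(const GrGeometryBuffer* buffer, GrGpu* gpu,
                           bool frequentResetHint, size_t requestSize, size_t size) {
        if (buffer->isCPUBacked()) {
            return true;   // mapping a CPU-backed buffer is free and saves a copy
        }
        if (GrDrawTargetCaps::kNone_MapFlags == gpu->caps()->mapBufferFlags()) {
            return false;  // the backend cannot map buffers at all
        }
        // Only large transfers are worth a map; small ones go through the
        // pool's CPU staging buffer and flushCpuData()/updateData() instead.
        return frequentResetHint ? requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD
                                 : size > GR_GEOM_BUFFER_MAP_THRESHOLD;
    }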
diff --git a/src/gpu/GrBufferAllocPool.h b/src/gpu/GrBufferAllocPool.h
index 9a31cc2..291d781 100644
--- a/src/gpu/GrBufferAllocPool.h
+++ b/src/gpu/GrBufferAllocPool.h
@@ -20,7 +20,7 @@
*
* The pool allows a client to make space for geometry and then put back excess
* space if it over allocated. When a client is ready to draw from the pool
- * it calls unlock on the pool ensure buffers are ready for drawing. The pool
+ * it calls unmap on the pool to ensure buffers are ready for drawing. The pool
* can be reset after drawing is completed to recycle space.
*
* At creation time a minimum per-buffer size can be specified. Additionally,
@@ -30,10 +30,10 @@
class GrBufferAllocPool : SkNoncopyable {
public:
/**
- * Ensures all buffers are unlocked and have all data written to them.
+ * Ensures all buffers are unmapped and have all data written to them.
* Call before drawing using buffers from the pool.
*/
- void unlock();
+ void unmap();
/**
* Invalidates all the data in the pool, unrefs non-preallocated buffers.
@@ -77,7 +77,7 @@
* @param gpu The GrGpu used to create the buffers.
* @param bufferType The type of buffers to create.
* @param frequentResetHint A hint that indicates that the pool
- * should expect frequent unlock() calls
+ * should expect frequent unmap() calls
* (as opposed to many makeSpace / acquires
* between resets).
* @param bufferSize The minimum size of created buffers.
@@ -109,11 +109,11 @@
- * data is given to the caller. The buffer may or may not be locked. The
+ * data is given to the caller. The buffer may or may not be mapped. The
* returned ptr remains valid until any of the following:
* *makeSpace is called again.
- * *unlock is called.
+ * *unmap is called.
* *reset is called.
* *this object is destroyed.
*
- * Once unlock on the pool is called the data is guaranteed to be in the
+ * Once unmap on the pool is called the data is guaranteed to be in the
* buffer at the offset indicated by offset. Until that time it may be
- * in temporary storage and/or the buffer may be locked.
+ * in temporary storage and/or the buffer may be mapped.
*
@@ -190,7 +190,7 @@
*
* @param gpu The GrGpu used to create the vertex buffers.
* @param frequentResetHint A hint that indicates that the pool
- * should expect frequent unlock() calls
+ * should expect frequent unmap() calls
* (as opposed to many makeSpace / acquires
* between resets).
* @param bufferSize The minimum size of created VBs This value
@@ -209,11 +209,11 @@
- * the vertices given to the caller. The buffer may or may not be locked.
+ * the vertices given to the caller. The buffer may or may not be mapped.
* The returned ptr remains valid until any of the following:
* *makeSpace is called again.
- * *unlock is called.
+ * *unmap is called.
* *reset is called.
* *this object is destroyed.
*
- * Once unlock on the pool is called the vertices are guaranteed to be in
+ * Once unmap on the pool is called the vertices are guaranteed to be in
* the buffer at the offset indicated by startVertex. Until that time they
- * may be in temporary storage and/or the buffer may be locked.
+ * may be in temporary storage and/or the buffer may be mapped.
*
@@ -278,7 +278,7 @@
*
* @param gpu The GrGpu used to create the index buffers.
* @param frequentResetHint A hint that indicates that the pool
- * should expect frequent unlock() calls
+ * should expect frequent unmap() calls
* (as opposed to many makeSpace / acquires
* between resets).
* @param bufferSize The minimum size of created IBs This value
@@ -297,11 +297,11 @@
- * the indices is given to the caller. The buffer may or may not be locked.
+ * the indices are given to the caller. The buffer may or may not be mapped.
* The returned ptr remains valid until any of the following:
* *makeSpace is called again.
- * *unlock is called.
+ * *unmap is called.
* *reset is called.
* *this object is destroyed.
*
- * Once unlock on the pool is called the indices are guaranteed to be in the
+ * Once unmap on the pool is called the indices are guaranteed to be in the
* buffer at the offset indicated by startIndex. Until that time they may be
- * in temporary storage and/or the buffer may be locked.
+ * in temporary storage and/or the buffer may be mapped.
*
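
Read together, these comments imply the following client lifecycle for the
pool (a sketch; the exact makeSpace() signature differs between the vertex
and index subclasses and is elided here):

    // 1. Reserve space; the returned pointer may alias a mapped buffer or
    //    the pool's temporary CPU storage.
    void* ptr = pool->makeSpace(/* size, alignment, out-params elided */);
    // 2. Write geometry through ptr.
    // 3. Make the data visible to the GPU before drawing.
    pool->unmap();
    // 4. Issue draws that source from the pool's buffers.
    // 5. Recycle the space once drawing has completed.
    pool->reset();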
diff --git a/src/gpu/GrGeometryBuffer.h b/src/gpu/GrGeometryBuffer.h
index 2a5aab7..1e1a367 100644
--- a/src/gpu/GrGeometryBuffer.h
+++ b/src/gpu/GrGeometryBuffer.h
@@ -30,46 +30,46 @@
/**
* Returns true if the buffer is a wrapper around a CPU array. If true it
- * indicates that lock will always succeed and will be free.
+ * indicates that map will always succeed and will be free.
*/
bool isCPUBacked() const { return fCPUBacked; }
/**
- * Locks the buffer to be written by the CPU.
+ * Maps the buffer to be written by the CPU.
*
* The previous content of the buffer is invalidated. It is an error
- * to draw from the buffer while it is locked. It is an error to call lock
- * on an already locked buffer. It may fail if the backend doesn't support
- * locking the buffer. If the buffer is CPU backed then it will always
- * succeed and is a free operation. Must be matched by an unlock() call.
- * Currently only one lock at a time is supported (no nesting of
- * lock/unlock).
+ * to draw from the buffer while it is mapped. It is an error to call map
+ * on an already mapped buffer. It may fail if the backend doesn't support
+ * mapping the buffer. If the buffer is CPU backed then it will always
+ * succeed and is a free operation. Must be matched by an unmap() call.
+ * Currently only one map at a time is supported (no nesting of
+ * map/unmap).
*
- * @return a pointer to the data or NULL if the lock fails.
+ * @return a pointer to the data or NULL if the map fails.
*/
- virtual void* lock() = 0;
+ virtual void* map() = 0;
/**
- * Returns the same ptr that lock() returned at time of lock or NULL if the
- * is not locked.
+ * Returns the same ptr that map() returned at the time of mapping, or NULL
+ * if the buffer is not mapped.
*
- * @return ptr to locked buffer data or undefined if buffer is not locked.
+ * @return ptr to mapped buffer data, or NULL if the buffer is not mapped.
*/
- virtual void* lockPtr() const = 0;
+ virtual void* mapPtr() const = 0;
/**
- * Unlocks the buffer.
+ * Unmaps the buffer.
*
- * The pointer returned by the previous lock call will no longer be valid.
+ * The pointer returned by the previous map call will no longer be valid.
*/
- virtual void unlock() = 0;
+ virtual void unmap() = 0;
/**
- Queries whether the buffer has been locked.
- @return true if the buffer is locked, false otherwise.
+ Queries whether the buffer has been mapped.
+ @return true if the buffer is mapped, false otherwise.
*/
- virtual bool isLocked() const = 0;
+ virtual bool isMapped() const = 0;
/**
* Updates the buffer data.
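
The contract above is easiest to see in a trivial conforming implementation.
A sketch of a purely CPU-backed buffer (CpuBackedBuffer is hypothetical; the
real CPU-backed path lives in GrGLBufferImpl below):

    class CpuBackedBuffer : public GrGeometryBuffer {
    public:
        virtual void* map() SK_OVERRIDE {
            SkASSERT(!this->isMapped());  // no nested map/unmap allowed
            fMapPtr = fStorage;           // CPU backed: always succeeds, free
            return fMapPtr;
        }
        virtual void* mapPtr() const SK_OVERRIDE { return fMapPtr; }
        virtual void unmap() SK_OVERRIDE {
            SkASSERT(this->isMapped());   // unmap must match a prior map
            fMapPtr = NULL;               // the map() pointer is now invalid
        }
        virtual bool isMapped() const SK_OVERRIDE { return NULL != fMapPtr; }
        // (other GrGeometryBuffer pure virtuals, e.g. updateData, elided)
    private:
        void* fStorage;  // the wrapped CPU array
        void* fMapPtr;   // NULL whenever the buffer is not mapped
    };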
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index bc92952..111f632 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -303,10 +303,10 @@
GrGpu* me = const_cast<GrGpu*>(this);
fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
if (NULL != fQuadIndexBuffer) {
- uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
+ uint16_t* indices = (uint16_t*)fQuadIndexBuffer->map();
if (NULL != indices) {
fill_indices(indices, MAX_QUADS);
- fQuadIndexBuffer->unlock();
+ fQuadIndexBuffer->unmap();
} else {
indices = (uint16_t*)sk_malloc_throw(SIZE);
fill_indices(indices, MAX_QUADS);
@@ -422,12 +422,12 @@
void GrGpu::finalizeReservedVertices() {
SkASSERT(NULL != fVertexPool);
- fVertexPool->unlock();
+ fVertexPool->unmap();
}
void GrGpu::finalizeReservedIndices() {
SkASSERT(NULL != fIndexPool);
- fIndexPool->unlock();
+ fIndexPool->unmap();
}
void GrGpu::prepareVertexPool() {
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index fc16237..11f87e0 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -101,8 +101,8 @@
*
* @param size size in bytes of the vertex buffer
* @param dynamic hints whether the data will be frequently changed
- * by either GrVertexBuffer::lock or
- * GrVertexBuffer::updateData.
+ * by either GrVertexBuffer::map() or
+ * GrVertexBuffer::updateData().
*
* @return The vertex buffer if successful, otherwise NULL.
*/
@@ -113,8 +113,8 @@
*
* @param size size in bytes of the index buffer
* @param dynamic hints whether the data will be frequently changed
- * by either GrIndexBuffer::lock or
- * GrIndexBuffer::updateData.
+ * by either GrIndexBuffer::map() or
+ * GrIndexBuffer::updateData().
*
* @return The index buffer if successful, otherwise NULL.
*/
diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
index 44d0b1a..4931f47 100644
--- a/src/gpu/GrInOrderDrawBuffer.cpp
+++ b/src/gpu/GrInOrderDrawBuffer.cpp
@@ -569,8 +569,8 @@
GrAutoTRestore<bool> flushRestore(&fFlushing);
fFlushing = true;
- fVertexPool.unlock();
- fIndexPool.unlock();
+ fVertexPool.unmap();
+ fIndexPool.unmap();
GrDrawTarget::AutoClipRestore acr(fDstGpu);
AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);
diff --git a/src/gpu/gl/GrGLBufferImpl.cpp b/src/gpu/gl/GrGLBufferImpl.cpp
index 46e1f79..ae578a2 100644
--- a/src/gpu/gl/GrGLBufferImpl.cpp
+++ b/src/gpu/gl/GrGLBufferImpl.cpp
@@ -23,7 +23,7 @@
GrGLBufferImpl::GrGLBufferImpl(GrGpuGL* gpu, const Desc& desc, GrGLenum bufferType)
: fDesc(desc)
, fBufferType(bufferType)
- , fLockPtr(NULL) {
+ , fMapPtr(NULL) {
if (0 == desc.fID) {
fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
fGLSizeInBytes = 0;
@@ -52,14 +52,14 @@
fDesc.fID = 0;
fGLSizeInBytes = 0;
}
- fLockPtr = NULL;
+ fMapPtr = NULL;
VALIDATE();
}
void GrGLBufferImpl::abandon() {
fDesc.fID = 0;
fGLSizeInBytes = 0;
- fLockPtr = NULL;
+ fMapPtr = NULL;
sk_free(fCPUData);
fCPUData = NULL;
VALIDATE();
@@ -76,11 +76,11 @@
VALIDATE();
}
-void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
+void* GrGLBufferImpl::map(GrGpuGL* gpu) {
VALIDATE();
- SkASSERT(!this->isLocked());
+ SkASSERT(!this->isMapped());
if (0 == fDesc.fID) {
- fLockPtr = fCPUData;
+ fMapPtr = fCPUData;
} else {
switch (gpu->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
@@ -95,7 +95,7 @@
BufferData(fBufferType, fGLSizeInBytes, NULL,
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
- GR_GL_CALL_RET(gpu->glInterface(), fLockPtr,
+ GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
break;
case GrGLCaps::kMapBufferRange_MapBufferType: {
@@ -110,7 +110,7 @@
static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
GR_GL_MAP_WRITE_BIT;
GR_GL_CALL_RET(gpu->glInterface(),
- fLockPtr,
+ fMapPtr,
MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
break;
}
@@ -124,18 +124,18 @@
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GR_GL_CALL_RET(gpu->glInterface(),
- fLockPtr,
+ fMapPtr,
MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
break;
}
}
VALIDATE();
- return fLockPtr;
+ return fMapPtr;
}
-void GrGLBufferImpl::unlock(GrGpuGL* gpu) {
+void GrGLBufferImpl::unmap(GrGpuGL* gpu) {
VALIDATE();
- SkASSERT(this->isLocked());
+ SkASSERT(this->isMapped());
if (0 != fDesc.fID) {
switch (gpu->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
@@ -148,20 +148,20 @@
break;
case GrGLCaps::kChromium_MapBufferType:
this->bind(gpu);
- GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fLockPtr));
+ GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fMapPtr));
break;
}
}
- fLockPtr = NULL;
+ fMapPtr = NULL;
}
-bool GrGLBufferImpl::isLocked() const {
+bool GrGLBufferImpl::isMapped() const {
VALIDATE();
- return NULL != fLockPtr;
+ return NULL != fMapPtr;
}
bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes) {
- SkASSERT(!this->isLocked());
+ SkASSERT(!this->isMapped());
VALIDATE();
if (srcSizeInBytes > fDesc.fSizeInBytes) {
return false;
@@ -190,7 +190,7 @@
#else
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
- // portions of the buffer (lock() does a glBufferData(..size, NULL..))
+ // portions of the buffer (map() does a glBufferData(..size, NULL..))
bool doSubData = false;
#if GR_GL_MAC_BUFFER_OBJECT_PERFOMANCE_WORKAROUND
static int N = 0;
@@ -221,6 +221,6 @@
// SkASSERT((0 == fDesc.fID) == (NULL != fCPUData));
SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
SkASSERT(NULL == fCPUData || 0 == fGLSizeInBytes);
- SkASSERT(NULL == fLockPtr || NULL != fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
- SkASSERT(NULL == fCPUData || NULL == fLockPtr || fCPUData == fLockPtr);
+ SkASSERT(NULL == fMapPtr || NULL != fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
+ SkASSERT(NULL == fCPUData || NULL == fMapPtr || fCPUData == fMapPtr);
}
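
For orientation, the three mapping paths the switch statements above select
between correspond to these GL entry points (shown in raw GL terms as a
sketch; Skia routes them through GR_GL_CALL_RET and its own GR_GL_* constants):

    // kMapBuffer_MapBufferType: classic desktop GL / OES_mapbuffer. The
    // preceding glBufferData(..., NULL, ...) orphans the old contents.
    void* p = glMapBuffer(target, GL_WRITE_ONLY);

    // kMapBufferRange_MapBufferType: ES3 / ARB_map_buffer_range, which can
    // invalidate the previous contents explicitly via the access bits.
    void* p2 = glMapBufferRange(target, 0, sizeInBytes,
                                GL_MAP_INVALIDATE_BUFFER_BIT | GL_MAP_WRITE_BIT);

    // kChromium_MapBufferType: the CHROMIUM_map_sub extension.
    void* p3 = glMapBufferSubDataCHROMIUM(target, 0, sizeInBytes, GL_WRITE_ONLY);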
diff --git a/src/gpu/gl/GrGLBufferImpl.h b/src/gpu/gl/GrGLBufferImpl.h
index 19d23e0..3ab84f6 100644
--- a/src/gpu/gl/GrGLBufferImpl.h
+++ b/src/gpu/gl/GrGLBufferImpl.h
@@ -40,10 +40,10 @@
void bind(GrGpuGL* gpu) const;
- void* lock(GrGpuGL* gpu);
- void* lockPtr() const { return fLockPtr; }
- void unlock(GrGpuGL* gpu);
- bool isLocked() const;
+ void* map(GrGpuGL* gpu);
+ void* mapPtr() const { return fMapPtr; }
+ void unmap(GrGpuGL* gpu);
+ bool isMapped() const;
bool updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes);
private:
@@ -52,7 +52,7 @@
Desc fDesc;
GrGLenum fBufferType; // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER
void* fCPUData;
- void* fLockPtr;
+ void* fMapPtr;
size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
// smaller or larger than the size in fDesc.
diff --git a/src/gpu/gl/GrGLIndexBuffer.cpp b/src/gpu/gl/GrGLIndexBuffer.cpp
index 4e7f989..38dd15d 100644
--- a/src/gpu/gl/GrGLIndexBuffer.cpp
+++ b/src/gpu/gl/GrGLIndexBuffer.cpp
@@ -26,26 +26,26 @@
INHERITED::onAbandon();
}
-void* GrGLIndexBuffer::lock() {
+void* GrGLIndexBuffer::map() {
if (!this->wasDestroyed()) {
- return fImpl.lock(this->getGpuGL());
+ return fImpl.map(this->getGpuGL());
} else {
return NULL;
}
}
-void* GrGLIndexBuffer::lockPtr() const {
- return fImpl.lockPtr();
+void* GrGLIndexBuffer::mapPtr() const {
+ return fImpl.mapPtr();
}
-void GrGLIndexBuffer::unlock() {
+void GrGLIndexBuffer::unmap() {
if (!this->wasDestroyed()) {
- fImpl.unlock(this->getGpuGL());
+ fImpl.unmap(this->getGpuGL());
}
}
-bool GrGLIndexBuffer::isLocked() const {
- return fImpl.isLocked();
+bool GrGLIndexBuffer::isMapped() const {
+ return fImpl.isMapped();
}
bool GrGLIndexBuffer::updateData(const void* src, size_t srcSizeInBytes) {
diff --git a/src/gpu/gl/GrGLIndexBuffer.h b/src/gpu/gl/GrGLIndexBuffer.h
index 893e357..3960f58 100644
--- a/src/gpu/gl/GrGLIndexBuffer.h
+++ b/src/gpu/gl/GrGLIndexBuffer.h
@@ -32,10 +32,10 @@
}
// overrides of GrIndexBuffer
- virtual void* lock();
- virtual void* lockPtr() const;
- virtual void unlock();
- virtual bool isLocked() const;
+ virtual void* map() SK_OVERRIDE;
+ virtual void* mapPtr() const SK_OVERRIDE;
+ virtual void unmap() SK_OVERRIDE;
+ virtual bool isMapped() const SK_OVERRIDE;
virtual bool updateData(const void* src, size_t srcSizeInBytes);
protected:
diff --git a/src/gpu/gl/GrGLVertexBuffer.cpp b/src/gpu/gl/GrGLVertexBuffer.cpp
index 8bfe1f0..af60993 100644
--- a/src/gpu/gl/GrGLVertexBuffer.cpp
+++ b/src/gpu/gl/GrGLVertexBuffer.cpp
@@ -27,26 +27,26 @@
INHERITED::onAbandon();
}
-void* GrGLVertexBuffer::lock() {
+void* GrGLVertexBuffer::map() {
if (!this->wasDestroyed()) {
- return fImpl.lock(this->getGpuGL());
+ return fImpl.map(this->getGpuGL());
} else {
return NULL;
}
}
-void* GrGLVertexBuffer::lockPtr() const {
- return fImpl.lockPtr();
+void* GrGLVertexBuffer::mapPtr() const {
+ return fImpl.mapPtr();
}
-void GrGLVertexBuffer::unlock() {
+void GrGLVertexBuffer::unmap() {
if (!this->wasDestroyed()) {
- fImpl.unlock(this->getGpuGL());
+ fImpl.unmap(this->getGpuGL());
}
}
-bool GrGLVertexBuffer::isLocked() const {
- return fImpl.isLocked();
+bool GrGLVertexBuffer::isMapped() const {
+ return fImpl.isMapped();
}
bool GrGLVertexBuffer::updateData(const void* src, size_t srcSizeInBytes) {
diff --git a/src/gpu/gl/GrGLVertexBuffer.h b/src/gpu/gl/GrGLVertexBuffer.h
index 1b9c4f1..ddab829 100644
--- a/src/gpu/gl/GrGLVertexBuffer.h
+++ b/src/gpu/gl/GrGLVertexBuffer.h
@@ -32,10 +32,10 @@
}
// overrides of GrVertexBuffer
- virtual void* lock();
- virtual void* lockPtr() const;
- virtual void unlock();
- virtual bool isLocked() const;
+ virtual void* map() SK_OVERRIDE;
+ virtual void* mapPtr() const SK_OVERRIDE;
+ virtual void unmap() SK_OVERRIDE;
+ virtual bool isMapped() const SK_OVERRIDE;
virtual bool updateData(const void* src, size_t srcSizeInBytes);
protected:
diff --git a/src/gpu/gl/GrGpuGL_program.cpp b/src/gpu/gl/GrGpuGL_program.cpp
index b9b0984..bc3a5b2 100644
--- a/src/gpu/gl/GrGpuGL_program.cpp
+++ b/src/gpu/gl/GrGpuGL_program.cpp
@@ -313,7 +313,7 @@
}
SkASSERT(NULL != vbuf);
- SkASSERT(!vbuf->isLocked());
+ SkASSERT(!vbuf->isMapped());
vertexOffsetInBytes += vbuf->baseOffset();
GrGLIndexBuffer* ibuf = NULL;
@@ -337,7 +337,7 @@
}
SkASSERT(NULL != ibuf);
- SkASSERT(!ibuf->isLocked());
+ SkASSERT(!ibuf->isMapped());
*indexOffsetInBytes += ibuf->baseOffset();
}
GrGLAttribArrayState* attribState =