blob: 65d76c4c96bba436bfa8eecf00874d2001d04dde [file] [log] [blame]
epoger@google.comec3ed6a2011-07-28 14:26:00 +00001
bsalomon@google.com1c13c962011-02-14 16:51:21 +00002/*
epoger@google.comec3ed6a2011-07-28 14:26:00 +00003 * Copyright 2010 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
bsalomon@google.com1c13c962011-02-14 16:51:21 +00007 */
8
epoger@google.comec3ed6a2011-07-28 14:26:00 +00009
bsalomon@google.com1c13c962011-02-14 16:51:21 +000010#include "GrBufferAllocPool.h"
11#include "GrTypes.h"
12#include "GrVertexBuffer.h"
13#include "GrIndexBuffer.h"
14#include "GrGpu.h"
15
#if GR_DEBUG
    // Debug builds: VALIDATE(...) forwards to the validate() member function.
    #define VALIDATE validate
#else
    // Release builds: validation compiles away to a no-op.
    static void VALIDATE(bool = false) {}
#endif
21
// Minimum block size: 1 << 12 == 4096 bytes (one page).
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)
24
25GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
26 BufferType bufferType,
27 bool frequentResetHint,
28 size_t blockSize,
29 int preallocBufferCnt) :
30 fBlocks(GrMax(8, 2*preallocBufferCnt)) {
bsalomon@google.com11f0b512011-03-29 20:52:23 +000031
bsalomon@google.com1c13c962011-02-14 16:51:21 +000032 GrAssert(NULL != gpu);
33 fGpu = gpu;
bsalomon@google.com11f0b512011-03-29 20:52:23 +000034 fGpu->ref();
35 fGpuIsReffed = true;
36
bsalomon@google.com1c13c962011-02-14 16:51:21 +000037 fBufferType = bufferType;
38 fFrequentResetHint = frequentResetHint;
bsalomon@google.com1c13c962011-02-14 16:51:21 +000039 fBufferPtr = NULL;
40 fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
41
bsalomon@google.com25fb21f2011-06-21 18:17:25 +000042 fBytesInUse = 0;
bsalomon@google.comb665a6b2012-03-01 20:59:28 +000043
bsalomon@google.com1c13c962011-02-14 16:51:21 +000044 fPreallocBuffersInUse = 0;
bsalomon@google.comb665a6b2012-03-01 20:59:28 +000045 fPreallocBufferStartIdx = 0;
bsalomon@google.com1c13c962011-02-14 16:51:21 +000046 for (int i = 0; i < preallocBufferCnt; ++i) {
47 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
48 if (NULL != buffer) {
49 *fPreallocBuffers.append() = buffer;
bsalomon@google.com1c13c962011-02-14 16:51:21 +000050 }
51 }
52}
53
54GrBufferAllocPool::~GrBufferAllocPool() {
55 VALIDATE();
56 if (fBlocks.count()) {
57 GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
58 if (buffer->isLocked()) {
59 buffer->unlock();
60 }
61 }
bsalomon@google.com1c13c962011-02-14 16:51:21 +000062 while (!fBlocks.empty()) {
63 destroyBlock();
64 }
bsalomon@google.com11f0b512011-03-29 20:52:23 +000065 fPreallocBuffers.unrefAll();
66 releaseGpuRef();
67}
68
69void GrBufferAllocPool::releaseGpuRef() {
70 if (fGpuIsReffed) {
71 fGpu->unref();
72 fGpuIsReffed = false;
73 }
bsalomon@google.com1c13c962011-02-14 16:51:21 +000074}
75
76void GrBufferAllocPool::reset() {
77 VALIDATE();
bsalomon@google.com25fb21f2011-06-21 18:17:25 +000078 fBytesInUse = 0;
bsalomon@google.com1c13c962011-02-14 16:51:21 +000079 if (fBlocks.count()) {
80 GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
81 if (buffer->isLocked()) {
82 buffer->unlock();
83 }
84 }
bsalomon@google.comb665a6b2012-03-01 20:59:28 +000085 // fPreallocBuffersInUse will be decremented down to zero in the while loop
86 int preallocBuffersInUse = fPreallocBuffersInUse;
bsalomon@google.com1c13c962011-02-14 16:51:21 +000087 while (!fBlocks.empty()) {
bsalomon@google.comb665a6b2012-03-01 20:59:28 +000088 this->destroyBlock();
bsalomon@google.com1c13c962011-02-14 16:51:21 +000089 }
90 if (fPreallocBuffers.count()) {
91 // must set this after above loop.
bsalomon@google.comb665a6b2012-03-01 20:59:28 +000092 fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
93 preallocBuffersInUse) %
94 fPreallocBuffers.count();
bsalomon@google.com1c13c962011-02-14 16:51:21 +000095 }
bsalomon@google.com987dbc02011-12-14 14:44:19 +000096 // we may have created a large cpu mirror of a large VB. Reset the size
97 // to match our pre-allocated VBs.
98 fCpuData.reset(fMinBlockSize);
bsalomon@google.com1c13c962011-02-14 16:51:21 +000099 GrAssert(0 == fPreallocBuffersInUse);
100 VALIDATE();
101}
102
103void GrBufferAllocPool::unlock() {
104 VALIDATE();
105
106 if (NULL != fBufferPtr) {
107 BufferBlock& block = fBlocks.back();
108 if (block.fBuffer->isLocked()) {
109 block.fBuffer->unlock();
110 } else {
bsalomon@google.comcee661a2011-07-26 12:32:36 +0000111 size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000112 flushCpuData(fBlocks.back().fBuffer, flushSize);
113 }
114 fBufferPtr = NULL;
115 }
116 VALIDATE();
117}
118
#if GR_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    // If a write pointer is outstanding it must point either into the locked
    // tail buffer or into the CPU staging store.
    if (NULL != fBufferPtr) {
        GrAssert(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* tail = fBlocks.back().fBuffer;
            GrAssert(tail->lockPtr() == fBufferPtr);
        } else {
            GrAssert(fCpuData.get() == fBufferPtr);
        }
    } else {
        GrAssert(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    // Only the tail block may ever be locked.
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        GrAssert(!fBlocks[i].fBuffer->isLocked());
    }
    // Recompute the in-use byte total and check it against the cached count.
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count(); ++i) {
        size_t used = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
        bytesInUse += used;
        GrAssert(used || unusedBlockAllowed);
    }

    GrAssert(bytesInUse == fBytesInUse);
    if (unusedBlockAllowed) {
        // At most one (the tail) block may be empty.
        GrAssert((fBytesInUse && !fBlocks.empty()) ||
                 (!fBytesInUse && (fBlocks.count() < 2)));
    } else {
        GrAssert((0 == fBytesInUse) == fBlocks.empty());
    }
}
#endif
151
152void* GrBufferAllocPool::makeSpace(size_t size,
153 size_t alignment,
154 const GrGeometryBuffer** buffer,
155 size_t* offset) {
156 VALIDATE();
157
158 GrAssert(NULL != buffer);
159 GrAssert(NULL != offset);
160
161 if (NULL != fBufferPtr) {
162 BufferBlock& back = fBlocks.back();
bsalomon@google.comcee661a2011-07-26 12:32:36 +0000163 size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000164 size_t pad = GrSizeAlignUpPad(usedBytes,
165 alignment);
166 if ((size + pad) <= back.fBytesFree) {
167 usedBytes += pad;
168 *offset = usedBytes;
169 *buffer = back.fBuffer;
170 back.fBytesFree -= size + pad;
bsalomon@google.comd5108092012-03-08 15:10:39 +0000171 fBytesInUse += size + pad;
172 VALIDATE();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000173 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
174 }
175 }
176
bsalomon@google.com96e96df2011-10-10 14:49:29 +0000177 // We could honor the space request using by a partial update of the current
178 // VB (if there is room). But we don't currently use draw calls to GL that
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000179 // allow the driver to know that previously issued draws won't read from
bsalomon@google.com96e96df2011-10-10 14:49:29 +0000180 // the part of the buffer we update. Also, the GL buffer implementation
181 // may be cheating on the actual buffer size by shrinking the buffer on
182 // updateData() if the amount of data passed is less than the full buffer
183 // size.
rmistry@google.comfbfcd562012-08-23 18:09:54 +0000184
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000185 if (!createBlock(size)) {
186 return NULL;
187 }
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000188 GrAssert(NULL != fBufferPtr);
189
190 *offset = 0;
191 BufferBlock& back = fBlocks.back();
192 *buffer = back.fBuffer;
193 back.fBytesFree -= size;
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000194 fBytesInUse += size;
195 VALIDATE();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000196 return fBufferPtr;
197}
198
199int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
200 VALIDATE();
201 if (NULL != fBufferPtr) {
202 const BufferBlock& back = fBlocks.back();
bsalomon@google.comcee661a2011-07-26 12:32:36 +0000203 size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000204 size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
205 return (back.fBytesFree - pad) / itemSize;
206 } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
207 return fMinBlockSize / itemSize;
208 }
209 return 0;
210}
211
212int GrBufferAllocPool::preallocatedBuffersRemaining() const {
213 return fPreallocBuffers.count() - fPreallocBuffersInUse;
214}
215
216int GrBufferAllocPool::preallocatedBufferCount() const {
217 return fPreallocBuffers.count();
218}
219
220void GrBufferAllocPool::putBack(size_t bytes) {
221 VALIDATE();
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000222
bsalomon@google.comb665a6b2012-03-01 20:59:28 +0000223 // if the putBack unwinds all the preallocated buffers then we will
224 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
225 // will be decremented. I will reach zero if all blocks using preallocated
226 // buffers are released.
227 int preallocBuffersInUse = fPreallocBuffersInUse;
228
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000229 while (bytes) {
230 // caller shouldnt try to put back more than they've taken
231 GrAssert(!fBlocks.empty());
232 BufferBlock& block = fBlocks.back();
bsalomon@google.comcee661a2011-07-26 12:32:36 +0000233 size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000234 if (bytes >= bytesUsed) {
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000235 bytes -= bytesUsed;
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000236 fBytesInUse -= bytesUsed;
bsalomon@google.com6513cd02011-08-05 20:12:30 +0000237 // if we locked a vb to satisfy the make space and we're releasing
238 // beyond it, then unlock it.
239 if (block.fBuffer->isLocked()) {
240 block.fBuffer->unlock();
241 }
242 this->destroyBlock();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000243 } else {
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000244 block.fBytesFree += bytes;
245 fBytesInUse -= bytes;
246 bytes = 0;
247 break;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000248 }
249 }
bsalomon@google.comb665a6b2012-03-01 20:59:28 +0000250 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
251 fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
252 preallocBuffersInUse) %
253 fPreallocBuffers.count();
254 }
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000255 VALIDATE();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000256}
257
258bool GrBufferAllocPool::createBlock(size_t requestSize) {
259
260 size_t size = GrMax(requestSize, fMinBlockSize);
261 GrAssert(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
262
263 VALIDATE();
264
265 BufferBlock& block = fBlocks.push_back();
266
267 if (size == fMinBlockSize &&
268 fPreallocBuffersInUse < fPreallocBuffers.count()) {
269
bsalomon@google.comb665a6b2012-03-01 20:59:28 +0000270 uint32_t nextBuffer = (fPreallocBuffersInUse +
271 fPreallocBufferStartIdx) %
272 fPreallocBuffers.count();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000273 block.fBuffer = fPreallocBuffers[nextBuffer];
274 block.fBuffer->ref();
275 ++fPreallocBuffersInUse;
276 } else {
277 block.fBuffer = this->createBuffer(size);
278 if (NULL == block.fBuffer) {
279 fBlocks.pop_back();
280 return false;
281 }
282 }
283
284 block.fBytesFree = size;
285 if (NULL != fBufferPtr) {
286 GrAssert(fBlocks.count() > 1);
287 BufferBlock& prev = fBlocks.fromBack(1);
288 if (prev.fBuffer->isLocked()) {
289 prev.fBuffer->unlock();
290 } else {
291 flushCpuData(prev.fBuffer,
bsalomon@google.comcee661a2011-07-26 12:32:36 +0000292 prev.fBuffer->sizeInBytes() - prev.fBytesFree);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000293 }
294 fBufferPtr = NULL;
295 }
296
297 GrAssert(NULL == fBufferPtr);
298
bsalomon@google.comee3bc3b2013-02-21 14:33:46 +0000299 // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
300 // Otherwise when buffer locking is supported:
301 // a) If the frequently reset hint is set we only lock when the requested size meets a
302 // threshold (since we don't expect it is likely that we will see more vertex data)
303 // b) If the hint is not set we lock if the buffer size is greater than the threshold.
304 bool attemptLock = block.fBuffer->isCPUBacked();
305 if (!attemptLock && fGpu->getCaps().bufferLockSupport()) {
306 if (fFrequentResetHint) {
307 attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
308 } else {
309 attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
310 }
311 }
312
313 if (attemptLock) {
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000314 fBufferPtr = block.fBuffer->lock();
315 }
316
317 if (NULL == fBufferPtr) {
bsalomon@google.com7d4679a2011-09-02 22:06:24 +0000318 fBufferPtr = fCpuData.reset(size);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000319 }
320
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000321 VALIDATE(true);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000322
323 return true;
324}
325
326void GrBufferAllocPool::destroyBlock() {
327 GrAssert(!fBlocks.empty());
328
329 BufferBlock& block = fBlocks.back();
330 if (fPreallocBuffersInUse > 0) {
331 uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
bsalomon@google.comb665a6b2012-03-01 20:59:28 +0000332 fPreallocBufferStartIdx +
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000333 (fPreallocBuffers.count() - 1)) %
334 fPreallocBuffers.count();
335 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
336 --fPreallocBuffersInUse;
337 }
338 }
339 GrAssert(!block.fBuffer->isLocked());
340 block.fBuffer->unref();
341 fBlocks.pop_back();
342 fBufferPtr = NULL;
343}
344
345void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
346 size_t flushSize) {
347 GrAssert(NULL != buffer);
348 GrAssert(!buffer->isLocked());
349 GrAssert(fCpuData.get() == fBufferPtr);
bsalomon@google.comcee661a2011-07-26 12:32:36 +0000350 GrAssert(flushSize <= buffer->sizeInBytes());
bsalomon@google.comd5108092012-03-08 15:10:39 +0000351 VALIDATE(true);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000352
bsalomon@google.comf6601872012-08-28 21:11:35 +0000353 if (fGpu->getCaps().bufferLockSupport() &&
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000354 flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
355 void* data = buffer->lock();
356 if (NULL != data) {
357 memcpy(data, fBufferPtr, flushSize);
358 buffer->unlock();
bsalomon@google.com71bd1ef2011-12-12 20:42:26 +0000359 return;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000360 }
361 }
362 buffer->updateData(fBufferPtr, flushSize);
bsalomon@google.comd5108092012-03-08 15:10:39 +0000363 VALIDATE(true);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000364}
365
366GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
367 if (kIndex_BufferType == fBufferType) {
368 return fGpu->createIndexBuffer(size, true);
369 } else {
370 GrAssert(kVertex_BufferType == fBufferType);
371 return fGpu->createVertexBuffer(size, true);
372 }
373}
374
375////////////////////////////////////////////////////////////////////////////////
376
377GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
378 bool frequentResetHint,
379 size_t bufferSize,
380 int preallocBufferCnt)
381: GrBufferAllocPool(gpu,
382 kVertex_BufferType,
383 frequentResetHint,
384 bufferSize,
385 preallocBufferCnt) {
386}
387
jvanverth@google.coma6338982013-01-31 21:34:25 +0000388void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000389 int vertexCount,
390 const GrVertexBuffer** buffer,
391 int* startVertex) {
392
393 GrAssert(vertexCount >= 0);
394 GrAssert(NULL != buffer);
395 GrAssert(NULL != startVertex);
396
bsalomon@google.com8b484412011-04-18 19:07:44 +0000397 size_t offset = 0; // assign to suppress warning
398 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
jvanverth@google.coma6338982013-01-31 21:34:25 +0000399 void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
400 vertexSize,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000401 &geomBuffer,
402 &offset);
403
404 *buffer = (const GrVertexBuffer*) geomBuffer;
jvanverth@google.coma6338982013-01-31 21:34:25 +0000405 GrAssert(0 == offset % vertexSize);
406 *startVertex = offset / vertexSize;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000407 return ptr;
408}
409
jvanverth@google.coma6338982013-01-31 21:34:25 +0000410bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000411 int vertexCount,
412 const void* vertices,
413 const GrVertexBuffer** buffer,
414 int* startVertex) {
jvanverth@google.coma6338982013-01-31 21:34:25 +0000415 void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000416 if (NULL != space) {
417 memcpy(space,
418 vertices,
jvanverth@google.coma6338982013-01-31 21:34:25 +0000419 vertexSize * vertexCount);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000420 return true;
421 } else {
422 return false;
423 }
424}
425
jvanverth@google.coma6338982013-01-31 21:34:25 +0000426int GrVertexBufferAllocPool::preallocatedBufferVertices(size_t vertexSize) const {
427 return INHERITED::preallocatedBufferSize() / vertexSize;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000428}
429
jvanverth@google.coma6338982013-01-31 21:34:25 +0000430int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const {
431 return currentBufferItems(vertexSize);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000432}
433
434////////////////////////////////////////////////////////////////////////////////
435
436GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
437 bool frequentResetHint,
438 size_t bufferSize,
439 int preallocBufferCnt)
440: GrBufferAllocPool(gpu,
441 kIndex_BufferType,
442 frequentResetHint,
443 bufferSize,
444 preallocBufferCnt) {
445}
446
447void* GrIndexBufferAllocPool::makeSpace(int indexCount,
448 const GrIndexBuffer** buffer,
449 int* startIndex) {
450
451 GrAssert(indexCount >= 0);
452 GrAssert(NULL != buffer);
453 GrAssert(NULL != startIndex);
454
bsalomon@google.com8b484412011-04-18 19:07:44 +0000455 size_t offset = 0; // assign to suppress warning
456 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000457 void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
458 sizeof(uint16_t),
459 &geomBuffer,
460 &offset);
461
462 *buffer = (const GrIndexBuffer*) geomBuffer;
463 GrAssert(0 == offset % sizeof(uint16_t));
464 *startIndex = offset / sizeof(uint16_t);
465 return ptr;
466}
467
468bool GrIndexBufferAllocPool::appendIndices(int indexCount,
469 const void* indices,
470 const GrIndexBuffer** buffer,
471 int* startIndex) {
472 void* space = makeSpace(indexCount, buffer, startIndex);
473 if (NULL != space) {
474 memcpy(space, indices, sizeof(uint16_t) * indexCount);
475 return true;
476 } else {
477 return false;
478 }
479}
480
481int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
482 return INHERITED::preallocatedBufferSize() / sizeof(uint16_t);
483}
484
485int GrIndexBufferAllocPool::currentBufferIndices() const {
486 return currentBufferItems(sizeof(uint16_t));
487}