/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrBufferAllocPool.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
#include "GrIndexBuffer.h"
#include "GrGpu.h"

#if GR_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool x = false) {}
#endif

// minimum block size: (size_t)1 << 12 == 4096 bytes, i.e. one typical page
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     bool frequentResetHint,
                                     size_t blockSize,
                                     int preallocBufferCnt) :
        fBlocks(GrMax(8, 2*preallocBufferCnt)) {

    GrAssert(NULL != gpu);
    fGpu = gpu;
    fGpu->ref();
    fGpuIsReffed = true;

    fBufferType = bufferType;
    fFrequentResetHint = frequentResetHint;
    fBufferPtr = NULL;
    fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fPreallocBuffersInUse = 0;
    fPreallocBufferStartIdx = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (NULL != buffer) {
            *fPreallocBuffers.append() = buffer;
        }
    }
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    releaseGpuRef();
}

void GrBufferAllocPool::releaseGpuRef() {
    if (fGpuIsReffed) {
        fGpu->unref();
        fGpuIsReffed = false;
    }
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    // fPreallocBuffersInUse will be decremented down to zero in the while loop
    int preallocBuffersInUse = fPreallocBuffersInUse;
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // must be set after the above loop
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    // we may have created a large cpu mirror of a large VB. Reset the size
    // to match our pre-allocated VBs.
    fCpuData.reset(fMinBlockSize);
    GrAssert(0 == fPreallocBuffersInUse);
    VALIDATE();
}

void GrBufferAllocPool::unlock() {
    VALIDATE();

    if (NULL != fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isLocked()) {
            block.fBuffer->unlock();
        } else {
            size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
            flushCpuData(fBlocks.back().fBuffer, flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}

#if GR_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    if (NULL != fBufferPtr) {
        GrAssert(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            GrAssert(buf->lockPtr() == fBufferPtr);
        } else {
            GrAssert(fCpuData.get() == fBufferPtr);
        }
    } else {
        GrAssert(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        GrAssert(!fBlocks[i].fBuffer->isLocked());
    }
    for (int i = 0; i < fBlocks.count(); ++i) {
        size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
        bytesInUse += bytes;
        GrAssert(bytes || unusedBlockAllowed);
    }

    GrAssert(bytesInUse == fBytesInUse);
    if (unusedBlockAllowed) {
        GrAssert((fBytesInUse && !fBlocks.empty()) ||
                 (!fBytesInUse && (fBlocks.count() < 2)));
    } else {
        GrAssert((0 == fBytesInUse) == fBlocks.empty());
    }
}
#endif

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    GrAssert(NULL != buffer);
    GrAssert(NULL != offset);

    if (NULL != fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((size + pad) <= back.fBytesFree) {
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size;
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!createBlock(size)) {
        return NULL;
    }
    GrAssert(NULL != fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
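
// Worked example of the suballocation arithmetic above (illustrative, not
// part of the original sources): assume a 4096-byte block with 50 bytes
// already used and a request of size == 24 with alignment == 16.
// GrSizeAlignUpPad(50, 16) yields 14, so the allocation lands at offset 64
// (= 50 + 14), back.fBytesFree shrinks by 38 (= 24 + 14), and fBytesInUse
// grows by only 24 since alignment padding is not counted as "in use". The
// returned pointer is fBufferPtr + 64.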

int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
    VALIDATE();
    if (NULL != fBufferPtr) {
        const BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
        return (back.fBytesFree - pad) / itemSize;
    } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
        return fMinBlockSize / itemSize;
    }
    return 0;
}

int GrBufferAllocPool::preallocatedBuffersRemaining() const {
    return fPreallocBuffers.count() - fPreallocBuffersInUse;
}

int GrBufferAllocPool::preallocatedBufferCount() const {
    return fPreallocBuffers.count();
}

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    // if the putBack unwinds all the preallocated buffers then we will
    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
    // will be decremented. It will reach zero if all blocks using preallocated
    // buffers are released.
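    // Worked example of the rotation (illustrative, not from the original
    // sources): with fPreallocBuffers.count() == 3, fPreallocBufferStartIdx
    // == 1, and two preallocated buffers in flight (indices 1 and 2),
    // unwinding both advances the start index to (1 + 2) % 3 == 0, so the
    // next createBlock() hands out buffer 0 and the just-released buffers
    // are the last to be reused, presumably giving the GPU the longest
    // possible time to finish reading from them.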
    int preallocBuffersInUse = fPreallocBuffersInUse;

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        GrAssert(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the makeSpace and we're releasing
            // beyond it, then unlock it.
            if (block.fBuffer->isLocked()) {
                block.fBuffer->unlock();
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }
    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    VALIDATE();
}

bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = GrMax(requestSize, fMinBlockSize);
    GrAssert(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        uint32_t nextBuffer = (fPreallocBuffersInUse +
                               fPreallocBufferStartIdx) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (NULL != fBufferPtr) {
        GrAssert(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isLocked()) {
            prev.fBuffer->unlock();
        } else {
            flushCpuData(prev.fBuffer,
                         prev.fBuffer->sizeInBytes() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    GrAssert(NULL == fBufferPtr);

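    // Descriptive note (added, not in the original sources): large blocks
    // that are not expected to be reset frequently are locked (mapped)
    // directly when the backend reports fBufferLockSupport; otherwise writes
    // are staged in the fCpuData side buffer and uploaded later by
    // flushCpuData() from unlock() or the next createBlock().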
    if (fGpu->getCaps().fBufferLockSupport &&
        size > GR_GEOM_BUFFER_LOCK_THRESHOLD &&
        (!fFrequentResetHint || requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD)) {
        fBufferPtr = block.fBuffer->lock();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.reset(size);
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    GrAssert(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fPreallocBufferStartIdx +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    GrAssert(!block.fBuffer->isLocked());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}

void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
                                     size_t flushSize) {
    GrAssert(NULL != buffer);
    GrAssert(!buffer->isLocked());
    GrAssert(fCpuData.get() == fBufferPtr);
    GrAssert(flushSize <= buffer->sizeInBytes());

    if (fGpu->getCaps().fBufferLockSupport &&
        flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
        void* data = buffer->lock();
        if (NULL != data) {
            memcpy(data, fBufferPtr, flushSize);
            buffer->unlock();
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
}

GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
    if (kIndex_BufferType == fBufferType) {
        return fGpu->createIndexBuffer(size, true);
    } else {
        GrAssert(kVertex_BufferType == fBufferType);
        return fGpu->createVertexBuffer(size, true);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 bool frequentResetHint,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
: GrBufferAllocPool(gpu,
                    kVertex_BufferType,
                    frequentResetHint,
                    bufferSize,
                    preallocBufferCnt) {
}

void* GrVertexBufferAllocPool::makeSpace(GrVertexLayout layout,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    GrAssert(vertexCount >= 0);
    GrAssert(NULL != buffer);
    GrAssert(NULL != startVertex);

    size_t vSize = GrDrawTarget::VertexSize(layout);
    size_t offset = 0;                         // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vSize * vertexCount,
                                     vSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    GrAssert(0 == offset % vSize);
    *startVertex = offset / vSize;
    return ptr;
}

bool GrVertexBufferAllocPool::appendVertices(GrVertexLayout layout,
                                             int vertexCount,
                                             const void* vertices,
                                             const GrVertexBuffer** buffer,
                                             int* startVertex) {
    void* space = makeSpace(layout, vertexCount, buffer, startVertex);
    if (NULL != space) {
        memcpy(space,
               vertices,
               GrDrawTarget::VertexSize(layout) * vertexCount);
        return true;
    } else {
        return false;
    }
}
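
// Usage sketch (illustrative only; the pool parameters, the position-only
// layout value, and the surrounding draw/flush steps are assumptions, not
// taken from this file). A caller writing a quad's four positions might do:
//
//     GrVertexBufferAllocPool pool(gpu, false /* frequentResetHint */,
//                                  0 /* use default block size */,
//                                  4 /* prealloc buffer count */);
//     const GrVertexBuffer* vb = NULL;
//     int startVertex = 0;
//     GrPoint quad[4] = { {0, 0}, {1, 0}, {1, 1}, {0, 1} };
//     if (pool.appendVertices(0 /* position-only layout */, 4, quad,
//                             &vb, &startVertex)) {
//         // record a draw that reads vb at startVertex, call pool.unlock()
//         // before the draw is issued, and pool.reset() after the flush.
//     }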

int GrVertexBufferAllocPool::preallocatedBufferVertices(GrVertexLayout layout) const {
    return INHERITED::preallocatedBufferSize() /
           GrDrawTarget::VertexSize(layout);
}

int GrVertexBufferAllocPool::currentBufferVertices(GrVertexLayout layout) const {
    return currentBufferItems(GrDrawTarget::VertexSize(layout));
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               bool frequentResetHint,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
: GrBufferAllocPool(gpu,
                    kIndex_BufferType,
                    frequentResetHint,
                    bufferSize,
                    preallocBufferCnt) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    GrAssert(indexCount >= 0);
    GrAssert(NULL != buffer);
    GrAssert(NULL != startIndex);

    size_t offset = 0;                         // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    GrAssert(0 == offset % sizeof(uint16_t));
    *startIndex = offset / sizeof(uint16_t);
    return ptr;
}

bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                           const void* indices,
                                           const GrIndexBuffer** buffer,
                                           int* startIndex) {
    void* space = makeSpace(indexCount, buffer, startIndex);
    if (NULL != space) {
        memcpy(space, indices, sizeof(uint16_t) * indexCount);
        return true;
    } else {
        return false;
    }
}

int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
    return INHERITED::preallocatedBufferSize() / sizeof(uint16_t);
}

int GrIndexBufferAllocPool::currentBufferIndices() const {
    return currentBufferItems(sizeof(uint16_t));
}