joshualitt | 23ac62c | 2015-03-30 09:53:47 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2015 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 8 | #include "GrBatch.h" |
bsalomon | b5238a7 | 2015-05-05 07:49:49 -0700 | [diff] [blame^] | 9 | #include "GrBatchTarget.h" |
| 10 | #include "GrResourceProvider.h" |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 11 | |
| 12 | #include "GrMemoryPool.h" |
joshualitt | 23ac62c | 2015-03-30 09:53:47 -0700 | [diff] [blame] | 13 | #include "SkSpinlock.h" |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 14 | |
// TODO: I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
// but seems to be mostly consistent. There is a lot in flux right now, but we should really
// revisit this once batches are used everywhere.
| 18 | |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 19 | |
// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
// different threads. The GrContext is not used concurrently on different threads, and there is a
// memory barrier between accesses of a context on different threads. Also, there may be multiple
// GrContexts, and those contexts may be in use concurrently on different threads.
| 24 | namespace { |
joshualitt | 23ac62c | 2015-03-30 09:53:47 -0700 | [diff] [blame] | 25 | SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock); |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 26 | class MemoryPoolAccessor { |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 27 | public: |
joshualitt | 23ac62c | 2015-03-30 09:53:47 -0700 | [diff] [blame] | 28 | MemoryPoolAccessor() { gBatchSpinlock.acquire(); } |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 29 | |
joshualitt | 23ac62c | 2015-03-30 09:53:47 -0700 | [diff] [blame] | 30 | ~MemoryPoolAccessor() { gBatchSpinlock.release(); } |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 31 | |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 32 | GrMemoryPool* pool() const { |
| 33 | static GrMemoryPool gPool(16384, 16384); |
| 34 | return &gPool; |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 35 | } |
| 36 | }; |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 37 | } |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 38 | |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 39 | int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID; |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 40 | |
| 41 | void* GrBatch::operator new(size_t size) { |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 42 | return MemoryPoolAccessor().pool()->allocate(size); |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 43 | } |
| 44 | |
| 45 | void GrBatch::operator delete(void* target) { |
bsalomon | 5baedd6 | 2015-03-09 12:15:53 -0700 | [diff] [blame] | 46 | return MemoryPoolAccessor().pool()->release(target); |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 47 | } |
bsalomon | b5238a7 | 2015-05-05 07:49:49 -0700 | [diff] [blame^] | 48 | |
| 49 | void* GrBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType, |
| 50 | size_t vertexStride, const GrIndexBuffer* indexBuffer, |
| 51 | int verticesPerInstance, int indicesPerInstance, |
| 52 | int instancesToDraw) { |
| 53 | SkASSERT(!fInstancesRemaining); |
| 54 | SkASSERT(batchTarget); |
| 55 | if (!indexBuffer) { |
| 56 | return NULL; |
| 57 | } |
| 58 | const GrVertexBuffer* vertexBuffer; |
| 59 | int firstVertex; |
| 60 | int vertexCount = verticesPerInstance * instancesToDraw; |
| 61 | void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride, vertexCount, &vertexBuffer, |
| 62 | &firstVertex); |
| 63 | if (!vertices) { |
| 64 | SkDebugf("Vertices could not be allocated for instanced rendering."); |
| 65 | return NULL; |
| 66 | } |
| 67 | SkASSERT(vertexBuffer); |
| 68 | fInstancesRemaining = instancesToDraw; |
| 69 | size_t ibSize = indexBuffer->gpuMemorySize(); |
| 70 | fMaxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance)); |
| 71 | |
| 72 | fDrawInfo.initInstanced(primType, vertexBuffer, indexBuffer, |
| 73 | firstVertex, verticesPerInstance, indicesPerInstance, &fInstancesRemaining, |
| 74 | fMaxInstancesPerDraw); |
| 75 | SkASSERT(fMaxInstancesPerDraw > 0); |
| 76 | return vertices; |
| 77 | } |
| 78 | |
| 79 | void* GrBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw) { |
| 80 | SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer( |
| 81 | batchTarget->resourceProvider()->refQuadIndexBuffer()); |
| 82 | if (!quadIndexBuffer) { |
| 83 | SkDebugf("Could not get quad index buffer."); |
| 84 | return NULL; |
| 85 | } |
| 86 | return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride, |
| 87 | quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw); |
| 88 | } |