blob: b696d6b95b1585e9745d533801e9f8dfe04f6216 [file] [log] [blame]
joshualitt23ac62c2015-03-30 09:53:47 -07001/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
joshualitt4d8da812015-01-28 12:53:54 -08008#include "GrBatch.h"
bsalomonb5238a72015-05-05 07:49:49 -07009#include "GrBatchTarget.h"
10#include "GrResourceProvider.h"
joshualitt4d8da812015-01-28 12:53:54 -080011
12#include "GrMemoryPool.h"
joshualitt23ac62c2015-03-30 09:53:47 -070013#include "SkSpinlock.h"
joshualitt4d8da812015-01-28 12:53:54 -080014
// TODO I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
// but seems to be mostly consistent. There is a lot in flux right now, but we should really
// revisit this when batch is everywhere.
18
bsalomon5baedd62015-03-09 12:15:53 -070019
// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
// different threads. The GrContext is not used concurrently on different threads, and there is a
// memory barrier between accesses of a context on different threads. Also, there may be multiple
// GrContexts, and those contexts may be in use concurrently on different threads.
namespace {
SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock);

// RAII accessor for the process-wide batch memory pool: constructing one
// acquires the global spinlock and the destructor releases it, so the pool
// may only be touched while an accessor instance is alive.
class MemoryPoolAccessor {
public:
    MemoryPoolAccessor() { gBatchSpinlock.acquire(); }

    ~MemoryPoolAccessor() { gBatchSpinlock.release(); }

    // Returns the lazily-initialized global pool. Callers hold the spinlock
    // for the lifetime of this accessor, which serializes all allocate/release
    // calls on the pool.
    GrMemoryPool* pool() const {
        static GrMemoryPool gPool(16384, 16384);
        return &gPool;
    }
};
}
joshualitt4d8da812015-01-28 12:53:54 -080038
// Source counter for per-subclass IDs; kIllegalBatchID marks "not yet assigned".
int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;

// Debug-only (GR_BATCH_SPEW builds) counter used to hand out unique per-instance IDs.
GrBATCH_SPEW(int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;)
joshualitt4d8da812015-01-28 12:53:54 -080042
// Batches are allocated from the shared global pool rather than the heap.
void* GrBatch::operator new(size_t size) {
    // The MemoryPoolAccessor temporary lives to the end of the full expression,
    // so the global spinlock is held across the allocate() call itself.
    return MemoryPoolAccessor().pool()->allocate(size);
}
46
47void GrBatch::operator delete(void* target) {
bsalomon5baedd62015-03-09 12:15:53 -070048 return MemoryPoolAccessor().pool()->release(target);
joshualitt4d8da812015-01-28 12:53:54 -080049}
bsalomonb5238a72015-05-05 07:49:49 -070050
// Every batch starts with an illegal class ID; subclasses assign a real one.
// In GR_BATCH_SPEW builds, each instance additionally receives a unique debug ID.
GrBatch::GrBatch()
    : fClassID(kIllegalBatchID)
#if GR_BATCH_SPEW
    , fUniqueID(GenID(&gCurrBatchUniqueID))
#endif
{
    // Debug-only flag; presumably tracks whether the batch has been consumed — confirm at usage sites.
    SkDEBUGCODE(fUsed = false;)
}
59
bsalomonabd30f52015-08-13 13:34:48 -070060GrBatch::~GrBatch() {}
61
62//////////////////////////////////////////////////////////////////////////////
63
64GrDrawBatch::GrDrawBatch() : fPipelineInstalled(false) { }
65
GrDrawBatch::~GrDrawBatch() {
    // The pipeline was constructed in place inside fPipelineStorage (see
    // installPipeline's GrPipeline::CreateAt call), so it is never heap-deleted
    // and must be destroyed explicitly here.
    if (fPipelineInstalled) {
        this->pipeline()->~GrPipeline();
    }
}
71
bsalomonabd30f52015-08-13 13:34:48 -070072bool GrDrawBatch::installPipeline(const GrPipeline::CreateArgs& args) {
73 GrPipelineOptimizations opts;
74 void* location = fPipelineStorage.get();
75 if (!GrPipeline::CreateAt(location, args, &opts)) {
76 return false;
77 }
78 this->initBatchTracker(opts);
79 fPipelineInstalled = true;
80 return true;
81}
82
83//////////////////////////////////////////////////////////////////////////////
84
85GrVertexBatch::GrVertexBatch() : fNumberOfDraws(0) {}
86
// Reserves vertex space for instanced drawing and configures fVertices for it.
// Allocates verticesPerInstance * instancesToDraw vertices of vertexStride
// bytes from the batch target and returns a pointer to that space for the
// caller to fill in, or NULL if the index buffer is missing or the vertex
// allocation fails.
void* GrVertexBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType,
                                           size_t vertexStride, const GrIndexBuffer* indexBuffer,
                                           int verticesPerInstance, int indicesPerInstance,
                                           int instancesToDraw) {
    SkASSERT(batchTarget);
    if (!indexBuffer) {
        return NULL;
    }
    const GrVertexBuffer* vertexBuffer;
    int firstVertex;
    int vertexCount = verticesPerInstance * instancesToDraw;
    void* vertices = batchTarget->makeVertSpace(vertexStride, vertexCount,
                                                &vertexBuffer, &firstVertex);
    if (!vertices) {
        SkDebugf("Vertices could not be allocated for instanced rendering.");
        return NULL;
    }
    SkASSERT(vertexBuffer);
    // How many instances a single draw can cover is limited by the index
    // buffer's capacity, assuming 16-bit (uint16_t) indices.
    size_t ibSize = indexBuffer->gpuMemorySize();
    int maxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance));

    fVertices.initInstanced(primType, vertexBuffer, indexBuffer,
                            firstVertex, verticesPerInstance, indicesPerInstance, instancesToDraw,
                            maxInstancesPerDraw);
    return vertices;
}
113
bsalomonabd30f52015-08-13 13:34:48 -0700114void* GrVertexBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride,
115 int quadsToDraw) {
bsalomonb5238a72015-05-05 07:49:49 -0700116 SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
117 batchTarget->resourceProvider()->refQuadIndexBuffer());
118 if (!quadIndexBuffer) {
119 SkDebugf("Could not get quad index buffer.");
120 return NULL;
121 }
122 return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
123 quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
124}