blob: 36c043178a3307214f3d1fff5071ee012b6ba538 [file] [log] [blame]
bsalomon@google.com1c13c962011-02-14 16:51:21 +00001/*
epoger@google.comec3ed6a2011-07-28 14:26:00 +00002 * Copyright 2010 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
bsalomon@google.com1c13c962011-02-14 16:51:21 +00006 */
7
8#include "GrBufferAllocPool.h"
bsalomoneb1cb5c2015-05-22 08:01:09 -07009#include "GrCaps.h"
robertphillips1b8e1b52015-06-24 06:54:10 -070010#include "GrContext.h"
Robert Phillips6be756b2018-01-16 15:07:54 -050011#include "GrContextPriv.h"
Brian Salomondbf70722019-02-07 11:31:24 -050012#include "GrCpuBuffer.h"
bsalomon@google.comc26d94f2013-03-25 18:19:00 +000013#include "GrGpu.h"
Brian Salomondbf70722019-02-07 11:31:24 -050014#include "GrGpuBuffer.h"
robertphillips1b8e1b52015-06-24 06:54:10 -070015#include "GrResourceProvider.h"
bsalomon@google.com1c13c962011-02-14 16:51:21 +000016#include "GrTypes.h"
Hal Canary50dbc092018-06-12 14:50:37 -040017#include "SkMacros.h"
Mike Reedfe266c22018-01-17 11:55:07 -050018#include "SkSafeMath.h"
bsalomon3512eda2014-06-26 12:56:22 -070019#include "SkTraceEvent.h"
20
Brian Salomon601ac802019-02-07 13:37:16 -050021sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
22 int maxBuffersToCache) {
23 return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
24}
25
26GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
27 : fMaxBuffersToCache(maxBuffersToCache) {
28 if (fMaxBuffersToCache) {
29 fBuffers.reset(new Buffer[fMaxBuffersToCache]);
30 }
31}
32
33sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
34 bool mustBeInitialized) {
35 SkASSERT(size > 0);
36 Buffer* result = nullptr;
37 if (size == kDefaultBufferSize) {
38 int i = 0;
39 for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
40 SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
41 if (fBuffers[i].fBuffer->unique()) {
42 result = &fBuffers[i];
43 }
44 }
45 if (!result && i < fMaxBuffersToCache) {
46 fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
47 result = &fBuffers[i];
48 }
49 }
50 Buffer tempResult;
51 if (!result) {
52 tempResult.fBuffer = GrCpuBuffer::Make(size);
53 result = &tempResult;
54 }
55 if (mustBeInitialized && !result->fCleared) {
56 result->fCleared = true;
57 memset(result->fBuffer->data(), 0, result->fBuffer->size());
58 }
59 return result->fBuffer;
60}
61
62void GrBufferAllocPool::CpuBufferCache::releaseAll() {
63 for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
64 fBuffers[i].fBuffer.reset();
65 fBuffers[i].fCleared = false;
66 }
67}
68
69//////////////////////////////////////////////////////////////////////////////
70
// In debug builds VALIDATE forwards to the member function validate(); in release
// builds it is a no-op with the same call syntax.
#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

// Unmaps a block's GPU buffer, emitting a trace event that records how much of the
// mapped range was never written. Must only be used on non-CPU (GrGpuBuffer) blocks.
#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

// Out-of-line definition for the in-class constexpr (required pre-C++17 ODR-use).
constexpr size_t GrBufferAllocPool::kDefaultBufferSize;
bsalomon@google.com11f0b512011-03-29 20:52:23 +000087
// Constructs a pool that sub-allocates from buffers of the given type, created via
// the supplied GrGpu. The optional CPU-buffer cache is shared for staging memory.
// fBlocks(8) preallocates room for 8 block records to avoid early reallocation.
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}
bsalomon@google.com1c13c962011-02-14 16:51:21 +000094
robertphillips1b8e1b52015-06-24 06:54:10 -070095void GrBufferAllocPool::deleteBlocks() {
bsalomon@google.com1c13c962011-02-14 16:51:21 +000096 if (fBlocks.count()) {
Brian Salomon12d22642019-01-29 14:38:50 -050097 GrBuffer* buffer = fBlocks.back().fBuffer.get();
Brian Salomondbf70722019-02-07 11:31:24 -050098 if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
bsalomon3512eda2014-06-26 12:56:22 -070099 UNMAP_BUFFER(fBlocks.back());
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000100 }
101 }
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000102 while (!fBlocks.empty()) {
robertphillips91d06bc2015-05-06 04:38:36 -0700103 this->destroyBlock();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000104 }
robertphillips1b8e1b52015-06-24 06:54:10 -0700105 SkASSERT(!fBufferPtr);
106}
107
// Validates invariants (debug only) and releases all blocks; block destruction
// also drops the sk_sp references to the underlying buffers.
GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}
112
// Returns the pool to its freshly constructed state: no blocks, no bytes in use,
// and (via resetCpuData(0)) no CPU staging buffer.
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}
120
// Finishes the current write session. For a GPU-backed newest block this either
// unmaps the buffer (if it was mapped for writing) or uploads the CPU staging
// data written so far. CPU-backed blocks need neither. Clears fBufferPtr so the
// next makeSpace() starts a new session.
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                // We were writing into the CPU staging buffer; push only the
                // bytes actually consumed from this block to the GPU.
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}
139
#ifdef SK_DEBUG
// Debug-only consistency check of the pool's invariants:
//  - if a write session is open (fBufferPtr set), it targets the newest block,
//    either via a mapped GPU buffer, a CPU buffer, or the staging buffer;
//  - no block other than the newest may be mapped;
//  - fBytesInUse matches the per-block accounting (skipped for blocks whose GPU
//    buffer was abandoned/destroyed);
//  - at most one trailing unused block exists, and only when unusedBlockAllowed.
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        // A GPU-backed, unmapped newest block implies we're writing to staging memory.
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    // All blocks except the newest must be unmapped.
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    // Re-derive the total in-use byte count; abandon the check entirely if any
    // GPU buffer was destroyed out from under the pool.
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif
180
// Sub-allocates `size` bytes aligned to `alignment` from the pool. On success
// returns a CPU-writable pointer for the caller to fill, and sets *buffer/*offset
// to the backing buffer and the byte offset of the allocation within it.
// Returns nullptr on overflow or if a new block could not be created.
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    // Fast path: carve the allocation out of the currently open block.
    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        // Guard pad + size against size_t overflow.
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            // Zero the alignment padding so uninitialized bytes never reach the GPU.
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    // Slow path: open a fresh block (at least kDefaultBufferSize big) and
    // allocate from its start, so no alignment padding is needed.
    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
232
Brian Osman49b7b6f2017-06-20 14:43:58 -0400233void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
234 size_t fallbackSize,
235 size_t alignment,
Brian Salomon12d22642019-01-29 14:38:50 -0500236 sk_sp<const GrBuffer>* buffer,
Brian Osman49b7b6f2017-06-20 14:43:58 -0400237 size_t* offset,
238 size_t* actualSize) {
239 VALIDATE();
240
241 SkASSERT(buffer);
242 SkASSERT(offset);
243 SkASSERT(actualSize);
244
245 if (fBufferPtr) {
246 BufferBlock& back = fBlocks.back();
Brian Salomondbf70722019-02-07 11:31:24 -0500247 size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
Brian Osman49b7b6f2017-06-20 14:43:58 -0400248 size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
249 if ((minSize + pad) <= back.fBytesFree) {
250 // Consume padding first, to make subsequent alignment math easier
251 memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
252 usedBytes += pad;
253 back.fBytesFree -= pad;
254 fBytesInUse += pad;
255
Brian Salomon4b8178f2018-10-12 13:18:27 -0400256 // Give caller all remaining space in this block up to fallbackSize (but aligned
257 // correctly)
258 size_t size;
259 if (back.fBytesFree >= fallbackSize) {
260 SkASSERT(GrSizeAlignDown(fallbackSize, alignment) == fallbackSize);
261 size = fallbackSize;
262 } else {
263 size = GrSizeAlignDown(back.fBytesFree, alignment);
264 }
Brian Osman49b7b6f2017-06-20 14:43:58 -0400265 *offset = usedBytes;
266 *buffer = back.fBuffer;
267 *actualSize = size;
268 back.fBytesFree -= size;
269 fBytesInUse += size;
270 VALIDATE();
271 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
272 }
273 }
274
275 // We could honor the space request using by a partial update of the current
276 // VB (if there is room). But we don't currently use draw calls to GL that
277 // allow the driver to know that previously issued draws won't read from
278 // the part of the buffer we update. Also, the GL buffer implementation
279 // may be cheating on the actual buffer size by shrinking the buffer on
280 // updateData() if the amount of data passed is less than the full buffer
281 // size.
282
283 if (!this->createBlock(fallbackSize)) {
284 return nullptr;
285 }
286 SkASSERT(fBufferPtr);
287
288 *offset = 0;
289 BufferBlock& back = fBlocks.back();
290 *buffer = back.fBuffer;
291 *actualSize = fallbackSize;
292 back.fBytesFree -= fallbackSize;
293 fBytesInUse += fallbackSize;
294 VALIDATE();
295 return fBufferPtr;
296}
297
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000298void GrBufferAllocPool::putBack(size_t bytes) {
299 VALIDATE();
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000300
301 while (bytes) {
robertphillips91d06bc2015-05-06 04:38:36 -0700302 // caller shouldn't try to put back more than they've taken
tfarina@chromium.orgf6de4752013-08-17 00:02:59 +0000303 SkASSERT(!fBlocks.empty());
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000304 BufferBlock& block = fBlocks.back();
Brian Salomondbf70722019-02-07 11:31:24 -0500305 size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000306 if (bytes >= bytesUsed) {
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000307 bytes -= bytesUsed;
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000308 fBytesInUse -= bytesUsed;
bsalomon@google.com6513cd02011-08-05 20:12:30 +0000309 // if we locked a vb to satisfy the make space and we're releasing
commit-bot@chromium.org8341eb72014-05-07 20:51:05 +0000310 // beyond it, then unmap it.
Brian Salomondbf70722019-02-07 11:31:24 -0500311 GrBuffer* buffer = block.fBuffer.get();
312 if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
bsalomon3512eda2014-06-26 12:56:22 -0700313 UNMAP_BUFFER(block);
bsalomon@google.com6513cd02011-08-05 20:12:30 +0000314 }
315 this->destroyBlock();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000316 } else {
bsalomon@google.com25fb21f2011-06-21 18:17:25 +0000317 block.fBytesFree += bytes;
318 fBytesInUse -= bytes;
319 bytes = 0;
320 break;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000321 }
322 }
robertphillips1b8e1b52015-06-24 06:54:10 -0700323
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000324 VALIDATE();
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000325}
326
// Appends a new block backed by a buffer of at least kDefaultBufferSize bytes,
// closes out the previous block (unmap or staging flush), and establishes
// fBufferPtr as the write target for the new block: the CPU buffer's memory,
// a mapped GPU buffer, or the shared CPU staging buffer as a fallback.
// Returns false (leaving the pool unchanged) if the buffer couldn't be created.
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = SkTMax(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        // Roll back the speculative push_back on failure.
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        // Close out the previous block: either unmap its GPU buffer or upload
        // the staging bytes written so far.
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    // Mapping unsupported, below threshold, or failed: write through staging memory.
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}
378
379void GrBufferAllocPool::destroyBlock() {
tfarina@chromium.orgf6de4752013-08-17 00:02:59 +0000380 SkASSERT(!fBlocks.empty());
Brian Salomondbf70722019-02-07 11:31:24 -0500381 SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
382 !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000383 fBlocks.pop_back();
bsalomon7dea7b72015-08-19 08:26:51 -0700384 fBufferPtr = nullptr;
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000385}
386
Brian Salomon601ac802019-02-07 13:37:16 -0500387void GrBufferAllocPool::resetCpuData(size_t newSize) {
388 SkASSERT(newSize >= kDefaultBufferSize || !newSize);
389 if (!newSize) {
390 fCpuStagingBuffer.reset();
391 return;
bsalomon7dea7b72015-08-19 08:26:51 -0700392 }
Brian Salomon601ac802019-02-07 13:37:16 -0500393 if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
394 return;
Brian Salomon58f153c2018-10-18 21:51:15 -0400395 }
Brian Salomon601ac802019-02-07 13:37:16 -0500396 bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
397 fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
398 : GrCpuBuffer::Make(newSize);
bsalomon7dea7b72015-08-19 08:26:51 -0700399}
400
// Uploads the first flushSize bytes of the CPU staging buffer (fBufferPtr) into
// the block's GPU buffer. Prefers map/memcpy/unmap when mapping is supported and
// the transfer exceeds the caps threshold; otherwise (or if mapping fails) falls
// back to updateData().
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    // We must currently be writing through the staging buffer.
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}
422
Brian Salomon12d22642019-01-29 14:38:50 -0500423sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
Robert Phillips9da87e02019-02-04 13:26:26 -0500424 auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
robertphillips1b8e1b52015-06-24 06:54:10 -0700425
Brian Salomondbf70722019-02-07 11:31:24 -0500426 if (fGpu->caps()->preferClientSideDynamicBuffers()) {
Brian Salomon601ac802019-02-07 13:37:16 -0500427 bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
428 return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
429 : GrCpuBuffer::Make(size);
Brian Salomondbf70722019-02-07 11:31:24 -0500430 }
431 return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000432}
433
434////////////////////////////////////////////////////////////////////////////////
435
// Convenience pool fixed to vertex-type GPU buffers.
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000438
jvanverth@google.coma6338982013-01-31 21:34:25 +0000439void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000440 int vertexCount,
Brian Salomon12d22642019-01-29 14:38:50 -0500441 sk_sp<const GrBuffer>* buffer,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000442 int* startVertex) {
tfarina@chromium.orgf6de4752013-08-17 00:02:59 +0000443 SkASSERT(vertexCount >= 0);
bsalomon49f085d2014-09-05 13:34:00 -0700444 SkASSERT(buffer);
445 SkASSERT(startVertex);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000446
Robert Phillipsc787e492017-02-28 11:26:32 -0500447 size_t offset SK_INIT_TO_AVOID_WARNING;
Mike Reedfe266c22018-01-17 11:55:07 -0500448 void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
jvanverth@google.coma6338982013-01-31 21:34:25 +0000449 vertexSize,
cdalton397536c2016-03-25 12:15:03 -0700450 buffer,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000451 &offset);
452
tfarina@chromium.orgf6de4752013-08-17 00:02:59 +0000453 SkASSERT(0 == offset % vertexSize);
robertphillips@google.comadacc702013-10-14 21:53:24 +0000454 *startVertex = static_cast<int>(offset / vertexSize);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000455 return ptr;
456}
457
Brian Osman49b7b6f2017-06-20 14:43:58 -0400458void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
Brian Salomon12d22642019-01-29 14:38:50 -0500459 int fallbackVertexCount,
460 sk_sp<const GrBuffer>* buffer, int* startVertex,
461 int* actualVertexCount) {
Brian Osman49b7b6f2017-06-20 14:43:58 -0400462 SkASSERT(minVertexCount >= 0);
463 SkASSERT(fallbackVertexCount >= minVertexCount);
464 SkASSERT(buffer);
465 SkASSERT(startVertex);
466 SkASSERT(actualVertexCount);
467
468 size_t offset SK_INIT_TO_AVOID_WARNING;
469 size_t actualSize SK_INIT_TO_AVOID_WARNING;
Mike Reedfe266c22018-01-17 11:55:07 -0500470 void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
471 SkSafeMath::Mul(vertexSize, fallbackVertexCount),
Brian Osman49b7b6f2017-06-20 14:43:58 -0400472 vertexSize,
473 buffer,
474 &offset,
475 &actualSize);
476
477 SkASSERT(0 == offset % vertexSize);
478 *startVertex = static_cast<int>(offset / vertexSize);
479
480 SkASSERT(0 == actualSize % vertexSize);
481 SkASSERT(actualSize >= vertexSize * minVertexCount);
482 *actualVertexCount = static_cast<int>(actualSize / vertexSize);
483
484 return ptr;
485}
486
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000487////////////////////////////////////////////////////////////////////////////////
488
// Convenience pool fixed to index-type GPU buffers (16-bit indices).
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000491
Brian Salomon12d22642019-01-29 14:38:50 -0500492void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000493 int* startIndex) {
tfarina@chromium.orgf6de4752013-08-17 00:02:59 +0000494 SkASSERT(indexCount >= 0);
bsalomon49f085d2014-09-05 13:34:00 -0700495 SkASSERT(buffer);
496 SkASSERT(startIndex);
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000497
Robert Phillipsc787e492017-02-28 11:26:32 -0500498 size_t offset SK_INIT_TO_AVOID_WARNING;
Mike Reedfe266c22018-01-17 11:55:07 -0500499 void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000500 sizeof(uint16_t),
cdalton397536c2016-03-25 12:15:03 -0700501 buffer,
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000502 &offset);
503
tfarina@chromium.orgf6de4752013-08-17 00:02:59 +0000504 SkASSERT(0 == offset % sizeof(uint16_t));
robertphillips@google.comadacc702013-10-14 21:53:24 +0000505 *startIndex = static_cast<int>(offset / sizeof(uint16_t));
bsalomon@google.com1c13c962011-02-14 16:51:21 +0000506 return ptr;
507}
Brian Osman49b7b6f2017-06-20 14:43:58 -0400508
509void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
Brian Salomon12d22642019-01-29 14:38:50 -0500510 sk_sp<const GrBuffer>* buffer, int* startIndex,
Brian Osman49b7b6f2017-06-20 14:43:58 -0400511 int* actualIndexCount) {
512 SkASSERT(minIndexCount >= 0);
513 SkASSERT(fallbackIndexCount >= minIndexCount);
514 SkASSERT(buffer);
515 SkASSERT(startIndex);
516 SkASSERT(actualIndexCount);
517
518 size_t offset SK_INIT_TO_AVOID_WARNING;
519 size_t actualSize SK_INIT_TO_AVOID_WARNING;
Mike Reedfe266c22018-01-17 11:55:07 -0500520 void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
521 SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
Brian Osman49b7b6f2017-06-20 14:43:58 -0400522 sizeof(uint16_t),
523 buffer,
524 &offset,
525 &actualSize);
526
527 SkASSERT(0 == offset % sizeof(uint16_t));
528 *startIndex = static_cast<int>(offset / sizeof(uint16_t));
529
530 SkASSERT(0 == actualSize % sizeof(uint16_t));
531 SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
532 *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
533 return ptr;
534}