/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkRWBuffer.h"

#include "SkAtomics.h"
#include "SkMemory.h"
#include "SkStream.h"

// Force small chunks to be a page's worth
static const size_t kMinAllocSize = 4096;

struct SkBufferBlock {
    SkBufferBlock*  fNext;      // updated by the writer
    size_t          fUsed;      // updated by the writer
    const size_t    fCapacity;

    SkBufferBlock(size_t capacity) : fNext(nullptr), fUsed(0), fCapacity(capacity) {}

    const void* startData() const { return this + 1; }
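    // (A block's payload lives immediately after this header in the same allocation,
    // which is why startData() is simply "this + 1".)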

    size_t avail() const { return fCapacity - fUsed; }
    void* availData() { return (char*)this->startData() + fUsed; }

    static SkBufferBlock* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        void* buffer = sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
        return new (buffer) SkBufferBlock(capacity);
    }

    // Return number of bytes actually appended. It is important that we always completely fill
    // this block before spilling into the next, since the reader uses fCapacity to know how many
    // bytes it can read.
    //
    size_t append(const void* src, size_t length) {
        this->validate();
        size_t amount = SkTMin(this->avail(), length);
        memcpy(this->availData(), src, amount);
        fUsed += amount;
        this->validate();
        return amount;
    }

    // Do not call in the reader thread, since the writer may be updating fUsed.
    // (The assertion is still true, but TSAN still may complain about its raciness.)
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fCapacity > 0);
        SkASSERT(fUsed <= fCapacity);
#endif
    }

private:
    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = kMinAllocSize - sizeof(SkBufferBlock);
        return SkTMax(length, minSize);
    }
};

struct SkBufferHead {
    mutable int32_t fRefCnt;
    SkBufferBlock   fBlock;
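    // fBlock is the first block, embedded directly in the head, so the head, the block
    // header, and the block's payload all come from a single allocation.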

    SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {}

    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = kMinAllocSize - sizeof(SkBufferHead);
        return SkTMax(length, minSize);
    }

    static SkBufferHead* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        size_t size = sizeof(SkBufferHead) + capacity;
        void* buffer = sk_malloc_throw(size);
        return new (buffer) SkBufferHead(capacity);
    }

    void ref() const {
        SkASSERT(fRefCnt > 0);
        sk_atomic_inc(&fRefCnt);
    }

    void unref() const {
        SkASSERT(fRefCnt > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
            // Like unique(), the acquire is only needed on success.
            SkBufferBlock* block = fBlock.fNext;
            sk_free((void*)this);
            while (block) {
                SkBufferBlock* next = block->fNext;
                sk_free(block);
                block = next;
            }
        }
    }

    void validate(size_t minUsed, const SkBufferBlock* tail = nullptr) const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt > 0);
        size_t totalUsed = 0;
        const SkBufferBlock* block = &fBlock;
        const SkBufferBlock* lastBlock = block;
        while (block) {
            block->validate();
            totalUsed += block->fUsed;
            lastBlock = block;
            block = block->fNext;
        }
        SkASSERT(minUsed <= totalUsed);
        if (tail) {
            SkASSERT(tail == lastBlock);
        }
#endif
    }
};

///////////////////////////////////////////////////////////////////////////////////////////////////
// The reader can only access block.fCapacity (which never changes), and cannot access
// block.fUsed, which may be updated by the writer.
//
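// A typical usage pattern (a sketch only; 'data', 'len', and use() are hypothetical):
// one thread owns the SkRWBuffer and appends to it, periodically handing out immutable
// snapshots that readers can iterate independently of further writes:
//
//     SkRWBuffer writer(4096);                               // initial capacity hint
//     writer.append(data, len, 0);
//     sk_sp<SkROBuffer> snap(writer.newRBufferSnapshot());   // safe to hand to a reader
//     SkROBuffer::Iter iter(snap.get());
//     do {
//         use(iter.data(), iter.size());
//     } while (iter.next());
//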
SkROBuffer::SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* tail)
    : fHead(head), fAvailable(available), fTail(tail)
{
    if (head) {
        fHead->ref();
        SkASSERT(available > 0);
        head->validate(available, tail);
    } else {
        SkASSERT(0 == available);
        SkASSERT(!tail);
    }
}

SkROBuffer::~SkROBuffer() {
    if (fHead) {
        fHead->unref();
    }
}

SkROBuffer::Iter::Iter(const SkROBuffer* buffer) {
    this->reset(buffer);
}

void SkROBuffer::Iter::reset(const SkROBuffer* buffer) {
    fBuffer = buffer;
    if (buffer && buffer->fHead) {
        fBlock = &buffer->fHead->fBlock;
        fRemaining = buffer->fAvailable;
    } else {
        fBlock = nullptr;
        fRemaining = 0;
    }
}

const void* SkROBuffer::Iter::data() const {
    return fRemaining ? fBlock->startData() : nullptr;
}

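// size() reports how many bytes of the current block belong to this snapshot: the last block
// visible to the snapshot may have been only partially written when the snapshot was taken,
// so fCapacity is clamped by fRemaining.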
size_t SkROBuffer::Iter::size() const {
    if (!fBlock) {
        return 0;
    }
    return SkTMin(fBlock->fCapacity, fRemaining);
}

bool SkROBuffer::Iter::next() {
    if (fRemaining) {
        fRemaining -= this->size();
        if (fBuffer->fTail == fBlock) {
            // There are more blocks, but fBuffer does not know about them.
            SkASSERT(0 == fRemaining);
            fBlock = nullptr;
        } else {
            fBlock = fBlock->fNext;
        }
    }
    return fRemaining != 0;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(nullptr), fTail(nullptr), fTotalUsed(0) {
    if (initialCapacity) {
        fHead = SkBufferHead::Alloc(initialCapacity);
        fTail = &fHead->fBlock;
    }
}

SkRWBuffer::~SkRWBuffer() {
    this->validate();
    if (fHead) {
        fHead->unref();
    }
}

// It is important that we always completely fill the current block before spilling over to the
// next, since our reader will be using fCapacity (min'd against its total available) to know how
// many bytes to read from a given block.
//
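// For example (a sketch; the sizes are hypothetical), a caller that knows roughly how much
// more data will arrive can pass that estimate as 'reserve', so a newly allocated tail block
// is sized for the remainder of the stream rather than just for this chunk:
//
//     buffer.append(chunk, chunkLen, expectedTotal - bytesSoFar);
//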
void SkRWBuffer::append(const void* src, size_t length, size_t reserve) {
    this->validate();
    if (0 == length) {
        return;
    }

    fTotalUsed += length;

    if (nullptr == fHead) {
        fHead = SkBufferHead::Alloc(length + reserve);
        fTail = &fHead->fBlock;
    }

    size_t written = fTail->append(src, length);
    SkASSERT(written <= length);
    src = (const char*)src + written;
    length -= written;

    if (length) {
        SkBufferBlock* block = SkBufferBlock::Alloc(length + reserve);
        fTail->fNext = block;
        fTail = block;
        written = fTail->append(src, length);
        SkASSERT(written == length);
    }
    this->validate();
}

#ifdef SK_DEBUG
void SkRWBuffer::validate() const {
    if (fHead) {
        fHead->validate(fTotalUsed, fTail);
    } else {
        SkASSERT(nullptr == fTail);
        SkASSERT(0 == fTotalUsed);
    }
}
#endif

SkROBuffer* SkRWBuffer::newRBufferSnapshot() const {
    return new SkROBuffer(fHead, fTotalUsed, fTail);
}

///////////////////////////////////////////////////////////////////////////////////////////////////

class SkROBufferStreamAsset : public SkStreamAsset {
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fGlobalOffset <= fBuffer->size());
        SkASSERT(fLocalOffset <= fIter.size());
        SkASSERT(fLocalOffset <= fGlobalOffset);
#endif
    }

#ifdef SK_DEBUG
    class AutoValidate {
        SkROBufferStreamAsset* fStream;
    public:
        AutoValidate(SkROBufferStreamAsset* stream) : fStream(stream) { stream->validate(); }
        ~AutoValidate() { fStream->validate(); }
    };
    #define AUTO_VALIDATE   AutoValidate av(this);
#else
    #define AUTO_VALIDATE
#endif

public:
    SkROBufferStreamAsset(const SkROBuffer* buffer) : fBuffer(SkRef(buffer)), fIter(buffer) {
        fGlobalOffset = fLocalOffset = 0;
    }

    virtual ~SkROBufferStreamAsset() { fBuffer->unref(); }

    size_t getLength() const override { return fBuffer->size(); }

    bool rewind() override {
        AUTO_VALIDATE
        fIter.reset(fBuffer);
        fGlobalOffset = fLocalOffset = 0;
        return true;
    }

    size_t read(void* dst, size_t request) override {
        AUTO_VALIDATE
        size_t bytesRead = 0;
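        // Copy what the current block still has to offer, then advance the iterator,
        // until the request is satisfied or we run out of data.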
        for (;;) {
            size_t size = fIter.size();
            SkASSERT(fLocalOffset <= size);
            size_t avail = SkTMin(size - fLocalOffset, request - bytesRead);
            if (dst) {
                memcpy(dst, (const char*)fIter.data() + fLocalOffset, avail);
                dst = (char*)dst + avail;
            }
            bytesRead += avail;
            fLocalOffset += avail;
            SkASSERT(bytesRead <= request);
            if (bytesRead == request) {
                break;
            }
            // If we get here, we've exhausted the current iter
            SkASSERT(fLocalOffset == size);
            fLocalOffset = 0;
            if (!fIter.next()) {
                break;  // ran out of data
            }
        }
        fGlobalOffset += bytesRead;
        SkASSERT(fGlobalOffset <= fBuffer->size());
        return bytesRead;
    }

    bool isAtEnd() const override {
        return fBuffer->size() == fGlobalOffset;
    }

    SkStreamAsset* duplicate() const override { return new SkROBufferStreamAsset(fBuffer); }

    size_t getPosition() const override {
        return fGlobalOffset;
    }

    bool seek(size_t position) override {
        AUTO_VALIDATE
        if (position < fGlobalOffset) {
            this->rewind();
        }
        (void)this->skip(position - fGlobalOffset);
        return true;
    }

    bool move(long offset) override {
        AUTO_VALIDATE
        offset += fGlobalOffset;
        if (offset <= 0) {
            this->rewind();
        } else {
            (void)this->seek(SkToSizeT(offset));
        }
        return true;
    }

    SkStreamAsset* fork() const override {
        SkStreamAsset* clone = this->duplicate();
        clone->seek(this->getPosition());
        return clone;
    }

private:
    const SkROBuffer*   fBuffer;
    SkROBuffer::Iter    fIter;
    size_t              fLocalOffset;
    size_t              fGlobalOffset;
};

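// The returned stream holds its own reference to a fresh read-only snapshot, so the
// SkRWBuffer may keep growing after the stream has been taken.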
SkStreamAsset* SkRWBuffer::newStreamSnapshot() const {
    sk_sp<SkROBuffer> buffer(this->newRBufferSnapshot());
    return new SkROBufferStreamAsset(buffer.get());
}