/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkRecord.h"
#include <algorithm>

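// Visit every op and run its destructor: mutate(i, destroyer) hands op i to the
// Destroyer visitor (declared in SkRecord.h), which invokes that op's destructor.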
SkRecord::~SkRecord() {
    Destroyer destroyer;
    for (int i = 0; i < this->count(); i++) {
        this->mutate(i, destroyer);
    }
}

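// Double the capacity of the fRecords array. grow() is only called once the array is
// completely full, which the asserts below check.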
void SkRecord::grow() {
    SkASSERT(fCount == fReserved);
    SkASSERT(fReserved > 0);
    fReserved *= 2;
    fRecords.realloc(fReserved);
}

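// Approximate how much memory this SkRecord holds: the struct itself, the op data
// allocated from fAlloc, and the fRecords array once it has spilled past the inline
// storage.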
size_t SkRecord::bytesUsed() const {
    size_t bytes = fAlloc.approxBytesAllocated() + sizeof(SkRecord);
    // If fReserved <= kInlineRecords, we've already accounted for fRecords with sizeof(SkRecord).
    // When we go over that limit, they're allocated on the heap (and the inline space is wasted).
    if (fReserved > kInlineRecords) {
        bytes += fReserved * sizeof(Record);
    }
    return bytes;
}

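// Drop NoOps by compacting the fRecords array and shrinking fCount. Note this only
// trims the index of ops; any storage the removed ops took from fAlloc is not
// reclaimed here.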
void SkRecord::defrag() {
    // Remove all the NoOps, preserving the order of other ops, e.g.
    //      Save, ClipRect, NoOp, DrawRect, NoOp, NoOp, Restore
    //   -> Save, ClipRect, DrawRect, Restore
    Record* noops = std::remove_if(fRecords.get(), fRecords.get() + fCount,
                                   [](Record op) { return op.type() == SkRecords::NoOp_Type; });
    fCount = noops - fRecords.get();
}