/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "bench/Benchmark.h"
Michael Ludwig9abd5682020-01-08 13:50:42 -05009#include "include/private/GrTypesPriv.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050010#include "include/utils/SkRandom.h"
11#include "src/gpu/GrMemoryPool.h"
Hal Canary8a001442018-09-19 11:31:27 -040012
Michael Ludwig9abd5682020-01-08 13:50:42 -050013#include <type_traits>
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000014
Michael Ludwig9abd5682020-01-08 13:50:42 -050015namespace {
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000016
Michael Ludwig9abd5682020-01-08 13:50:42 -050017// sizeof is a multiple of GrMemoryPool::kAlignment for 4, 8, or 16 byte alignment
18using Aligned = std::aligned_storage<32, GrMemoryPool::kAlignment>::type;
19static_assert(sizeof(Aligned) == 32);
20static_assert(sizeof(Aligned) % GrMemoryPool::kAlignment == 0);
21
22// sizeof is not a multiple of GrMemoryPool::kAlignment (will not be a multiple of max_align_t
23// if it's 4, 8, or 16, as desired).
24using Unaligned = std::aligned_storage<30, 2>::type;
25static_assert(sizeof(Unaligned) == 30);
26static_assert(sizeof(Unaligned) % GrMemoryPool::kAlignment != 0);
27
28// When max_align_t == 16, 8, or 4 the padded Unaligned will also be 32
29static_assert(GrAlignTo(sizeof(Unaligned), GrMemoryPool::kAlignment) == sizeof(Aligned));
30
31// All benchmarks create and delete the same number of objects. The key difference is the order
32// of operations, the size of the objects being allocated, and the size of the pool.
33typedef void (*RunBenchProc)(GrMemoryPool*, int);
34
35}
36
37// N objects are created, and then destroyed in reverse order (fully unwinding the cursor within
38// each block of the memory pool).
39template <typename T>
40static void run_stack(GrMemoryPool* pool, int loops) {
41 static const int kMaxObjects = 4 * (1 << 10);
42 T* objs[kMaxObjects];
43 for (int i = 0; i < loops; ++i) {
44 // Push N objects into the pool (or heap if pool is null)
45 for (int j = 0; j < kMaxObjects; ++j) {
46 objs[j] = pool ? (T*) pool->allocate(sizeof(T)) : new T;
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000047 }
Michael Ludwig9abd5682020-01-08 13:50:42 -050048 // Pop N objects off in LIFO order
49 for (int j = kMaxObjects - 1; j >= 0; --j) {
50 if (pool) {
51 pool->release(objs[j]);
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000052 } else {
Michael Ludwig9abd5682020-01-08 13:50:42 -050053 delete objs[j];
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000054 }
55 }
Michael Ludwig9abd5682020-01-08 13:50:42 -050056
57 // Everything has been cleaned up for the next loop
58 }
59}
60
61// N objects are created, and then destroyed in creation order (is not able to unwind the cursor
62// within each block, but can reclaim the block once everything is destroyed).
63template <typename T>
64static void run_queue(GrMemoryPool* pool, int loops) {
65 static const int kMaxObjects = 4 * (1 << 10);
66 T* objs[kMaxObjects];
67 for (int i = 0; i < loops; ++i) {
68 // Push N objects into the pool (or heap if pool is null)
69 for (int j = 0; j < kMaxObjects; ++j) {
70 objs[j] = pool ? (T*) pool->allocate(sizeof(T)) : new T;
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000071 }
Michael Ludwig9abd5682020-01-08 13:50:42 -050072 // Pop N objects off in FIFO order
73 for (int j = 0; j < kMaxObjects; ++j) {
74 if (pool) {
75 pool->release(objs[j]);
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000076 } else {
Michael Ludwig9abd5682020-01-08 13:50:42 -050077 delete objs[j];
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000078 }
79 }
Michael Ludwig9abd5682020-01-08 13:50:42 -050080
81 // Everything has been cleaned up for the next loop
Michael Ludwigdff4c1d2020-01-08 15:53:59 +000082 }
Michael Ludwig9abd5682020-01-08 13:50:42 -050083}
Michael Ludwigf811fc32020-01-07 16:29:14 -050084
Michael Ludwig9abd5682020-01-08 13:50:42 -050085// N objects are created and immediately destroyed, so space at the start of the pool should be
86// immediately reclaimed.
87template <typename T>
88static void run_pushpop(GrMemoryPool* pool, int loops) {
89 static const int kMaxObjects = 4 * (1 << 10);
90 T* objs[kMaxObjects];
91 for (int i = 0; i < loops; ++i) {
92 // Push N objects into the pool (or heap if pool is null)
93 for (int j = 0; j < kMaxObjects; ++j) {
94 if (pool) {
95 objs[j] = (T*) pool->allocate(sizeof(T));
96 pool->release(objs[j]);
97 } else {
98 objs[j] = new T;
99 delete objs[j];
100 }
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000101 }
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000102
Michael Ludwig9abd5682020-01-08 13:50:42 -0500103 // Everything has been cleaned up for the next loop
104 }
105}
106
107// N object creations and destructions are invoked in random order.
108template <typename T>
109static void run_random(GrMemoryPool* pool, int loops) {
110 static const int kMaxObjects = 4 * (1 << 10);
111 T* objs[kMaxObjects];
112 for (int i = 0; i < kMaxObjects; ++i) {
113 objs[i] = nullptr;
114 }
115
116 auto del = [&](int j) {
117 // Delete
118 if (pool) {
119 pool->release(objs[j]);
120 } else {
121 delete objs[j];
122 }
123 objs[j] = nullptr;
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000124 };
Michael Ludwig9abd5682020-01-08 13:50:42 -0500125
126 SkRandom r;
127 for (int i = 0; i < loops; ++i) {
128 // Execute 2*kMaxObjects operations, which should average to N create and N destroy,
129 // followed by a small number of remaining deletions.
130 for (int j = 0; j < 2 * kMaxObjects; ++j) {
131 int k = r.nextRangeU(0, kMaxObjects-1);
132 if (objs[k]) {
133 del(k);
134 } else {
135 // Create
136 objs[k] = pool ? (T*) pool->allocate(sizeof(T)) : new T;
137 }
138 }
139
140 // Ensure everything is null for the next loop
141 for (int j = 0; j < kMaxObjects; ++j) {
142 if (objs[j]) {
143 del(j);
144 }
145 }
146 }
147}
148
149///////////////////////////////////////////////////////////////////////////////////////////////////
150
151class GrMemoryPoolBench : public Benchmark {
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000152public:
Michael Ludwig9abd5682020-01-08 13:50:42 -0500153 GrMemoryPoolBench(const char* name, RunBenchProc proc, int poolSize)
154 : fPoolSize(poolSize)
155 , fProc(proc) {
156 fName.printf("grmemorypool_%s", name);
157 }
158
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000159 bool isSuitableFor(Backend backend) override {
160 return backend == kNonRendering_Backend;
161 }
162
163protected:
164 const char* onGetName() override {
Michael Ludwig9abd5682020-01-08 13:50:42 -0500165 return fName.c_str();
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000166 }
167
168 void onDraw(int loops, SkCanvas*) override {
Michael Ludwig9abd5682020-01-08 13:50:42 -0500169 std::unique_ptr<GrMemoryPool> pool;
170 if (fPoolSize > 0) {
171 pool = GrMemoryPool::Make(fPoolSize, fPoolSize);
172 } // else keep it null to test regular new/delete performance
173
174 fProc(pool.get(), loops);
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000175 }
176
Michael Ludwig9abd5682020-01-08 13:50:42 -0500177 SkString fName;
178 int fPoolSize;
179 RunBenchProc fProc;
180
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000181 typedef Benchmark INHERITED;
182};
183
Michael Ludwig9abd5682020-01-08 13:50:42 -0500184///////////////////////////////////////////////////////////////////////////////////////////////////
Michael Ludwigdff4c1d2020-01-08 15:53:59 +0000185
Michael Ludwig9abd5682020-01-08 13:50:42 -0500186static const int kLargePool = 10 * (1 << 10);
187static const int kSmallPool = GrMemoryPool::kMinAllocationSize;
188
189DEF_BENCH( return new GrMemoryPoolBench("stack_aligned_lg", run_stack<Aligned>, kLargePool); )
190DEF_BENCH( return new GrMemoryPoolBench("stack_aligned_sm", run_stack<Aligned>, kSmallPool); )
191DEF_BENCH( return new GrMemoryPoolBench("stack_aligned_ref", run_stack<Aligned>, 0); )
192DEF_BENCH( return new GrMemoryPoolBench("stack_unaligned_lg", run_stack<Unaligned>, kLargePool); )
193DEF_BENCH( return new GrMemoryPoolBench("stack_unaligned_sm", run_stack<Unaligned>, kSmallPool); )
194DEF_BENCH( return new GrMemoryPoolBench("stack_unaligned_ref", run_stack<Unaligned>, 0); )
195
196DEF_BENCH( return new GrMemoryPoolBench("queue_aligned_lg", run_queue<Aligned>, kLargePool); )
197DEF_BENCH( return new GrMemoryPoolBench("queue_aligned_sm", run_queue<Aligned>, kSmallPool); )
198DEF_BENCH( return new GrMemoryPoolBench("queue_aligned_ref", run_queue<Aligned>, 0); )
199DEF_BENCH( return new GrMemoryPoolBench("queue_unaligned_lg", run_queue<Unaligned>, kLargePool); )
200DEF_BENCH( return new GrMemoryPoolBench("queue_unaligned_sm", run_queue<Unaligned>, kSmallPool); )
201DEF_BENCH( return new GrMemoryPoolBench("queue_unaligned_ref", run_queue<Unaligned>, 0); )
202
203DEF_BENCH( return new GrMemoryPoolBench("pushpop_aligned_lg", run_pushpop<Aligned>, kLargePool); )
204DEF_BENCH( return new GrMemoryPoolBench("pushpop_aligned_sm", run_pushpop<Aligned>, kSmallPool); )
205// DEF_BENCH( return new GrMemoryPoolBench("pushpop_aligned_ref", run_pushpop<Aligned>, 0); )
206DEF_BENCH( return new GrMemoryPoolBench("pushpop_unaligned_lg", run_pushpop<Unaligned>, kLargePool); )
207DEF_BENCH( return new GrMemoryPoolBench("pushpop_unaligned_sm", run_pushpop<Unaligned>, kSmallPool); )
208// DEF_BENCH( return new GrMemoryPoolBench("pushpop_unaligned_ref", run_pushpop<Unaligned>, 0); )
209// pushpop_x_ref are not meaningful because the compiler completely optimizes away new T; delete *.
210
211DEF_BENCH( return new GrMemoryPoolBench("random_aligned_lg", run_random<Aligned>, kLargePool); )
212DEF_BENCH( return new GrMemoryPoolBench("random_aligned_sm", run_random<Aligned>, kSmallPool); )
213DEF_BENCH( return new GrMemoryPoolBench("random_aligned_ref", run_random<Aligned>, 0); )
214DEF_BENCH( return new GrMemoryPoolBench("random_unaligned_lg", run_random<Unaligned>, kLargePool); )
215DEF_BENCH( return new GrMemoryPoolBench("random_unaligned_sm", run_random<Unaligned>, kSmallPool); )
216DEF_BENCH( return new GrMemoryPoolBench("random_unaligned_ref", run_random<Unaligned>, 0); )