blob: c4a9beb5f23d14248e188090fadccf2a23409ae7 [file] [log] [blame]
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
// This is a GPU-backend specific test. It relies on static initializers to work
9
10#include "SkTypes.h"
11
12#if SK_SUPPORT_GPU && SK_ALLOW_STATIC_GLOBAL_INITIALIZERS && defined(SK_VULKAN)
13
14#include "GrContextFactory.h"
15#include "GrTest.h"
16#include "Test.h"
17#include "vk/GrVkGpu.h"
18
19using sk_gpu_test::GrContextFactory;
20
21void subheap_test(skiatest::Reporter* reporter, GrContext* context) {
22 GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
23
24 // heap index doesn't matter, we're just testing the suballocation algorithm so we'll use 0
25 GrVkSubHeap heap(gpu, 0, 64 * 1024, 32);
26 GrVkAlloc alloc0, alloc1, alloc2, alloc3;
27 // test full allocation and free
28 REPORTER_ASSERT(reporter, heap.alloc(64 * 1024, &alloc0));
29 REPORTER_ASSERT(reporter, alloc0.fOffset == 0);
30 REPORTER_ASSERT(reporter, alloc0.fSize == 64 * 1024);
31 REPORTER_ASSERT(reporter, heap.freeSize() == 0 && heap.largestBlockSize() == 0);
32 heap.free(alloc0);
33 REPORTER_ASSERT(reporter, heap.freeSize() == 64*1024 && heap.largestBlockSize() == 64 * 1024);
34
35 // now let's suballoc some memory
36 REPORTER_ASSERT(reporter, heap.alloc(16 * 1024, &alloc0));
37 REPORTER_ASSERT(reporter, heap.alloc(23 * 1024, &alloc1));
38 REPORTER_ASSERT(reporter, heap.alloc(18 * 1024, &alloc2));
39 REPORTER_ASSERT(reporter, heap.freeSize() == 7 * 1024 && heap.largestBlockSize() == 7 * 1024);
40 // free lone block
41 heap.free(alloc1);
42 REPORTER_ASSERT(reporter, heap.freeSize() == 30 * 1024 && heap.largestBlockSize() == 23 * 1024);
43 // allocate into smallest free block
44 REPORTER_ASSERT(reporter, heap.alloc(6 * 1024, &alloc3));
45 REPORTER_ASSERT(reporter, heap.freeSize() == 24 * 1024 && heap.largestBlockSize() == 23 * 1024);
46 // allocate into exact size free block
47 REPORTER_ASSERT(reporter, heap.alloc(23 * 1024, &alloc1));
48 REPORTER_ASSERT(reporter, heap.freeSize() == 1 * 1024 && heap.largestBlockSize() == 1 * 1024);
49 // free lone block
50 heap.free(alloc2);
51 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockSize() == 18 * 1024);
52 // free and merge with preceding block and following
53 heap.free(alloc3);
54 REPORTER_ASSERT(reporter, heap.freeSize() == 25 * 1024 && heap.largestBlockSize() == 25 * 1024);
55 // free and merge with following block
56 heap.free(alloc1);
57 REPORTER_ASSERT(reporter, heap.freeSize() == 48 * 1024 && heap.largestBlockSize() == 48 * 1024);
58 // free starting block and merge with following
59 heap.free(alloc0);
60 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockSize() == 64 * 1024);
61
62 // realloc
63 REPORTER_ASSERT(reporter, heap.alloc(4 * 1024, &alloc0));
64 REPORTER_ASSERT(reporter, heap.alloc(35 * 1024, &alloc1));
65 REPORTER_ASSERT(reporter, heap.alloc(10 * 1024, &alloc2));
66 REPORTER_ASSERT(reporter, heap.freeSize() == 15 * 1024 && heap.largestBlockSize() == 15 * 1024);
67 // free starting block and merge with following
68 heap.free(alloc0);
69 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockSize() == 15 * 1024);
70 // free block and merge with preceding
71 heap.free(alloc1);
72 REPORTER_ASSERT(reporter, heap.freeSize() == 54 * 1024 && heap.largestBlockSize() == 39 * 1024);
73 // free block and merge with preceding and following
74 heap.free(alloc2);
75 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockSize() == 64 * 1024);
76
77 // fragment
78 REPORTER_ASSERT(reporter, heap.alloc(19 * 1024, &alloc0));
79 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024, &alloc1));
80 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024, &alloc2));
81 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024, &alloc3));
82 REPORTER_ASSERT(reporter, heap.freeSize() == 22 * 1024 && heap.largestBlockSize() == 22 * 1024);
83 heap.free(alloc0);
84 REPORTER_ASSERT(reporter, heap.freeSize() == 41 * 1024 && heap.largestBlockSize() == 22 * 1024);
85 heap.free(alloc2);
86 REPORTER_ASSERT(reporter, heap.freeSize() == 56 * 1024 && heap.largestBlockSize() == 22 * 1024);
87 REPORTER_ASSERT(reporter, !heap.alloc(40 * 1024, &alloc0));
88 heap.free(alloc3);
89 REPORTER_ASSERT(reporter, heap.freeSize() == 59 * 1024 && heap.largestBlockSize() == 40 * 1024);
90 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, &alloc0));
91 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockSize() == 19 * 1024);
92 heap.free(alloc1);
93 REPORTER_ASSERT(reporter, heap.freeSize() == 24 * 1024 && heap.largestBlockSize() == 24 * 1024);
94 heap.free(alloc0);
95 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockSize() == 64 * 1024);
96
97 // unaligned sizes
98 REPORTER_ASSERT(reporter, heap.alloc(19 * 1024 - 31, &alloc0));
99 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 5, &alloc1));
100 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 19, &alloc2));
101 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 3, &alloc3));
102 REPORTER_ASSERT(reporter, heap.freeSize() == 22 * 1024 && heap.largestBlockSize() == 22 * 1024);
103 heap.free(alloc0);
104 REPORTER_ASSERT(reporter, heap.freeSize() == 41 * 1024 && heap.largestBlockSize() == 22 * 1024);
105 heap.free(alloc2);
106 REPORTER_ASSERT(reporter, heap.freeSize() == 56 * 1024 && heap.largestBlockSize() == 22 * 1024);
107 REPORTER_ASSERT(reporter, !heap.alloc(40 * 1024, &alloc0));
108 heap.free(alloc3);
109 REPORTER_ASSERT(reporter, heap.freeSize() == 59 * 1024 && heap.largestBlockSize() == 40 * 1024);
110 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, &alloc0));
111 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockSize() == 19 * 1024);
112 heap.free(alloc1);
113 REPORTER_ASSERT(reporter, heap.freeSize() == 24 * 1024 && heap.largestBlockSize() == 24 * 1024);
114 heap.free(alloc0);
115 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockSize() == 64 * 1024);
116}
117
118void suballoc_test(skiatest::Reporter* reporter, GrContext* context) {
119 GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
120
121 // heap index doesn't matter, we're just testing the allocation algorithm so we'll use 0
122 GrVkHeap heap(gpu, GrVkHeap::kSubAlloc_Strategy, 64 * 1024);
123 GrVkAlloc alloc0, alloc1, alloc2, alloc3;
124 const VkDeviceSize kAlignment = 16;
125 const uint32_t kHeapIndex = 0;
126
127 REPORTER_ASSERT(reporter, heap.allocSize() == 0 && heap.usedSize() == 0);
128
129 // fragment allocations so we need to grow heap
130 REPORTER_ASSERT(reporter, heap.alloc(19 * 1024 - 3, kAlignment, kHeapIndex, &alloc0));
131 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 9, kAlignment, kHeapIndex, &alloc1));
132 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 15, kAlignment, kHeapIndex, &alloc2));
133 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 6, kAlignment, kHeapIndex, &alloc3));
134 REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() == 42 * 1024);
135 heap.free(alloc0);
136 REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() == 23 * 1024);
137 heap.free(alloc2);
138 REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() == 8 * 1024);
139 // we expect the heap to grow here
140 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc0));
141 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 48 * 1024);
142 heap.free(alloc3);
143 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 45 * 1024);
144 // heap should not grow here (first subheap has exactly enough room)
145 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc3));
146 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 85 * 1024);
147 // heap should not grow here (second subheap has room)
148 REPORTER_ASSERT(reporter, heap.alloc(22 * 1024, kAlignment, kHeapIndex, &alloc2));
149 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 107 * 1024);
150 heap.free(alloc1);
151 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 102 * 1024);
152 heap.free(alloc0);
153 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 62 * 1024);
154 heap.free(alloc2);
155 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 40 * 1024);
156 heap.free(alloc3);
157 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 0 * 1024);
jvanverth6dc3af42016-06-16 14:05:09 -0700158 // heap should not grow here (allocating more than subheap size)
159 REPORTER_ASSERT(reporter, heap.alloc(128 * 1024, kAlignment, kHeapIndex, &alloc0));
160 REPORTER_ASSERT(reporter, 0 == alloc0.fSize);
161 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 0 * 1024);
162 heap.free(alloc0);
jvanverthd6f80342016-06-16 04:42:30 -0700163}
164
165void singlealloc_test(skiatest::Reporter* reporter, GrContext* context) {
166 GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
167
168 // heap index doesn't matter, we're just testing the allocation algorithm so we'll use 0
169 GrVkHeap heap(gpu, GrVkHeap::kSingleAlloc_Strategy, 64 * 1024);
170 GrVkAlloc alloc0, alloc1, alloc2, alloc3;
171 const VkDeviceSize kAlignment = 64;
172 const uint32_t kHeapIndex = 0;
173
174 REPORTER_ASSERT(reporter, heap.allocSize() == 0 && heap.usedSize() == 0);
175
176 // make a few allocations
177 REPORTER_ASSERT(reporter, heap.alloc(49 * 1024 - 3, kAlignment, kHeapIndex, &alloc0));
178 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 37, kAlignment, kHeapIndex, &alloc1));
179 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 11, kAlignment, kHeapIndex, &alloc2));
180 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 29, kAlignment, kHeapIndex, &alloc3));
181 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 72 * 1024);
182 heap.free(alloc0);
183 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 23 * 1024);
184 heap.free(alloc2);
185 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 8 * 1024);
186 // heap should not grow here (first subheap has room)
187 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc0));
188 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 48 * 1024);
189 heap.free(alloc3);
190 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 45 * 1024);
191 // check for exact fit -- heap should not grow here (third subheap has room)
192 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 63, kAlignment, kHeapIndex, &alloc2));
193 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 60 * 1024);
194 heap.free(alloc2);
195 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 45 * 1024);
196 // heap should grow here (no subheap has room)
197 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc3));
198 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 85 * 1024);
199 heap.free(alloc1);
200 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 80 * 1024);
201 heap.free(alloc0);
202 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 40 * 1024);
203 heap.free(alloc3);
204 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 0 * 1024);
205}
206
207DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkHeapTests, reporter, ctxInfo) {
208 subheap_test(reporter, ctxInfo.grContext());
209 suballoc_test(reporter, ctxInfo.grContext());
210 singlealloc_test(reporter, ctxInfo.grContext());
211}
212
213#endif