blob: 94f9cc992bf5c003bb5842bd5883c4ccd0b13951 [file] [log] [blame]
Nicolas Capens0bac2852016-05-07 06:09:58 -04001// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
John Bauman66b8ab22014-05-06 15:57:45 -04002//
Nicolas Capens0bac2852016-05-07 06:09:58 -04003// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
John Bauman66b8ab22014-05-06 15:57:45 -04006//
Nicolas Capens0bac2852016-05-07 06:09:58 -04007// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
John Bauman66b8ab22014-05-06 15:57:45 -040014
Nicolas Capenscc863da2015-01-21 15:50:55 -050015#include "PoolAlloc.h"
John Bauman66b8ab22014-05-06 15:57:45 -040016
17#ifndef _MSC_VER
18#include <stdint.h>
19#endif
20#include <stdio.h>
Nicolas Capensbe5fe772018-07-20 10:41:02 -040021#include <stdlib.h>
John Bauman66b8ab22014-05-06 15:57:45 -040022
Nicolas Capenscc863da2015-01-21 15:50:55 -050023#include "InitializeGlobals.h"
24#include "osinclude.h"
John Bauman66b8ab22014-05-06 15:57:45 -040025
// Thread-local-storage slot holding each thread's current TPoolAllocator.
// Allocated once via InitializePoolIndex() and released via FreePoolIndex().
OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;

John Bauman66b8ab22014-05-06 15:57:45 -040028bool InitializePoolIndex()
29{
Nicolas Capens0bac2852016-05-07 06:09:58 -040030 assert(PoolIndex == OS_INVALID_TLS_INDEX);
John Bauman66b8ab22014-05-06 15:57:45 -040031
Nicolas Capens0bac2852016-05-07 06:09:58 -040032 PoolIndex = OS_AllocTLSIndex();
33 return PoolIndex != OS_INVALID_TLS_INDEX;
John Bauman66b8ab22014-05-06 15:57:45 -040034}
35
36void FreePoolIndex()
37{
Nicolas Capens0bac2852016-05-07 06:09:58 -040038 assert(PoolIndex != OS_INVALID_TLS_INDEX);
Nicolas Capens978ddc52014-11-11 12:42:08 -050039
Nicolas Capens0bac2852016-05-07 06:09:58 -040040 OS_FreeTLSIndex(PoolIndex);
41 PoolIndex = OS_INVALID_TLS_INDEX;
John Bauman66b8ab22014-05-06 15:57:45 -040042}
43
Nicolas Capens978ddc52014-11-11 12:42:08 -050044TPoolAllocator* GetGlobalPoolAllocator()
John Bauman66b8ab22014-05-06 15:57:45 -040045{
Nicolas Capens0bac2852016-05-07 06:09:58 -040046 assert(PoolIndex != OS_INVALID_TLS_INDEX);
47 return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
John Bauman66b8ab22014-05-06 15:57:45 -040048}
49
50void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
51{
Nicolas Capens0bac2852016-05-07 06:09:58 -040052 assert(PoolIndex != OS_INVALID_TLS_INDEX);
53 OS_SetTLSValue(PoolIndex, poolAllocator);
John Bauman66b8ab22014-05-06 15:57:45 -040054}
55
56//
57// Implement the functionality of the TPoolAllocator class, which
58// is documented in PoolAlloc.h.
59//
Nicolas Capens0bac2852016-05-07 06:09:58 -040060TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
Corentin Wallez3d7c7862017-10-31 18:05:38 -040061 alignment(allocationAlignment)
62#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
63 , pageSize(growthIncrement),
Nicolas Capens0bac2852016-05-07 06:09:58 -040064 freeList(0),
65 inUseList(0),
66 numCalls(0),
67 totalBytes(0)
Corentin Wallez3d7c7862017-10-31 18:05:38 -040068#endif
John Bauman66b8ab22014-05-06 15:57:45 -040069{
Nicolas Capens0bac2852016-05-07 06:09:58 -040070 //
Nicolas Capens0bac2852016-05-07 06:09:58 -040071 // Adjust alignment to be at least pointer aligned and
72 // power of 2.
73 //
74 size_t minAlign = sizeof(void*);
75 alignment &= ~(minAlign - 1);
76 if (alignment < minAlign)
77 alignment = minAlign;
78 size_t a = 1;
79 while (a < alignment)
80 a <<= 1;
81 alignment = a;
82 alignmentMask = a - 1;
John Bauman66b8ab22014-05-06 15:57:45 -040083
Corentin Wallez3d7c7862017-10-31 18:05:38 -040084#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
85 //
86 // Don't allow page sizes we know are smaller than all common
87 // OS page sizes.
88 //
89 if (pageSize < 4*1024)
90 pageSize = 4*1024;
91
92 //
93 // A large currentPageOffset indicates a new page needs to
94 // be obtained to allocate memory.
95 //
96 currentPageOffset = pageSize;
97
Nicolas Capens0bac2852016-05-07 06:09:58 -040098 //
99 // Align header skip
100 //
101 headerSkip = minAlign;
102 if (headerSkip < sizeof(tHeader)) {
103 headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
104 }
Corentin Wallez3d7c7862017-10-31 18:05:38 -0400105#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
106 mStack.push_back({});
107#endif
John Bauman66b8ab22014-05-06 15:57:45 -0400108}
109
// Destroy the allocator, releasing every page still held. Pages were
// obtained as raw char[] buffers with a placement-new'd tHeader at the
// front, so in-use pages need an explicit ~tHeader() before delete[].
TPoolAllocator::~TPoolAllocator()
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	while (inUseList) {
		tHeader* next = inUseList->nextPage;
		// Run the header's destructor manually (placement-new counterpart),
		// then free the underlying char[] buffer.
		inUseList->~tHeader();
		delete [] reinterpret_cast<char*>(inUseList);
		inUseList = next;
	}

	// We should not check the guard blocks
	// here, because we did it already when the block was
	// placed into the free list.
	//
	// Free-list pages had their headers destroyed when recycled in pop(),
	// so only the raw buffer is released here.
	while (freeList) {
		tHeader* next = freeList->nextPage;
		delete [] reinterpret_cast<char*>(freeList);
		freeList = next;
	}
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// malloc()-backed mode: release every outstanding allocation from every
	// push() level that was never popped.
	for (auto& allocs : mStack) {
		for (auto alloc : allocs) {
			free(alloc);
		}
	}
	mStack.clear();
#endif
}
138
// Support MSVC++ 6.0
// Fill patterns written around/into each allocation so corruption is
// detectable: 0xfb before, 0xfe after, 0xcd over fresh user data.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

// Guard blocks only occupy space in GUARD_BLOCKS builds; otherwise the
// size is 0 and all guard arithmetic optimizes away.
#ifdef GUARD_BLOCKS
	const size_t TAllocation::guardBlockSize = 16;
#else
	const size_t TAllocation::guardBlockSize = 0;
#endif
149
150//
151// Check a single guard block for damage
152//
// Verify that every byte of one guard block still holds the expected fill
// value `val`; asserts on the first damaged byte. `locText` describes which
// guard ("before"/"after" style text supplied by the caller) for the message.
// Compiles to an empty body unless GUARD_BLOCKS is defined.
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
	for (size_t x = 0; x < guardBlockSize; x++) {
		if (blockMem[x] != val) {
			char assertMsg[80];

			// We don't print the assert message. It's here just to be helpful.
			// MSVC uses %Iu for size_t; everyone else uses %zu.
			#if defined(_MSC_VER)
			_snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
					locText, size, data());
			#else
			snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
					locText, size, data());
			#endif
			assert(0 && "PoolAlloc: Damage in guard block");
		}
	}
#endif
}
173
174
// Open a new allocation scope. Everything allocated after this call is
// released together by the matching pop().
void TPoolAllocator::push()
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Snapshot the current page position and in-use page list so pop()
	// can roll back to exactly this state.
	tAllocState state = { currentPageOffset, inUseList };

	mStack.push_back(state);

	//
	// Indicate there is no current page to allocate from.
	//
	currentPageOffset = pageSize;
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// malloc()-backed mode: start an empty list to collect this scope's
	// allocations.
	mStack.push_back({});
#endif
}
190
191//
192// Do a mass-deallocation of all the individual allocations
193// that have occurred since the last push(), or since the
194// last pop(), or since the object's creation.
195//
196// The deallocated pages are saved for future allocations.
197//
// Mass-deallocate everything allocated since the matching push() (or since
// construction if never pushed). Single pages are recycled onto freeList
// for reuse; multi-page allocations go straight back to the OS. A no-op
// when there is no open scope.
void TPoolAllocator::pop()
{
	if (mStack.size() < 1)
		return;

#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Restore the snapshot taken by push(): `page` is the first page that
	// predates the scope, so every page ahead of it is torn down.
	tHeader* page = mStack.back().page;
	currentPageOffset = mStack.back().offset;

	while (inUseList != page) {
		// invoke destructor to free allocation list
		inUseList->~tHeader();

		tHeader* nextInUse = inUseList->nextPage;
		// Only single pages are worth caching; multi-page spans are
		// returned to the OS immediately.
		if (inUseList->pageCount > 1)
			delete [] reinterpret_cast<char*>(inUseList);
		else {
			inUseList->nextPage = freeList;
			freeList = inUseList;
		}
		inUseList = nextInUse;
	}

	mStack.pop_back();
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// malloc()-backed mode: free just this scope's allocations.
	for (auto alloc : mStack.back()) {
		free(alloc);
	}
	mStack.pop_back();
#endif
}
229
230//
231// Do a mass-deallocation of all the individual allocations
232// that have occurred.
233//
234void TPoolAllocator::popAll()
235{
Corentin Wallez3d7c7862017-10-31 18:05:38 -0400236 while (mStack.size() > 0)
Nicolas Capens0bac2852016-05-07 06:09:58 -0400237 pop();
John Bauman66b8ab22014-05-06 15:57:45 -0400238}
239
// Allocate numBytes from the pool, aligned per the constructor's settings.
// Returns 0 only when size arithmetic would overflow; page-buffer `new`
// is used in its throwing form, so the null checks after it are belt-and-
// suspenders. Three paths: (1) bump-pointer from the current page,
// (2) dedicated multi-page span for oversized requests, (3) start a fresh
// page (recycled from freeList when possible).
void* TPoolAllocator::allocate(size_t numBytes)
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	//
	// Just keep some interesting statistics.
	//
	++numCalls;
	totalBytes += numBytes;

	// If we are using guard blocks, all allocations are bracketed by
	// them: [guardblock][allocation][guardblock]. numBytes is how
	// much memory the caller asked for. allocationSize is the total
	// size including guard blocks. In release build,
	// guardBlockSize=0 and this all gets optimized away.
	size_t allocationSize = TAllocation::allocationSize(numBytes);
	// Detect integer overflow.
	if (allocationSize < numBytes)
		return 0;

	//
	// Do the allocation, most likely case first, for efficiency.
	// This step could be moved to be inline sometime.
	//
	if (allocationSize <= pageSize - currentPageOffset) {
		//
		// Safe to allocate from currentPageOffset.
		//
		// Note: this branch can only be taken after a page exists, since
		// currentPageOffset starts out equal to pageSize; inUseList is
		// therefore non-null here.
		unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
		currentPageOffset += allocationSize;
		// Round the offset up so the *next* allocation starts aligned.
		currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

		return initializeAllocation(inUseList, memory, numBytes);
	}

	if (allocationSize > pageSize - headerSkip) {
		//
		// Do a multi-page allocation. Don't mix these with the others.
		// The OS is efficient and allocating and free-ing multiple pages.
		//
		size_t numBytesToAlloc = allocationSize + headerSkip;
		// Detect integer overflow.
		if (numBytesToAlloc < allocationSize)
			return 0;

		tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
		if (memory == 0)
			return 0;

		// Use placement-new to initialize header
		// pageCount rounds up so pop() can tell multi-page spans apart.
		new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
		inUseList = memory;

		currentPageOffset = pageSize; // make next allocation come from a new page

		// No guard blocks for multi-page allocations (yet)
		return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
	}

	//
	// Need a simple page to allocate from.
	//
	// Prefer a page recycled by pop(); otherwise get a fresh one.
	tHeader* memory;
	if (freeList) {
		memory = freeList;
		freeList = freeList->nextPage;
	} else {
		memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
		if (memory == 0)
			return 0;
	}

	// Use placement-new to initialize header
	new(memory) tHeader(inUseList, 1);
	inUseList = memory;

	unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
	currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

	return initializeAllocation(inUseList, ret, numBytes);
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// malloc()-backed mode: over-allocate by alignmentMask, record the raw
	// pointer for later free(), and hand back the aligned address within it.
	void *alloc = malloc(numBytes + alignmentMask);
	mStack.back().push_back(alloc);

	intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
	intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
	return reinterpret_cast<void *>(intAlloc);
#endif
}
328
329
330//
331// Check all allocations in a list for damage by calling check on each.
332//
333void TAllocation::checkAllocList() const
334{
Nicolas Capens0bac2852016-05-07 06:09:58 -0400335 for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
336 alloc->check();
John Bauman66b8ab22014-05-06 15:57:45 -0400337}