//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_

#ifdef _DEBUG
#define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//   typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//

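// An illustrative sketch, not part of this header's interface (the class name
// below is a hypothetical example; GlobalPoolAllocator and pool_allocator are
// declared further down in this file):
//
//     class TPoolObject {
//     public:
//         void* operator new(size_t s) { return GlobalPoolAllocator.allocate(s); }
//         void operator delete(void*) { }   // memory is reclaimed by pop(), not here
//     };
//
//     std::vector<int, pool_allocator<int> > v;   // STL container using the pool
//
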
#include <stddef.h>
#include <string.h>
#include <new>      // for the placement new used by TAllocation and pool_allocator
#include <vector>

// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these tracking
// objects never get instantiated, so they won't have any impact.
//

class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(), guardBlockBeginVal, guardBlockSize);
        memset(data(), userDataFill, size);
        memset(postGuard(), guardBlockEndVal, guardBlockSize);
#endif
    }

    void check() const {
        checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal, "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard() const { return mem + headerSize(); }
    unsigned char* data() const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;              // size of the user data area
    unsigned char* mem;       // beginning of our allocation (pts to header)
    TAllocation* prevAlloc;   // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};
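
//
// For illustration (assuming GUARD_BLOCKS is defined, so headerSize() is
// sizeof(TAllocation)): a request for N user bytes occupies
//
//     allocationSize(N) = sizeof(TAllocation) + guardBlockSize + N + guardBlockSize
//
// bytes, laid out as [header][initialGuardBlock][userData][finalGuardBlock],
// and offsetAllocation() skips the header plus the initial guard block to
// return the pointer handed to the user.
//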

//
// There are several stacks.  One is to track the pushing and popping
// of the user, and is not yet implemented.  The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, and need not match, the underlying OS
// page size.  But having it be about that size, or a multiple of it,
// is likely most efficient.
//
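// A rough sketch, inferred from the fields declared below (the actual
// bookkeeping lives in the implementation file): each page obtained from the
// OS looks like
//
//     [tHeader][padding up to headerSkip][allocation][allocation]...
//
// with pages chained through tHeader::nextPage onto freeList or inUseList,
// and currentPageOffset tracking the next free byte in the page at the head
// of inUseList.
//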
class TPoolAllocator {
public:
    TPoolAllocator(bool global = false, int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory; call pop() instead.
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() has not been called, to free all memory allocated
    // since the first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // is available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that individual
    // deallocations can be skipped: everything is deallocated at once by
    // calling pop(), which also avoids a whole class of memory leak problems.
    //

protected:
    friend struct tHeader;

    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
            nextPage(nextPage),
            pageCount(pageCount)
#ifdef GUARD_BLOCKS
            , lastAllocation(0)
#endif
            { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;
        size_t pageCount;
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;
#endif
    };

    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#ifdef GUARD_BLOCKS
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    bool global;              // should be true if this object is globally scoped
    size_t pageSize;          // granularity of allocation from the OS
    size_t alignment;         // all returned allocations will be aligned at
                              // this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;        // amount of memory to skip to make room for the
                              // header (basically, size of header, rounded
                              // up to make it aligned)
    size_t currentPageOffset; // next offset in top of inUseList to allocate from
    tHeader* freeList;        // list of popped memory
    tHeader* inUseList;       // list of all memory currently being used
    tAllocStack stack;        // stack of where to allocate from, to partition pool

    int numCalls;             // just an interesting statistic
    size_t totalBytes;        // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);             // don't allow default copy constructor
};
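
//
// An illustrative sketch of typical use (a locally constructed pool is shown;
// most callers go through the shared GlobalPoolAllocator defined below):
//
//     TPoolAllocator pool;
//     pool.push();                  // open a new sub-pool
//     void* p = pool.allocate(64);  // returns 0 if no memory is available
//     // ... use up to 64 bytes at p ...
//     pool.pop();                   // frees everything allocated since push()
//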


//
// There could potentially be many pools, with pops happening at
// different times.  But the simple case is to have one global pop,
// with everyone using the same global allocator.
//
typedef TPoolAllocator* PoolAllocatorPointer;
extern TPoolAllocator& GetGlobalPoolAllocator();
#define GlobalPoolAllocator GetGlobalPoolAllocator()


struct TThreadGlobalPools
{
    TPoolAllocator* globalPoolAllocator;
};

void SetGlobalPoolAllocatorPtr(TPoolAllocator* poolAllocator);

//
// This STL-compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
template<class T>
class pool_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;

    template<class Other>
    struct rebind {
        typedef pool_allocator<Other> other;
    };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() : allocator(GlobalPoolAllocator) { }
    pool_allocator(TPoolAllocator& a) : allocator(a) { }
    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }

    template<class Other>
    pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }

#if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
    // libCStd on some platforms has a different allocate/deallocate interface.
    // The caller pre-bakes sizeof(T) into 'n', which is the number of bytes to
    // be allocated, not the number of elements.
    void* allocate(size_type n) {
        return getAllocator().allocate(n);
    }
    void* allocate(size_type n, const void*) {
        return getAllocator().allocate(n);
    }
    void deallocate(void*, size_type) {}
#else
    pointer allocate(size_type n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    pointer allocate(size_type n, const void*) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    void deallocate(pointer, size_type) {}
#endif  // _RWSTD_ALLOCATOR

    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    void setAllocator(TPoolAllocator* a) { allocator = *a; }
    TPoolAllocator& getAllocator() const { return allocator; }

protected:
    TPoolAllocator& allocator;
};
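
//
// An illustrative sketch (type and variable names are arbitrary examples):
//
//     typedef std::vector<int, pool_allocator<int> > TPoolIntVector;
//     TPoolIntVector v;    // allocates its storage from GlobalPoolAllocator
//     v.push_back(42);     // element storage is reclaimed by the pool's pop(),
//                          // not by the vector's destructor
//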

#endif // _POOLALLOC_INCLUDED_