blob: bd417b7c1dcc163a3e65f711c6eee6831f38e428 [file] [log] [blame]
//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

7#ifndef _POOLALLOC_INCLUDED_
8#define _POOLALLOC_INCLUDED_
9
10#ifdef _DEBUG
11# define GUARD_BLOCKS // define to enable guard block sanity checking
12#endif
13
14//
15// This header defines an allocator that can be used to efficiently
16// allocate a large number of small requests for heap memory, with the
17// intention that they are not individually deallocated, but rather
18// collectively deallocated at one time.
19//
20// This simultaneously
21//
22// * Makes each individual allocation much more efficient; the
23// typical allocation is trivial.
24// * Completely avoids the cost of doing individual deallocation.
25// * Saves the trouble of tracking down and plugging a large class of leaks.
26//
27// Individual classes can use this allocator by supplying their own
28// new and delete methods.
29//
30// STL containers can use this allocator by using the pool_allocator
31// class as the allocator (second) template argument.
32//
33
#include <stddef.h>
#include <string.h>

#include <new>
#include <vector>
36
// If we are using guard blocks, we must track each individual
38// allocation. If we aren't using guard blocks, these
39// never get instantiated, so won't have any impact.
40//
41
//
// Bookkeeping header placed in front of each individual allocation when
// guard blocks are enabled.  Allocations on a page form a singly linked
// list (via prevAlloc) so the whole chain can be sanity-checked at once.
// Memory layout: [TAllocation header][preGuard][userData][postGuard].
//
class TAllocation {
public:
    // size: byte count of the user data area.
    // mem:  start of the raw allocation (points at this header).
    // prev: previously created allocation on the same page, or 0 if first.
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#       ifdef GUARD_BLOCKS
            memset(preGuard(), guardBlockBeginVal, guardBlockSize);
            memset(data(), userDataFill, size);
            memset(postGuard(), guardBlockEndVal, guardBlockSize);
#       endif
    }

    // Verify that both guard blocks bracketing this allocation are intact.
    void check() const {
        checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal, "after");
    }

    // Walk the prevAlloc chain, check()-ing every allocation on it.
    // Defined in the .cpp.
    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    // Report (defined in the .cpp) if any byte of the given guard block
    // does not hold 'val'; locText says which side ("before"/"after").
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard() const { return mem + headerSize(); }
    unsigned char* data() const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;                  // size of the user data area
    unsigned char* mem;           // beginning of our allocation (pts to header)
    TAllocation* prevAlloc;       // prior allocation in the chain

    // Support MSVC++ 6.0 (values defined in the .cpp)
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
    // With guard blocks off, no header is embedded at all, so the whole
    // tracking scheme collapses to zero per-allocation overhead.
#   ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#   else
    inline static size_t headerSize() { return 0; }
#   endif
};
100
101//
102// There are several stacks. One is to track the pushing and popping
// of the user, and is not yet implemented. The others are simply
// repositories of free pages or used pages.
105//
106// Page stacks are linked together with a simple header at the beginning
107// of each allocation obtained from the underlying OS. Multi-page allocations
108// are returned to the OS. Individual page allocations are kept for future
109// re-use.
110//
111// The "page size" used is not, nor must it match, the underlying OS
112// page size. But, having it be about that size or equal to a set of
113// pages is likely most optimal.
114//
class TPoolAllocator {
public:
    // global:              stored in the 'global' member; should be true when
    //                      this object is globally scoped (exact effect lives
    //                      in the .cpp -- TODO confirm).
    // growthIncrement:     granularity, in bytes, of "pages" obtained from
    //                      the OS.
    // allocationAlignment: alignment of every pointer allocate() returns;
    //                      must be a power of 2 (see alignmentMask).
    TPoolAllocator(bool global = false, int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or if no last call to push, frees all memory since first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

protected:
    friend struct tHeader;

    // Header at the front of every page obtained from the OS: links pages
    // into the freeList/inUseList, and (debug builds only) anchors the
    // chain of TAllocations living on the page.
    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
            lastAllocation(0),
#endif
            nextPage(nextPage), pageCount(pageCount) { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            // Sanity-check every allocation's guard blocks before the
            // page is released or recycled.
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;            // next page in the free or in-use list
        size_t pageCount;             // number of pageSize units in this allocation
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;  // most recent allocation made on this page
#endif
    };

    // One saved allocation point for push()/pop(): a page and the next
    // free offset within it.
    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#       ifdef GUARD_BLOCKS
        // Placement-new a TAllocation header in front of the user data and
        // push it onto this page's allocation chain.
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#       endif

        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    bool global;            // should be true if this object is globally scoped
    size_t pageSize;        // granularity of allocation from the OS
    size_t alignment;       // all returned allocations will be aligned at
                            //      this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;      // amount of memory to skip to make room for the
                            //      header (basically, size of header, rounded
                            //      up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;      // list of popped memory
    tHeader* inUseList;     // list of all memory currently being used
    tAllocStack stack;      // stack of where to allocate from, to partition pool

    int numCalls;           // just an interesting statistic
    size_t totalBytes;      // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);             // don't allow default copy constructor
};
214
215
216//
217// There could potentially be many pools with pops happening at
218// different times. But a simple use is to have a global pop
219// with everyone using the same global allocator.
220//
221typedef TPoolAllocator* PoolAllocatorPointer;
222extern TPoolAllocator& GetGlobalPoolAllocator();
223#define GlobalPoolAllocator GetGlobalPoolAllocator()
224
225
// Per-thread bundle of pool-allocator state.  NOTE(review): presumably the
// pointer handed back by GetGlobalPoolAllocator() for the current thread --
// confirm against the .cpp that manages thread-local storage.
struct TThreadGlobalPools
{
    TPoolAllocator* globalPoolAllocator;  // non-owning; set via SetGlobalPoolAllocatorPtr()
};
230
231void SetGlobalPoolAllocatorPtr(TPoolAllocator* poolAllocator);
232
233//
234// This STL compatible allocator is intended to be used as the allocator
235// parameter to templatized STL containers, like vector and map.
236//
237// It will use the pools for allocation, and not
238// do any deallocation, but will still do destruction.
239//
240template<class T>
241class pool_allocator {
242public:
243 typedef size_t size_type;
244 typedef ptrdiff_t difference_type;
245 typedef T *pointer;
246 typedef const T *const_pointer;
247 typedef T& reference;
248 typedef const T& const_reference;
249 typedef T value_type;
250 template<class Other>
251 struct rebind {
252 typedef pool_allocator<Other> other;
253 };
254 pointer address(reference x) const { return &x; }
255 const_pointer address(const_reference x) const { return &x; }
256
257#ifdef USING_SGI_STL
258 pool_allocator() { }
259#else
260 pool_allocator() : allocator(GlobalPoolAllocator) { }
261 pool_allocator(TPoolAllocator& a) : allocator(a) { }
262 pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
263#endif
264
265#if defined(_MSC_VER) && _MSC_VER >= 1300
266 template<class Other>
267#ifdef USING_SGI_STL
268 pool_allocator(const pool_allocator<Other>& p) /*: allocator(p.getAllocator())*/ { }
269#else
270 pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
271#endif
272#endif
273
274#ifndef _WIN32
275 template<class Other>
276 pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
277#endif
278
279#ifdef USING_SGI_STL
280 static pointer allocate(size_type n) {
281 return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
282 pointer allocate(size_type n, const void*) {
283 return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
284
285 static void deallocate(void*, size_type) { }
286 static void deallocate(pointer, size_type) { }
287#else
288 pointer allocate(size_type n) {
289 return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
290 pointer allocate(size_type n, const void*) {
291 return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
292
293 void deallocate(void*, size_type) { }
294 void deallocate(pointer, size_type) { }
295#endif
296
297 pointer _Charalloc(size_t n) {
298 return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
299
300 void construct(pointer p, const T& val) { new ((void *)p) T(val); }
301 void destroy(pointer p) { p->T::~T(); }
302
303 bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
304 bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
305
306 size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
307 size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
308
309#ifdef USING_SGI_STL
310 //void setAllocator(TPoolAllocator* a) { allocator = a; }
311 static TPoolAllocator& getAllocator() { return GlobalPoolAllocator; }
312#else
313 void setAllocator(TPoolAllocator* a) { allocator = *a; }
314 TPoolAllocator& getAllocator() const { return allocator; }
315
316protected:
317 TPoolAllocator& allocator;
318#endif
319};
320
321#endif // _POOLALLOC_INCLUDED_