Initial import from TransGaming

git-svn-id: https://angleproject.googlecode.com/svn/trunk@2 736b8ea6-26fd-11df-bfd4-992fa37f6226
diff --git a/Compiler/PoolAlloc.cpp b/Compiler/PoolAlloc.cpp
new file mode 100644
index 0000000..013e752
--- /dev/null
+++ b/Compiler/PoolAlloc.cpp
@@ -0,0 +1,314 @@
+//
+// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#include "PoolAlloc.h"
+#include "Common.h"
+
+#include "InitializeGlobals.h"
+#include "osinclude.h"
+
+OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;
+
+void InitializeGlobalPools()
+{
+	TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
+	if (globalPools)
+		return;
+
+	TPoolAllocator *globalPoolAllocator = new TPoolAllocator(true);
+
+	TThreadGlobalPools* threadData = new TThreadGlobalPools();
+
+	threadData->globalPoolAllocator = globalPoolAllocator;
+
+	OS_SetTLSValue(PoolIndex, threadData);
+	globalPoolAllocator->push();
+}
+
+void FreeGlobalPools()
+{
+	// Release the allocated memory for this thread.
+	TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
+	if (!globalPools)
+		return;
+	
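+	// GlobalPoolAllocator is assumed (per its declaration in PoolAlloc.h) to
+	// resolve to the per-thread allocator returned by GetGlobalPoolAllocator(),
+	// so taking its address below recovers the object new'ed in
+	// InitializeGlobalPools().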
+	GlobalPoolAllocator.popAll();
+	delete &GlobalPoolAllocator;
+	delete globalPools;
+}
+
+bool InitializePoolIndex()
+{
+	// Allocate a TLS index.
+	if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
+		return false;
+
+	return true;
+}
+
+void FreePoolIndex()
+{
+	// Release the TLS index.
+	OS_FreeTLSIndex(PoolIndex);
+}
+
+TPoolAllocator& GetGlobalPoolAllocator()
+{
+	TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
+
+	return *threadData->globalPoolAllocator;
+}
+
+void SetGlobalPoolAllocatorPtr(TPoolAllocator* poolAllocator)
+{
+	TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
+
+	threadData->globalPoolAllocator = poolAllocator;
+}
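+
+//
+// Expected lifecycle of the thread-local pools above (an illustrative
+// sketch; the actual call sites are elsewhere in the compiler):
+//
+//   InitializePoolIndex();     // once per process: reserve the TLS slot
+//   InitializeGlobalPools();   // once per thread: create its pool allocator
+//   ...                        // allocations go through GetGlobalPoolAllocator()
+//   FreeGlobalPools();         // per thread: release the pools
+//   FreePoolIndex();           // once per process: release the TLS slot
+//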
+
+//
+// Implement the functionality of the TPoolAllocator class, which
+// is documented in PoolAlloc.h.
+//
+TPoolAllocator::TPoolAllocator(bool g, int growthIncrement, int allocationAlignment) :
+	global(g),
+	pageSize(growthIncrement),
+	alignment(allocationAlignment),
+	freeList(0),
+	inUseList(0),
+	numCalls(0),
+	totalBytes(0)
+{
+	//
+	// Don't allow page sizes smaller than the common 4K OS page size.
+	//
+	if (pageSize < 4*1024)
+		pageSize = 4*1024;
+
+	//
+	// A large currentPageOffset indicates a new page needs to
+	// be obtained to allocate memory.
+	//
+	currentPageOffset = pageSize;
+
+	//
+	// Adjust alignment to be at least pointer aligned and
+	// power of 2.
+	//
+	size_t minAlign = sizeof(void*);
+	alignment &= ~(minAlign - 1);
+	if (alignment < minAlign)
+		alignment = minAlign;
+	size_t a = 1;
+	while (a < alignment)
+		a <<= 1;
+	alignment = a;
+	alignmentMask = a - 1;
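+
+	//
+	// For example, with 8-byte pointers a requested alignment of 4 becomes 8,
+	// and a requested alignment of 24 is rounded up to the next power of two,
+	// 32, giving an alignmentMask of 0x1F.
+	//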
+
+	//
+	// Align header skip
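+	// For example, if sizeof(tHeader) were 24 bytes with 16-byte alignment,
+	// headerSkip would round up to 32 below (24 is only illustrative; the
+	// real size depends on the tHeader layout in PoolAlloc.h).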
+	//
+	headerSkip = minAlign;
+	if (headerSkip < sizeof(tHeader)) {
+		headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
+	}
+}
+
+TPoolAllocator::~TPoolAllocator()
+{
+	if (!global) {
+		//
+		// A non-global pool cannot (correctly) still be referenced by
+		// globally scoped objects that outlive it, so it is safe to
+		// delete the "in use" memory here.
+		//
+		while (inUseList) {
+			tHeader* next = inUseList->nextPage;
+			inUseList->~tHeader();
+			delete [] reinterpret_cast<char*>(inUseList);
+			inUseList = next;
+		}
+	}
+
+	//
+	// Always delete the free-list memory; it cannot (correctly) still be
+	// referenced, whether the pool allocator was global or not.  The guard
+	// blocks are not checked here because they were already checked when
+	// each block was placed on the free list.
+	//
+	while (freeList) {
+		tHeader* next = freeList->nextPage;
+		delete [] reinterpret_cast<char*>(freeList);
+		freeList = next;
+	}
+}
+
+// Define the static members out of class (in-class initialization of
+// static const members is not supported by MSVC++ 6.0).
+const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
+const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
+const unsigned char TAllocation::userDataFill       = 0xcd;
+
+#ifdef GUARD_BLOCKS
+	const size_t TAllocation::guardBlockSize = 16;
+#else
+	const size_t TAllocation::guardBlockSize = 0;
+#endif
+
+//
+// Check a single guard block for damage
+//
+void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
+{
+	for (size_t x = 0; x < guardBlockSize; x++) {
+		if (blockMem[x] != val) {
+			char assertMsg[128];
+
+			// The message is never printed; it is built only so it can be
+			// inspected in a debugger when the assert below fires.
+			sprintf(assertMsg, "PoolAlloc: Damage %s %lu byte allocation at 0x%p\n",
+					locText, static_cast<unsigned long>(size), data());
+			assert(0 && "PoolAlloc: Damage in guard block");
+		}
+	}
+}
+
+
+void TPoolAllocator::push()
+{
+	tAllocState state = { currentPageOffset, inUseList };
+
+	stack.push_back(state);
+		
+	//
+	// Indicate there is no current page to allocate from.
+	//
+	currentPageOffset = pageSize;
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred since the last push(), or since the
+// last pop(), or since the object's creation.
+//
+// The deallocated pages are saved for future allocations.
+//
+void TPoolAllocator::pop()
+{
+	if (stack.size() < 1)
+		return;
+
+	tHeader* page = stack.back().page;
+	currentPageOffset = stack.back().offset;
+
+	while (inUseList != page) {
+		// invoke destructor to free allocation list
+		inUseList->~tHeader();
+		
+		tHeader* nextInUse = inUseList->nextPage;
+		if (inUseList->pageCount > 1)
+			delete [] reinterpret_cast<char*>(inUseList);
+		else {
+			inUseList->nextPage = freeList;
+			freeList = inUseList;
+		}
+		inUseList = nextInUse;
+	}
+
+	stack.pop_back();
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred.
+//
+void TPoolAllocator::popAll()
+{
+	while (stack.size() > 0)
+		pop();
+}
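+
+//
+// Illustrative push/pop pairing (the variable name is hypothetical):
+//
+//   allocator.push();
+//   void* a = allocator.allocate(128);
+//   void* b = allocator.allocate(200);
+//   allocator.pop();   // releases a and b in one shot; their page is
+//                      // recycled onto the free list
+//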
+
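+//
+// Allocate numBytes from the pool.  Three cases, checked in order:
+//   1. The current page has room: bump currentPageOffset.
+//   2. The request (plus header) will not fit in one page: give it its own
+//      multi-page block, kept separate from the single-page free list.
+//   3. Otherwise, start a fresh single page (reusing the free list when
+//      possible) and allocate from it.
+//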
+void* TPoolAllocator::allocate(size_t numBytes)
+{
+	// If we are using guard blocks, all allocations are bracketed by
+	// them: [guardblock][allocation][guardblock].  numBytes is how
+	// much memory the caller asked for; allocationSize is the total
+	// size including guard blocks.  In a release build, guardBlockSize
+	// is 0 and this all gets optimized away.
+	size_t allocationSize = TAllocation::allocationSize(numBytes);
+	
+	//
+	// Just keep some interesting statistics.
+	//
+	++numCalls;
+	totalBytes += numBytes;
+
+	//
+	// Do the allocation, most likely case first, for efficiency.
+	// This fast path could be inlined at some point.
+	//
+	if (currentPageOffset + allocationSize <= pageSize) {
+		//
+		// Safe to allocate from currentPageOffset.
+		//
+		unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
+		currentPageOffset += allocationSize;
+		currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
+
+		return initializeAllocation(inUseList, memory, numBytes);
+	}
+
+	if (allocationSize + headerSkip > pageSize) {
+		//
+		// Do a multi-page allocation.  Don't mix these with the others.
+		// The OS is efficient at allocating and freeing multiple pages.
+		//
+		size_t numBytesToAlloc = allocationSize + headerSkip;
+		tHeader* memory = reinterpret_cast<tHeader*>(::new (std::nothrow) char[numBytesToAlloc]);
+		if (memory == 0)
+			return 0;
+
+		// Use placement-new to initialize header
+		new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
+		inUseList = memory;
+
+		currentPageOffset = pageSize;  // make next allocation come from a new page
+
+		// No guard blocks for multi-page allocations (yet)
+		return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
+	}
+
+	//
+	// Need a new single page to allocate from.
+	//
+	tHeader* memory;
+	if (freeList) {
+		memory = freeList;
+		freeList = freeList->nextPage;
+	} else {
+		memory = reinterpret_cast<tHeader*>(::new (std::nothrow) char[pageSize]);
+		if (memory == 0)
+			return 0;
+	}
+
+	// Use placement-new to initialize header
+	new(memory) tHeader(inUseList, 1);
+	inUseList = memory;
+	
+	unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
+	currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
+
+	return initializeAllocation(inUseList, ret, numBytes);
+}
+
+
+//
+// Check all allocations in a list for damage by calling check on each.
+//
+void TAllocation::checkAllocList() const
+{
+	for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
+		alloc->check();
+}