Revert "Vulkan: Adding custom pool allocator"
This reverts commit 05459e06fde5047ae8f5f90fe091c3255e6bc88e.
Reason for revert: ClusterFuzz bug reports flagged this commit
Original change's description:
> Vulkan: Adding custom pool allocator
>
> Copied the pool allocator used by the compiler to common/ and hooked it up
> as the custom allocator for CommandPools. Modified it to support reallocation.
>
> RendererVk now has a private poolAllocator and VkAllocationCallbacks
> struct. The allocation callbacks are initialized to static functions
> in RendererVk::initializeDevice() and then passed to CommandPool init()
> and destroy() functions.
>
> Using the pool allocator saves Command Pool/Buffer clean-up time, which
> was showing up as a bottleneck in some cases.
>
> Bug: angleproject:2951
> Change-Id: I81aa8a7ec60397676fa722d6435029db27947ef4
> Reviewed-on: https://chromium-review.googlesource.com/c/1409867
> Commit-Queue: Tobin Ehlis <tobine@google.com>
> Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org>
> Reviewed-by: Jamie Madill <jmadill@chromium.org>
TBR=jmadill@chromium.org,tobine@google.com,ianelliott@google.com,syoussefi@chromium.org
Change-Id: I363a351667c4dddef79833061790da90de477e70
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: angleproject:2951
Reviewed-on: https://chromium-review.googlesource.com/c/1430679
Reviewed-by: Tobin Ehlis <tobine@google.com>
Commit-Queue: Tobin Ehlis <tobine@google.com>
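
For reference, the reverted change exposed an angle::PoolAllocator to Vulkan through VkAllocationCallbacks roughly as sketched below. This is a condensed restatement of the RendererVk.cpp code removed further down in this diff, not new API surface; the original stub internal-allocation notification callbacks are replaced here with null pointers, which Vulkan also permits.

    // Sketch: route Vulkan host allocations for a command pool into a pool allocator.
    // Mirrors the removed PoolAllocationFunction / PoolReallocationFunction /
    // InitPoolAllocationCallbacks helpers shown in the RendererVk.cpp hunk below.
    #include <vulkan/vulkan.h>
    #include "common/PoolAlloc.h"

    VKAPI_ATTR void *VKAPI_CALL PoolAllocationFunction(void *pUserData,
                                                       size_t size,
                                                       size_t alignment,
                                                       VkSystemAllocationScope allocationScope)
    {
        // pUserData carries the pool; the removed code asserted that the requested
        // alignment divides PoolAllocator::kDefaultAlignment.
        angle::PoolAllocator *pool = static_cast<angle::PoolAllocator *>(pUserData);
        return pool->allocate(size);
    }

    VKAPI_ATTR void *VKAPI_CALL PoolReallocationFunction(void *pUserData,
                                                         void *pOriginal,
                                                         size_t size,
                                                         size_t alignment,
                                                         VkSystemAllocationScope allocationScope)
    {
        angle::PoolAllocator *pool = static_cast<angle::PoolAllocator *>(pUserData);
        return pool->reallocate(pOriginal, size);
    }

    // Individual frees are no-ops; the pool releases all of its memory at once.
    VKAPI_ATTR void VKAPI_CALL PoolFreeFunction(void *pUserData, void *pMemory) {}

    void InitPoolAllocationCallbacks(angle::PoolAllocator *pool,
                                     VkAllocationCallbacks *callbacks)
    {
        callbacks->pUserData             = static_cast<void *>(pool);
        callbacks->pfnAllocation         = &PoolAllocationFunction;
        callbacks->pfnReallocation       = &PoolReallocationFunction;
        callbacks->pfnFree               = &PoolFreeFunction;
        callbacks->pfnInternalAllocation = nullptr;  // notification callbacks unused
        callbacks->pfnInternalFree       = nullptr;
    }

The resulting callbacks were passed to CommandPool::init()/destroy(), i.e. to vkCreateCommandPool/vkDestroyCommandPool, so that command-pool host allocations came from the pool and were released in bulk.
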
diff --git a/src/common/PoolAlloc.cpp b/src/common/PoolAlloc.cpp
deleted file mode 100644
index 51b582c..0000000
--- a/src/common/PoolAlloc.cpp
+++ /dev/null
@@ -1,403 +0,0 @@
-//
-// Copyright 2019 The ANGLE Project Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// PoolAlloc.cpp:
-// Implements the class methods for PoolAllocator and Allocation classes.
-//
-
-#include "common/PoolAlloc.h"
-
-#include <assert.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include "common/angleutils.h"
-#include "common/debug.h"
-#include "common/platform.h"
-#include "common/tls.h"
-
-namespace angle
-{
-
-//
-// Implement the functionality of the PoolAllocator class, which
-// is documented in PoolAlloc.h.
-//
-PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
- : alignment(allocationAlignment),
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- pageSize(growthIncrement),
- freeList(0),
- inUseList(0),
- numCalls(0),
- totalBytes(0),
-#endif
- mLocked(false)
-{
- //
- // Adjust alignment to be at least pointer aligned and
- // power of 2.
- //
- size_t minAlign = sizeof(void *);
- alignment &= ~(minAlign - 1);
- if (alignment < minAlign)
- alignment = minAlign;
- size_t a = 1;
- while (a < alignment)
- a <<= 1;
- alignment = a;
- alignmentMask = a - 1;
-
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- //
- // Don't allow page sizes we know are smaller than all common
- // OS page sizes.
- //
- if (pageSize < 4 * 1024)
- pageSize = 4 * 1024;
-
- //
- // A large currentPageOffset indicates a new page needs to
- // be obtained to allocate memory.
- //
- currentPageOffset = pageSize;
-
- //
- // Align header skip
- //
- headerSkip = minAlign;
- if (headerSkip < sizeof(tHeader))
- {
- headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
- }
-#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
- mStack.push_back({});
-#endif
-}
-
-PoolAllocator::PoolAllocator(PoolAllocator &&rhs) noexcept
- : alignment(std::exchange(rhs.alignment, 0)),
- alignmentMask(std::exchange(rhs.alignmentMask, 0)),
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- pageSize(std::exchange(rhs.pageSize, 0)),
- headerSkip(std::exchange(rhs.headerSkip, 0)),
- currentPageOffset(std::exchange(rhs.currentPageOffset, 0)),
- freeList(std::exchange(rhs.freeList, nullptr)),
- inUseList(std::exchange(rhs.inUseList, nullptr)),
- mStack(std::move(rhs.mStack)),
- numCalls(std::exchange(rhs.numCalls, 0)),
- totalBytes(std::exchange(rhs.totalBytes, 0)),
-#else
- mStack(std::move(rhs.mStack)),
-#endif
- mLocked(std::exchange(rhs.mLocked, false))
-{}
-
-PoolAllocator &PoolAllocator::operator=(PoolAllocator &&rhs)
-{
- if (this != &rhs)
- {
- std::swap(alignment, rhs.alignment);
- std::swap(alignmentMask, rhs.alignmentMask);
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- std::swap(pageSize, rhs.pageSize);
- std::swap(headerSkip, rhs.headerSkip);
- std::swap(currentPageOffset, rhs.currentPageOffset);
- std::swap(freeList, rhs.freeList);
- std::swap(inUseList, rhs.inUseList);
- std::swap(numCalls, rhs.numCalls);
- std::swap(totalBytes, rhs.totalBytes);
-#endif
- std::swap(mStack, rhs.mStack);
- std::swap(mLocked, rhs.mLocked);
- }
- return *this;
-}
-
-PoolAllocator::~PoolAllocator()
-{
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- while (inUseList)
- {
- tHeader *next = inUseList->nextPage;
- inUseList->~tHeader();
- delete[] reinterpret_cast<char *>(inUseList);
- inUseList = next;
- }
-
- // We should not check the guard blocks
- // here, because we did it already when the block was
- // placed into the free list.
- //
- while (freeList)
- {
- tHeader *next = freeList->nextPage;
- delete[] reinterpret_cast<char *>(freeList);
- freeList = next;
- }
-#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
- for (auto &allocs : mStack)
- {
- for (auto alloc : allocs)
- {
- free(alloc);
- }
- }
- mStack.clear();
-#endif
-}
-
-//
-// Check a single guard block for damage
-//
-void Allocation::checkGuardBlock(unsigned char *blockMem,
- unsigned char val,
- const char *locText) const
-{
-#ifdef GUARD_BLOCKS
- for (size_t x = 0; x < kGuardBlockSize; x++)
- {
- if (blockMem[x] != val)
- {
- char assertMsg[80];
- // We don't print the assert message. It's here just to be helpful.
- snprintf(assertMsg, sizeof(assertMsg),
- "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, size, data());
-
- assert(0 && "PoolAlloc: Damage in guard block");
- }
- }
-#endif
-}
-
-void PoolAllocator::push()
-{
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- tAllocState state = {currentPageOffset, inUseList};
-
- mStack.push_back(state);
-
- //
- // Indicate there is no current page to allocate from.
- //
- currentPageOffset = pageSize;
-#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
- mStack.push_back({});
-#endif
-}
-
-//
-// Do a mass-deallocation of all the individual allocations
-// that have occurred since the last push(), or since the
-// last pop(), or since the object's creation.
-//
-// The deallocated pages are saved for future allocations.
-//
-void PoolAllocator::pop()
-{
- if (mStack.size() < 1)
- return;
-
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- tHeader *page = mStack.back().page;
- currentPageOffset = mStack.back().offset;
-
- while (inUseList != page)
- {
- // invoke destructor to free allocation list
- inUseList->~tHeader();
-
- tHeader *nextInUse = inUseList->nextPage;
- if (inUseList->pageCount > 1)
- delete[] reinterpret_cast<char *>(inUseList);
- else
- {
- inUseList->nextPage = freeList;
- freeList = inUseList;
- }
- inUseList = nextInUse;
- }
-
- mStack.pop_back();
-#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
- for (auto &alloc : mStack.back())
- {
- free(alloc);
- }
- mStack.pop_back();
-#endif
-}
-
-//
-// Do a mass-deallocation of all the individual allocations
-// that have occurred.
-//
-void PoolAllocator::popAll()
-{
- while (mStack.size() > 0)
- pop();
-}
-
-//
-// Return a pointer to the Allocation Header for an existing memAllocation.
-// Pre-condition: memAllocation must be non-null
-//
-Allocation *PoolAllocator::getAllocationHeader(void *memAllocation) const
-{
- ASSERT(memAllocation != nullptr);
- uint8_t *origAllocAddress = static_cast<uint8_t *>(memAllocation);
- return reinterpret_cast<Allocation *>(origAllocAddress - sizeof(Allocation));
-}
-
-//
-// Do a reallocation, resizing the given originalAllocation to numBytes while
-// preserving the contents of originalAllocation.
-//
-void *PoolAllocator::reallocate(void *originalAllocation, size_t numBytes)
-{
- if (originalAllocation == nullptr)
- {
- return allocate(numBytes);
- }
- if (numBytes == 0)
- {
- // this is a no-op given the current way we use new pool allocators. Memory will be freed
- // when allocator is destroyed.
- return nullptr;
- }
-
- // Compare the original allocation size to requested realloc size
- Allocation *origAllocationHeader = getAllocationHeader(originalAllocation);
- size_t origSize = origAllocationHeader->getSize();
- if (numBytes > origSize)
- {
- // For growing allocation, create new allocation and copy over original contents
- void *newAlloc = allocate(numBytes);
- memcpy(newAlloc, originalAllocation, origSize);
- return newAlloc;
- }
- // For shrinking allocation, shrink size and return original alloc ptr
- origAllocationHeader->setSize(numBytes);
- return originalAllocation;
-}
-
-void *PoolAllocator::allocate(size_t numBytes)
-{
- ASSERT(!mLocked);
-
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- //
- // Just keep some interesting statistics.
- //
- ++numCalls;
- totalBytes += numBytes;
-
- // If we are using guard blocks, all allocations are bracketed by
- // them: [guardblock][allocation][guardblock]. numBytes is how
- // much memory the caller asked for. allocationSize is the total
- // size including guard blocks. In release build,
- // kGuardBlockSize=0 and this all gets optimized away.
- size_t allocationSize = Allocation::allocationSize(numBytes);
- // Detect integer overflow.
- if (allocationSize < numBytes)
- return 0;
-
- //
- // Do the allocation, most likely case first, for efficiency.
- // This step could be moved to be inline sometime.
- //
- if (allocationSize <= pageSize - currentPageOffset)
- {
- //
- // Safe to allocate from currentPageOffset.
- //
- unsigned char *memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
- currentPageOffset += allocationSize;
- currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
-
- return initializeAllocation(inUseList, memory, numBytes);
- }
-
- if (allocationSize > pageSize - headerSkip)
- {
- //
- // Do a multi-page allocation. Don't mix these with the others.
- // The OS is efficient in allocating and freeing multiple pages.
- //
- size_t numBytesToAlloc = allocationSize + headerSkip;
- // Detect integer overflow.
- if (numBytesToAlloc < allocationSize)
- return 0;
-
- tHeader *memory = reinterpret_cast<tHeader *>(::new char[numBytesToAlloc]);
- if (memory == 0)
- return 0;
-
- // Use placement-new to initialize header
- new (memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
- inUseList = memory;
-
- currentPageOffset = pageSize; // make next allocation come from a new page
-
- // No guard blocks for multi-page allocations (yet)
- return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
- }
-
- //
- // Need a simple page to allocate from.
- //
- tHeader *memory;
- if (freeList)
- {
- memory = freeList;
- freeList = freeList->nextPage;
- }
- else
- {
- memory = reinterpret_cast<tHeader *>(::new char[pageSize]);
- if (memory == 0)
- return 0;
- }
-
- // Use placement-new to initialize header
- new (memory) tHeader(inUseList, 1);
- inUseList = memory;
-
- unsigned char *ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
- currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
-
- return initializeAllocation(inUseList, ret, numBytes);
-#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
- void *alloc = malloc(numBytes + alignmentMask);
- mStack.back().push_back(alloc);
-
- intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
- intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
- return reinterpret_cast<void *>(intAlloc);
-#endif
-}
-
-void PoolAllocator::lock()
-{
- ASSERT(!mLocked);
- mLocked = true;
-}
-
-void PoolAllocator::unlock()
-{
- ASSERT(mLocked);
- mLocked = false;
-}
-
-//
-// Check all allocations in a list for damage by calling check on each.
-//
-void Allocation::checkAllocList() const
-{
- for (const Allocation *alloc = this; alloc != 0; alloc = alloc->prevAlloc)
- alloc->check();
-}
-
-} // namespace angle
\ No newline at end of file
diff --git a/src/common/PoolAlloc.h b/src/common/PoolAlloc.h
deleted file mode 100644
index ca85231..0000000
--- a/src/common/PoolAlloc.h
+++ /dev/null
@@ -1,263 +0,0 @@
-//
-// Copyright 2019 The ANGLE Project Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// PoolAlloc.h:
-// Defines the class interface for PoolAllocator and the Allocation
-// class that it uses internally.
-//
-
-#ifndef POOLALLOC_H_
-#define POOLALLOC_H_
-
-#ifdef _DEBUG
-# define GUARD_BLOCKS // define to enable guard block sanity checking
-#endif
-
-//
-// This header defines an allocator that can be used to efficiently
-// allocate a large number of small requests for heap memory, with the
-// intention that they are not individually deallocated, but rather
-// collectively deallocated at one time.
-//
-// This simultaneously
-//
-// * Makes each individual allocation much more efficient; the
-// typical allocation is trivial.
-// * Completely avoids the cost of doing individual deallocation.
-// * Saves the trouble of tracking down and plugging a large class of leaks.
-//
-// Individual classes can use this allocator by supplying their own
-// new and delete methods.
-//
-
-#include <stddef.h>
-#include <string.h>
-#include <vector>
-
-#include "angleutils.h"
-
-namespace angle
-{
-// If we are using guard blocks, we must track each individual
-// allocation. If we aren't using guard blocks, these
-// never get instantiated, so won't have any impact.
-//
-
-class Allocation
-{
- public:
- Allocation(size_t size, unsigned char *mem, Allocation *prev = 0)
- : size(size), mem(mem), prevAlloc(prev)
- {
-// Allocations are bracketed:
-// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
-// This would be cleaner with if (kGuardBlockSize)..., but that
-// makes the compiler print warnings about 0 length memsets,
-// even with the if() protecting them.
-#ifdef GUARD_BLOCKS
- memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
- memset(data(), kUserDataFill, size);
- memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
-#endif
- }
-
- void check() const
- {
- checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
- checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
- }
-
- void checkAllocList() const;
-
- // Return total size needed to accommodate user buffer of 'size',
- // plus our tracking data.
- inline static size_t allocationSize(size_t size)
- {
- return size + 2 * kGuardBlockSize + headerSize();
- }
-
- // Offset from surrounding buffer to get to user data buffer.
- inline static unsigned char *offsetAllocation(unsigned char *m)
- {
- return m + kGuardBlockSize + headerSize();
- }
-
- // Return size of allocation.
- size_t getSize() const { return size; }
- // Set size of allocation
- void setSize(size_t newSize)
- {
- size = newSize;
-#ifdef GUARD_BLOCKS
- // Push post-guard block back now that size is updated
- memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
-#endif
- }
-
- private:
- void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;
-
- // Find offsets to pre and post guard blocks, and user data buffer
- unsigned char *preGuard() const { return mem + headerSize(); }
- unsigned char *data() const { return preGuard() + kGuardBlockSize; }
- unsigned char *postGuard() const { return data() + size; }
-
- size_t size; // size of the user data area
- unsigned char *mem; // beginning of our allocation (pts to header)
- Allocation *prevAlloc; // prior allocation in the chain
-
- static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
- static constexpr unsigned char kGuardBlockEndVal = 0xfe;
- static constexpr unsigned char kUserDataFill = 0xcd;
-#ifdef GUARD_BLOCKS
- static constexpr size_t kGuardBlockSize = 16;
-#else
- static constexpr size_t kGuardBlockSize = 0;
-#endif
-
- inline static size_t headerSize() { return sizeof(Allocation); }
-};
-
-//
-// There are several stacks. One is to track the pushing and popping
-// of the user, and is not yet implemented. The others are simply
-// repositories of free pages or used pages.
-//
-// Page stacks are linked together with a simple header at the beginning
-// of each allocation obtained from the underlying OS. Multi-page allocations
-// are returned to the OS. Individual page allocations are kept for future
-// re-use.
-//
-// The "page size" used is not, nor must it match, the underlying OS
-// page size. But, having it be about that size or equal to a set of
-// pages is likely most optimal.
-//
-class PoolAllocator : angle::NonCopyable
-{
- public:
- static const int kDefaultAlignment = 16;
- PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);
- PoolAllocator(PoolAllocator &&rhs) noexcept;
- PoolAllocator &operator=(PoolAllocator &&);
-
- //
- // Don't call the destructor just to free up the memory, call pop()
- //
- ~PoolAllocator();
-
- //
- // Call push() to establish a new place to pop memory to. Does not
- // have to be called to get things started.
- //
- void push();
-
- //
- // Call pop() to free all memory allocated since the last call to push(),
- // or if no last call to push, frees all memory since first allocation.
- //
- void pop();
-
- //
- // Call popAll() to free all memory allocated.
- //
- void popAll();
-
- //
- // Call allocate() to actually acquire memory. Returns 0 if no memory
- // available, otherwise a properly aligned pointer to 'numBytes' of memory.
- //
- void *allocate(size_t numBytes);
-
- //
- // Call reallocate() to resize a previous allocation. Returns 0 if no memory
- // available, otherwise a properly aligned pointer to 'numBytes' of memory
- // where any contents from the original allocation will be preserved.
- //
- void *reallocate(void *originalAllocation, size_t numBytes);
-
- //
- // There is no deallocate. The point of this class is that
- // deallocation can be skipped by the user of it, as the model
- // of use is to simultaneously deallocate everything at once
- // by calling pop(), and to not have to solve memory leak problems.
- //
-
- // Catch unwanted allocations.
- // TODO(jmadill): Remove this when we remove the global allocator.
- void lock();
- void unlock();
-
- private:
- size_t alignment; // all returned allocations will be aligned at
- // this granularity, which will be a power of 2
- size_t alignmentMask;
-
-#if !defined(ANGLE_DISABLE_POOL_ALLOC)
- friend struct tHeader;
-
- struct tHeader
- {
- tHeader(tHeader *nextPage, size_t pageCount)
- : nextPage(nextPage),
- pageCount(pageCount)
-# ifdef GUARD_BLOCKS
- ,
- lastAllocation(0)
-# endif
- {}
-
- ~tHeader()
- {
-# ifdef GUARD_BLOCKS
- if (lastAllocation)
- lastAllocation->checkAllocList();
-# endif
- }
-
- tHeader *nextPage;
- size_t pageCount;
- Allocation *lastAllocation;
- };
-
- struct tAllocState
- {
- size_t offset;
- tHeader *page;
- };
- typedef std::vector<tAllocState> tAllocStack;
-
- // Track allocations. The Allocation header is always initialized to support reallocation.
- void *initializeAllocation(tHeader *block, unsigned char *memory, size_t numBytes)
- {
- // Init Allocation by default for reallocation support.
- new (memory) Allocation(numBytes, memory, block->lastAllocation);
- block->lastAllocation = reinterpret_cast<Allocation *>(memory);
- return Allocation::offsetAllocation(memory);
- }
-
- Allocation *getAllocationHeader(void *memAllocation) const;
-
- size_t pageSize; // granularity of allocation from the OS
- size_t headerSkip; // amount of memory to skip to make room for the
- // header (basically, size of header, rounded
- // up to make it aligned)
- size_t currentPageOffset; // next offset in top of inUseList to allocate from
- tHeader *freeList; // list of popped memory
- tHeader *inUseList; // list of all memory currently being used
- tAllocStack mStack; // stack of where to allocate from, to partition pool
-
- int numCalls; // just an interesting statistic
- size_t totalBytes; // just an interesting statistic
-
-#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
- std::vector<std::vector<void *>> mStack;
-#endif
-
- bool mLocked;
-};
-
-} // namespace angle
-
-#endif // POOLALLOC_H_
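
As context for the usage model described by the header comments above (push to mark a point, allocate freely, pop to release in bulk), here is a minimal, hypothetical standalone sketch; it is not part of this change, and the sizes and function name are illustrative only:

    #include <string.h>
    #include "common/PoolAlloc.h"

    // Hypothetical usage sketch of the pool allocator API declared above.
    void UsePoolAllocatorSketch()
    {
        angle::PoolAllocator pool;          // default 8 KB growth, 16-byte alignment
        pool.push();                        // remember a point to pop back to

        void *a = pool.allocate(64);        // cheap bump allocation from the current page
        void *b = pool.reallocate(a, 128);  // resize while preserving the old contents
        memset(b, 0, 128);

        // No per-allocation free: pop() releases everything allocated since push(),
        // keeping single-page blocks around for reuse.
        pool.pop();
    }                                       // destructor returns remaining pages to the OS
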
diff --git a/src/compiler/translator/Compiler.cpp b/src/compiler/translator/Compiler.cpp
index ff6c5d0..95082d9 100644
--- a/src/compiler/translator/Compiler.cpp
+++ b/src/compiler/translator/Compiler.cpp
@@ -170,7 +170,7 @@
class TScopedPoolAllocator
{
public:
- TScopedPoolAllocator(angle::PoolAllocator *allocator) : mAllocator(allocator)
+ TScopedPoolAllocator(TPoolAllocator *allocator) : mAllocator(allocator)
{
mAllocator->push();
SetGlobalPoolAllocator(mAllocator);
@@ -182,7 +182,7 @@
}
private:
- angle::PoolAllocator *mAllocator;
+ TPoolAllocator *mAllocator;
};
class TScopedSymbolTableLevel
diff --git a/src/compiler/translator/Compiler.h b/src/compiler/translator/Compiler.h
index eef55d8..2eef387 100644
--- a/src/compiler/translator/Compiler.h
+++ b/src/compiler/translator/Compiler.h
@@ -66,7 +66,7 @@
protected:
// Memory allocator. Allocates and tracks memory required by the compiler.
// Deallocates all memory when compiler is destructed.
- angle::PoolAllocator allocator;
+ TPoolAllocator allocator;
};
//
diff --git a/src/compiler/translator/PoolAlloc.cpp b/src/compiler/translator/PoolAlloc.cpp
index 7a95658..5ad10c2 100644
--- a/src/compiler/translator/PoolAlloc.cpp
+++ b/src/compiler/translator/PoolAlloc.cpp
@@ -7,7 +7,14 @@
#include "compiler/translator/PoolAlloc.h"
#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "common/angleutils.h"
+#include "common/debug.h"
+#include "common/platform.h"
#include "common/tls.h"
+#include "compiler/translator/InitializeGlobals.h"
TLSIndex PoolIndex = TLS_INVALID_INDEX;
@@ -27,14 +34,327 @@
PoolIndex = TLS_INVALID_INDEX;
}
-angle::PoolAllocator *GetGlobalPoolAllocator()
+TPoolAllocator *GetGlobalPoolAllocator()
{
assert(PoolIndex != TLS_INVALID_INDEX);
- return static_cast<angle::PoolAllocator *>(GetTLSValue(PoolIndex));
+ return static_cast<TPoolAllocator *>(GetTLSValue(PoolIndex));
}
-void SetGlobalPoolAllocator(angle::PoolAllocator *poolAllocator)
+void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator)
{
assert(PoolIndex != TLS_INVALID_INDEX);
SetTLSValue(PoolIndex, poolAllocator);
}
+
+//
+// Implement the functionality of the TPoolAllocator class, which
+// is documented in PoolAlloc.h.
+//
+TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment)
+ : alignment(allocationAlignment),
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ pageSize(growthIncrement),
+ freeList(0),
+ inUseList(0),
+ numCalls(0),
+ totalBytes(0),
+#endif
+ mLocked(false)
+{
+ //
+ // Adjust alignment to be at least pointer aligned and
+ // power of 2.
+ //
+ size_t minAlign = sizeof(void *);
+ alignment &= ~(minAlign - 1);
+ if (alignment < minAlign)
+ alignment = minAlign;
+ size_t a = 1;
+ while (a < alignment)
+ a <<= 1;
+ alignment = a;
+ alignmentMask = a - 1;
+
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ //
+ // Don't allow page sizes we know are smaller than all common
+ // OS page sizes.
+ //
+ if (pageSize < 4 * 1024)
+ pageSize = 4 * 1024;
+
+ //
+ // A large currentPageOffset indicates a new page needs to
+ // be obtained to allocate memory.
+ //
+ currentPageOffset = pageSize;
+
+ //
+ // Align header skip
+ //
+ headerSkip = minAlign;
+ if (headerSkip < sizeof(tHeader))
+ {
+ headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
+ }
+#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ mStack.push_back({});
+#endif
+}
+
+TPoolAllocator::~TPoolAllocator()
+{
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ while (inUseList)
+ {
+ tHeader *next = inUseList->nextPage;
+ inUseList->~tHeader();
+ delete[] reinterpret_cast<char *>(inUseList);
+ inUseList = next;
+ }
+
+ // We should not check the guard blocks
+ // here, because we did it already when the block was
+ // placed into the free list.
+ //
+ while (freeList)
+ {
+ tHeader *next = freeList->nextPage;
+ delete[] reinterpret_cast<char *>(freeList);
+ freeList = next;
+ }
+#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ for (auto &allocs : mStack)
+ {
+ for (auto alloc : allocs)
+ {
+ free(alloc);
+ }
+ }
+ mStack.clear();
+#endif
+}
+
+// Support MSVC++ 6.0
+const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
+const unsigned char TAllocation::guardBlockEndVal = 0xfe;
+const unsigned char TAllocation::userDataFill = 0xcd;
+
+#ifdef GUARD_BLOCKS
+const size_t TAllocation::guardBlockSize = 16;
+#else
+const size_t TAllocation::guardBlockSize = 0;
+#endif
+
+//
+// Check a single guard block for damage
+//
+void TAllocation::checkGuardBlock(unsigned char *blockMem,
+ unsigned char val,
+ const char *locText) const
+{
+#ifdef GUARD_BLOCKS
+ for (size_t x = 0; x < guardBlockSize; x++)
+ {
+ if (blockMem[x] != val)
+ {
+ char assertMsg[80];
+
+// We don't print the assert message. It's here just to be helpful.
+# if defined(_MSC_VER)
+ snprintf(assertMsg, sizeof(assertMsg),
+ "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n", locText, size, data());
+# else
+ snprintf(assertMsg, sizeof(assertMsg),
+ "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, size, data());
+# endif
+ assert(0 && "PoolAlloc: Damage in guard block");
+ }
+ }
+#endif
+}
+
+void TPoolAllocator::push()
+{
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ tAllocState state = {currentPageOffset, inUseList};
+
+ mStack.push_back(state);
+
+ //
+ // Indicate there is no current page to allocate from.
+ //
+ currentPageOffset = pageSize;
+#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ mStack.push_back({});
+#endif
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred since the last push(), or since the
+// last pop(), or since the object's creation.
+//
+// The deallocated pages are saved for future allocations.
+//
+void TPoolAllocator::pop()
+{
+ if (mStack.size() < 1)
+ return;
+
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ tHeader *page = mStack.back().page;
+ currentPageOffset = mStack.back().offset;
+
+ while (inUseList != page)
+ {
+ // invoke destructor to free allocation list
+ inUseList->~tHeader();
+
+ tHeader *nextInUse = inUseList->nextPage;
+ if (inUseList->pageCount > 1)
+ delete[] reinterpret_cast<char *>(inUseList);
+ else
+ {
+ inUseList->nextPage = freeList;
+ freeList = inUseList;
+ }
+ inUseList = nextInUse;
+ }
+
+ mStack.pop_back();
+#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ for (auto &alloc : mStack.back())
+ {
+ free(alloc);
+ }
+ mStack.pop_back();
+#endif
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred.
+//
+void TPoolAllocator::popAll()
+{
+ while (mStack.size() > 0)
+ pop();
+}
+
+void *TPoolAllocator::allocate(size_t numBytes)
+{
+ ASSERT(!mLocked);
+
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ //
+ // Just keep some interesting statistics.
+ //
+ ++numCalls;
+ totalBytes += numBytes;
+
+ // If we are using guard blocks, all allocations are bracketed by
+ // them: [guardblock][allocation][guardblock]. numBytes is how
+ // much memory the caller asked for. allocationSize is the total
+ // size including guard blocks. In release build,
+ // guardBlockSize=0 and this all gets optimized away.
+ size_t allocationSize = TAllocation::allocationSize(numBytes);
+ // Detect integer overflow.
+ if (allocationSize < numBytes)
+ return 0;
+
+ //
+ // Do the allocation, most likely case first, for efficiency.
+ // This step could be moved to be inline sometime.
+ //
+ if (allocationSize <= pageSize - currentPageOffset)
+ {
+ //
+ // Safe to allocate from currentPageOffset.
+ //
+ unsigned char *memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
+ currentPageOffset += allocationSize;
+ currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
+
+ return initializeAllocation(inUseList, memory, numBytes);
+ }
+
+ if (allocationSize > pageSize - headerSkip)
+ {
+ //
+ // Do a multi-page allocation. Don't mix these with the others.
+ // The OS is efficient in allocating and freeing multiple pages.
+ //
+ size_t numBytesToAlloc = allocationSize + headerSkip;
+ // Detect integer overflow.
+ if (numBytesToAlloc < allocationSize)
+ return 0;
+
+ tHeader *memory = reinterpret_cast<tHeader *>(::new char[numBytesToAlloc]);
+ if (memory == 0)
+ return 0;
+
+ // Use placement-new to initialize header
+ new (memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
+ inUseList = memory;
+
+ currentPageOffset = pageSize; // make next allocation come from a new page
+
+ // No guard blocks for multi-page allocations (yet)
+ return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
+ }
+
+ //
+ // Need a simple page to allocate from.
+ //
+ tHeader *memory;
+ if (freeList)
+ {
+ memory = freeList;
+ freeList = freeList->nextPage;
+ }
+ else
+ {
+ memory = reinterpret_cast<tHeader *>(::new char[pageSize]);
+ if (memory == 0)
+ return 0;
+ }
+
+ // Use placement-new to initialize header
+ new (memory) tHeader(inUseList, 1);
+ inUseList = memory;
+
+ unsigned char *ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
+ currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
+
+ return initializeAllocation(inUseList, ret, numBytes);
+#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ void *alloc = malloc(numBytes + alignmentMask);
+ mStack.back().push_back(alloc);
+
+ intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
+ intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
+ return reinterpret_cast<void *>(intAlloc);
+#endif
+}
+
+void TPoolAllocator::lock()
+{
+ ASSERT(!mLocked);
+ mLocked = true;
+}
+
+void TPoolAllocator::unlock()
+{
+ ASSERT(mLocked);
+ mLocked = false;
+}
+
+//
+// Check all allocations in a list for damage by calling check on each.
+//
+void TAllocation::checkAllocList() const
+{
+ for (const TAllocation *alloc = this; alloc != 0; alloc = alloc->prevAlloc)
+ alloc->check();
+}
diff --git a/src/compiler/translator/PoolAlloc.h b/src/compiler/translator/PoolAlloc.h
index d25b7b2..56c1d0f 100644
--- a/src/compiler/translator/PoolAlloc.h
+++ b/src/compiler/translator/PoolAlloc.h
@@ -12,26 +12,237 @@
#endif
//
-// This header defines the pool_allocator class that allows STL containers
-// to use the angle::PoolAllocator class by using the pool_allocator
-// class as the allocator (second) template argument.
+// This header defines an allocator that can be used to efficiently
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
+// collectively deallocated at one time.
//
-// It also defines functions for managing the GlobalPoolAllocator used by the compiler.
+// This simultaneously
+//
+// * Makes each individual allocation much more efficient; the
+// typical allocation is trivial.
+// * Completely avoids the cost of doing individual deallocation.
+// * Saves the trouble of tracking down and plugging a large class of leaks.
+//
+// Individual classes can use this allocator by supplying their own
+// new and delete methods.
+//
+// STL containers can use this allocator by using the pool_allocator
+// class as the allocator (second) template argument.
//
#include <stddef.h>
#include <string.h>
#include <vector>
-#include "common/PoolAlloc.h"
+// If we are using guard blocks, we must track each individual
+// allocation. If we aren't using guard blocks, these
+// never get instantiated, so won't have any impact.
+//
+
+class TAllocation
+{
+ public:
+ TAllocation(size_t size, unsigned char *mem, TAllocation *prev = 0)
+ : size(size), mem(mem), prevAlloc(prev)
+ {
+// Allocations are bracketed:
+// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
+// This would be cleaner with if (guardBlockSize)..., but that
+// makes the compiler print warnings about 0 length memsets,
+// even with the if() protecting them.
+#ifdef GUARD_BLOCKS
+ memset(preGuard(), guardBlockBeginVal, guardBlockSize);
+ memset(data(), userDataFill, size);
+ memset(postGuard(), guardBlockEndVal, guardBlockSize);
+#endif
+ }
+
+ void check() const
+ {
+ checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
+ checkGuardBlock(postGuard(), guardBlockEndVal, "after");
+ }
+
+ void checkAllocList() const;
+
+ // Return total size needed to accommodate user buffer of 'size',
+ // plus our tracking data.
+ inline static size_t allocationSize(size_t size)
+ {
+ return size + 2 * guardBlockSize + headerSize();
+ }
+
+ // Offset from surrounding buffer to get to user data buffer.
+ inline static unsigned char *offsetAllocation(unsigned char *m)
+ {
+ return m + guardBlockSize + headerSize();
+ }
+
+ private:
+ void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;
+
+ // Find offsets to pre and post guard blocks, and user data buffer
+ unsigned char *preGuard() const { return mem + headerSize(); }
+ unsigned char *data() const { return preGuard() + guardBlockSize; }
+ unsigned char *postGuard() const { return data() + size; }
+
+ size_t size; // size of the user data area
+ unsigned char *mem; // beginning of our allocation (pts to header)
+ TAllocation *prevAlloc; // prior allocation in the chain
+
+ // Support MSVC++ 6.0
+ const static unsigned char guardBlockBeginVal;
+ const static unsigned char guardBlockEndVal;
+ const static unsigned char userDataFill;
+
+ const static size_t guardBlockSize;
+#ifdef GUARD_BLOCKS
+ inline static size_t headerSize() { return sizeof(TAllocation); }
+#else
+ inline static size_t headerSize() { return 0; }
+#endif
+};
+
+//
+// There are several stacks. One is to track the pushing and popping
+// of the user, and is not yet implemented. The others are simply
+// repositories of free pages or used pages.
+//
+// Page stacks are linked together with a simple header at the beginning
+// of each allocation obtained from the underlying OS. Multi-page allocations
+// are returned to the OS. Individual page allocations are kept for future
+// re-use.
+//
+// The "page size" used is not, nor must it match, the underlying OS
+// page size. But, having it be about that size or equal to a set of
+// pages is likely most optimal.
+//
+class TPoolAllocator
+{
+ public:
+ TPoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = 16);
+
+ //
+ // Don't call the destructor just to free up the memory, call pop()
+ //
+ ~TPoolAllocator();
+
+ //
+ // Call push() to establish a new place to pop memory to. Does not
+ // have to be called to get things started.
+ //
+ void push();
+
+ //
+ // Call pop() to free all memory allocated since the last call to push(),
+ // or if no last call to push, frees all memory since first allocation.
+ //
+ void pop();
+
+ //
+ // Call popAll() to free all memory allocated.
+ //
+ void popAll();
+
+ //
+ // Call allocate() to actually acquire memory. Returns 0 if no memory
+ // available, otherwise a properly aligned pointer to 'numBytes' of memory.
+ //
+ void *allocate(size_t numBytes);
+
+ //
+ // There is no deallocate. The point of this class is that
+ // deallocation can be skipped by the user of it, as the model
+ // of use is to simultaneously deallocate everything at once
+ // by calling pop(), and to not have to solve memory leak problems.
+ //
+
+ // Catch unwanted allocations.
+ // TODO(jmadill): Remove this when we remove the global allocator.
+ void lock();
+ void unlock();
+
+ private:
+ size_t alignment; // all returned allocations will be aligned at
+ // this granularity, which will be a power of 2
+ size_t alignmentMask;
+
+#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ friend struct tHeader;
+
+ struct tHeader
+ {
+ tHeader(tHeader *nextPage, size_t pageCount)
+ : nextPage(nextPage),
+ pageCount(pageCount)
+# ifdef GUARD_BLOCKS
+ ,
+ lastAllocation(0)
+# endif
+ {}
+
+ ~tHeader()
+ {
+# ifdef GUARD_BLOCKS
+ if (lastAllocation)
+ lastAllocation->checkAllocList();
+# endif
+ }
+
+ tHeader *nextPage;
+ size_t pageCount;
+# ifdef GUARD_BLOCKS
+ TAllocation *lastAllocation;
+# endif
+ };
+
+ struct tAllocState
+ {
+ size_t offset;
+ tHeader *page;
+ };
+ typedef std::vector<tAllocState> tAllocStack;
+
+ // Track allocations if and only if we're using guard blocks
+ void *initializeAllocation(tHeader *block, unsigned char *memory, size_t numBytes)
+ {
+# ifdef GUARD_BLOCKS
+ new (memory) TAllocation(numBytes, memory, block->lastAllocation);
+ block->lastAllocation = reinterpret_cast<TAllocation *>(memory);
+# endif
+ // This is optimized entirely away if GUARD_BLOCKS is not defined.
+ return TAllocation::offsetAllocation(memory);
+ }
+
+ size_t pageSize; // granularity of allocation from the OS
+ size_t headerSkip; // amount of memory to skip to make room for the
+ // header (basically, size of header, rounded
+ // up to make it aligned)
+ size_t currentPageOffset; // next offset in top of inUseList to allocate from
+ tHeader *freeList; // list of popped memory
+ tHeader *inUseList; // list of all memory currently being used
+ tAllocStack mStack; // stack of where to allocate from, to partition pool
+
+ int numCalls; // just an interesting statistic
+ size_t totalBytes; // just an interesting statistic
+
+#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
+ std::vector<std::vector<void *>> mStack;
+#endif
+
+ TPoolAllocator &operator=(const TPoolAllocator &); // don't allow assignment operator
+ TPoolAllocator(const TPoolAllocator &); // don't allow default copy constructor
+ bool mLocked;
+};
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
-extern angle::PoolAllocator *GetGlobalPoolAllocator();
-extern void SetGlobalPoolAllocator(angle::PoolAllocator *poolAllocator);
+extern TPoolAllocator *GetGlobalPoolAllocator();
+extern void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator);
//
// This STL compatible allocator is intended to be used as the allocator
@@ -100,7 +311,7 @@
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
- angle::PoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); }
+ TPoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); }
};
#endif // COMPILER_TRANSLATOR_POOLALLOC_H_
diff --git a/src/libANGLE/renderer/vulkan/RendererVk.cpp b/src/libANGLE/renderer/vulkan/RendererVk.cpp
index 8916ad1..912a0dd 100644
--- a/src/libANGLE/renderer/vulkan/RendererVk.cpp
+++ b/src/libANGLE/renderer/vulkan/RendererVk.cpp
@@ -458,54 +458,6 @@
// Initially dumping the command graphs is disabled.
constexpr bool kEnableCommandGraphDiagnostics = false;
-// Custom allocation functions
-VKAPI_ATTR void *VKAPI_CALL PoolAllocationFunction(void *pUserData,
- size_t size,
- size_t alignment,
- VkSystemAllocationScope allocationScope)
-{
- angle::PoolAllocator *poolAllocator = static_cast<angle::PoolAllocator *>(pUserData);
-
- ASSERT((angle::PoolAllocator::kDefaultAlignment % alignment) == 0);
- return poolAllocator->allocate(size);
-}
-
-VKAPI_ATTR void *VKAPI_CALL PoolReallocationFunction(void *pUserData,
- void *pOriginal,
- size_t size,
- size_t alignment,
- VkSystemAllocationScope allocationScope)
-{
- angle::PoolAllocator *poolAllocator = static_cast<angle::PoolAllocator *>(pUserData);
- return poolAllocator->reallocate(pOriginal, size);
-}
-
-VKAPI_ATTR void VKAPI_CALL PoolFreeFunction(void *pUserData, void *pMemory) {}
-
-VKAPI_ATTR void VKAPI_CALL
-PoolInternalAllocationNotification(void *pUserData,
- size_t size,
- VkInternalAllocationType allocationType,
- VkSystemAllocationScope allocationScope)
-{}
-
-VKAPI_ATTR void VKAPI_CALL PoolInternalFreeNotification(void *pUserData,
- size_t size,
- VkInternalAllocationType allocationType,
- VkSystemAllocationScope allocationScope)
-{}
-
-void InitPoolAllocationCallbacks(angle::PoolAllocator *poolAllocator,
- VkAllocationCallbacks *allocationCallbacks)
-{
- allocationCallbacks->pUserData = static_cast<void *>(poolAllocator);
- allocationCallbacks->pfnAllocation = &PoolAllocationFunction;
- allocationCallbacks->pfnReallocation = &PoolReallocationFunction;
- allocationCallbacks->pfnFree = &PoolFreeFunction;
- allocationCallbacks->pfnInternalAllocation = &PoolInternalAllocationNotification;
- allocationCallbacks->pfnInternalFree = &PoolInternalFreeNotification;
-}
-
} // anonymous namespace
// CommandBatch implementation.
@@ -514,25 +466,20 @@
RendererVk::CommandBatch::~CommandBatch() = default;
RendererVk::CommandBatch::CommandBatch(CommandBatch &&other)
- : commandPool(std::move(other.commandPool)),
- poolAllocator(std::move(other.poolAllocator)),
- fence(std::move(other.fence)),
- serial(other.serial)
+ : commandPool(std::move(other.commandPool)), fence(std::move(other.fence)), serial(other.serial)
{}
RendererVk::CommandBatch &RendererVk::CommandBatch::operator=(CommandBatch &&other)
{
std::swap(commandPool, other.commandPool);
- std::swap(poolAllocator, other.poolAllocator);
std::swap(fence, other.fence);
std::swap(serial, other.serial);
return *this;
}
-void RendererVk::CommandBatch::destroy(VkDevice device,
- const VkAllocationCallbacks *allocationCallbacks)
+void RendererVk::CommandBatch::destroy(VkDevice device)
{
- commandPool.destroy(device, allocationCallbacks);
+ commandPool.destroy(device);
fence.destroy(device);
}
@@ -588,7 +535,7 @@
if (mCommandPool.valid())
{
- mCommandPool.destroy(mDevice, &mAllocationCallbacks);
+ mCommandPool.destroy(mDevice);
}
if (mDevice)
@@ -930,14 +877,13 @@
vkGetDeviceQueue(mDevice, mCurrentQueueFamilyIndex, 0, &mQueue);
- InitPoolAllocationCallbacks(&mPoolAllocator, &mAllocationCallbacks);
// Initialize the command pool now that we know the queue family index.
VkCommandPoolCreateInfo commandPoolInfo = {};
commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
commandPoolInfo.queueFamilyIndex = mCurrentQueueFamilyIndex;
- ANGLE_VK_TRY(displayVk, mCommandPool.init(mDevice, commandPoolInfo, &mAllocationCallbacks));
+ ANGLE_VK_TRY(displayVk, mCommandPool.init(mDevice, commandPoolInfo));
// Initialize the vulkan pipeline cache.
ANGLE_TRY(initPipelineCache(displayVk));
@@ -1290,7 +1236,7 @@
ASSERT(status == VK_SUCCESS || status == VK_ERROR_DEVICE_LOST);
}
batch.fence.destroy(mDevice);
- batch.commandPool.destroy(mDevice, &mAllocationCallbacks);
+ batch.commandPool.destroy(mDevice);
}
mInFlightCommands.clear();
@@ -1320,7 +1266,7 @@
mLastCompletedQueueSerial = batch.serial;
batch.fence.destroy(mDevice);
- batch.commandPool.destroy(mDevice, &mAllocationCallbacks);
+ batch.commandPool.destroy(mDevice);
++finishedCount;
}
@@ -1351,7 +1297,7 @@
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = 0;
- vk::ScopedCustomAllocation<CommandBatch> scopedBatch(mDevice, &mAllocationCallbacks);
+ vk::Scoped<CommandBatch> scopedBatch(mDevice);
CommandBatch &batch = scopedBatch.get();
ANGLE_VK_TRY(context, batch.fence.init(mDevice, fenceInfo));
@@ -1359,7 +1305,6 @@
// Store this command buffer in the in-flight list.
batch.commandPool = std::move(mCommandPool);
- batch.poolAllocator = std::move(mPoolAllocator);
batch.serial = mCurrentQueueSerial;
mInFlightCommands.emplace_back(scopedBatch.release());
@@ -1384,13 +1329,12 @@
// Reallocate the command pool for next frame.
// TODO(jmadill): Consider reusing command pools.
- InitPoolAllocationCallbacks(&mPoolAllocator, &mAllocationCallbacks);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
poolInfo.queueFamilyIndex = mCurrentQueueFamilyIndex;
- ANGLE_VK_TRY(context, mCommandPool.init(mDevice, poolInfo, &mAllocationCallbacks));
+ ANGLE_VK_TRY(context, mCommandPool.init(mDevice, poolInfo));
return angle::Result::Continue;
}
@@ -1733,7 +1677,7 @@
//
// Post-submission work Begin execution
//
- // ???? Write timestamp Tgpu
+ // ???? Write timestamp Tgpu
//
// ???? End execution
//
diff --git a/src/libANGLE/renderer/vulkan/RendererVk.h b/src/libANGLE/renderer/vulkan/RendererVk.h
index 73af881..8bd247e 100644
--- a/src/libANGLE/renderer/vulkan/RendererVk.h
+++ b/src/libANGLE/renderer/vulkan/RendererVk.h
@@ -13,7 +13,6 @@
#include <vulkan/vulkan.h>
#include <memory>
-#include "common/PoolAlloc.h"
#include "common/angleutils.h"
#include "libANGLE/BlobCache.h"
#include "libANGLE/Caps.h"
@@ -256,8 +255,6 @@
uint32_t mCurrentQueueFamilyIndex;
VkDevice mDevice;
vk::CommandPool mCommandPool;
- angle::PoolAllocator mPoolAllocator;
- VkAllocationCallbacks mAllocationCallbacks;
SerialFactory mQueueSerialFactory;
SerialFactory mShaderSerialFactory;
Serial mLastCompletedQueueSerial;
@@ -273,10 +270,9 @@
CommandBatch(CommandBatch &&other);
CommandBatch &operator=(CommandBatch &&other);
- void destroy(VkDevice device, const VkAllocationCallbacks *allocationCallbacks);
+ void destroy(VkDevice device);
vk::CommandPool commandPool;
- angle::PoolAllocator poolAllocator;
vk::Fence fence;
Serial serial;
};
diff --git a/src/libANGLE/renderer/vulkan/vk_utils.cpp b/src/libANGLE/renderer/vulkan/vk_utils.cpp
index 4b6e832..be20455 100644
--- a/src/libANGLE/renderer/vulkan/vk_utils.cpp
+++ b/src/libANGLE/renderer/vulkan/vk_utils.cpp
@@ -270,21 +270,19 @@
// CommandPool implementation.
CommandPool::CommandPool() {}
-void CommandPool::destroy(VkDevice device, const VkAllocationCallbacks *allocationCallbacks)
+void CommandPool::destroy(VkDevice device)
{
if (valid())
{
- vkDestroyCommandPool(device, mHandle, allocationCallbacks);
+ vkDestroyCommandPool(device, mHandle, nullptr);
mHandle = VK_NULL_HANDLE;
}
}
-VkResult CommandPool::init(VkDevice device,
- const VkCommandPoolCreateInfo &createInfo,
- const VkAllocationCallbacks *allocationCallbacks)
+VkResult CommandPool::init(VkDevice device, const VkCommandPoolCreateInfo &createInfo)
{
ASSERT(!valid());
- return vkCreateCommandPool(device, &createInfo, allocationCallbacks, &mHandle);
+ return vkCreateCommandPool(device, &createInfo, nullptr, &mHandle);
}
// CommandBuffer implementation.
diff --git a/src/libANGLE/renderer/vulkan/vk_utils.h b/src/libANGLE/renderer/vulkan/vk_utils.h
index 7c3b8e7..027e3eb 100644
--- a/src/libANGLE/renderer/vulkan/vk_utils.h
+++ b/src/libANGLE/renderer/vulkan/vk_utils.h
@@ -290,11 +290,9 @@
public:
CommandPool();
- void destroy(VkDevice device, const VkAllocationCallbacks *allocationCallbacks);
+ void destroy(VkDevice device);
- VkResult init(VkDevice device,
- const VkCommandPoolCreateInfo &createInfo,
- const VkAllocationCallbacks *allocationCallbacks);
+ VkResult init(VkDevice device, const VkCommandPoolCreateInfo &createInfo);
};
class Pipeline final : public WrappedObject<Pipeline, VkPipeline>
@@ -319,7 +317,7 @@
VkCommandBuffer releaseHandle();
- // This is used for normal pool allocated command buffers. It resets the handle.
+ // This is used for normal pool allocated command buffers. It resets the handle.
void destroy(VkDevice device);
// This is used in conjunction with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT.
@@ -793,28 +791,6 @@
T mVar;
};
-// Helper class to handle RAII patterns for initialization. Requires that T have a destroy method
-// that takes a VkDevice & VkAllocationCallbacks ptr and returns void.
-template <typename T>
-class ScopedCustomAllocation final : angle::NonCopyable
-{
- public:
- ScopedCustomAllocation(VkDevice device, const VkAllocationCallbacks *allocationCBs)
- : mDevice(device), mAllocationCallbacks(allocationCBs)
- {}
- ~ScopedCustomAllocation() { mVar.destroy(mDevice, mAllocationCallbacks); }
-
- const T &get() const { return mVar; }
- T &get() { return mVar; }
-
- T &&release() { return std::move(mVar); }
-
- private:
- VkDevice mDevice;
- const VkAllocationCallbacks *mAllocationCallbacks;
- T mVar;
-};
-
// This is a very simple RefCount class that has no autoreleasing. Used in the descriptor set and
// pipeline layout caches.
template <typename T>
diff --git a/src/libGLESv2.gni b/src/libGLESv2.gni
index 0b99124..cc6676a 100644
--- a/src/libGLESv2.gni
+++ b/src/libGLESv2.gni
@@ -17,8 +17,6 @@
"src/common/PackedEnums.h",
"src/common/PackedGLEnums_autogen.cpp",
"src/common/PackedGLEnums_autogen.h",
- "src/common/PoolAlloc.cpp",
- "src/common/PoolAlloc.h",
"src/common/aligned_memory.cpp",
"src/common/aligned_memory.h",
"src/common/angleutils.cpp",
diff --git a/src/tests/compiler_tests/ImmutableString_test.cpp b/src/tests/compiler_tests/ImmutableString_test.cpp
index b17cc19..0e5ad3a 100644
--- a/src/tests/compiler_tests/ImmutableString_test.cpp
+++ b/src/tests/compiler_tests/ImmutableString_test.cpp
@@ -30,7 +30,7 @@
allocator.pop();
}
- angle::PoolAllocator allocator;
+ TPoolAllocator allocator;
};
// Test writing a 32-bit signed int as hexadecimal using ImmutableStringBuilder.
diff --git a/src/tests/compiler_tests/IntermNode_test.cpp b/src/tests/compiler_tests/IntermNode_test.cpp
index 71efdff..b3af0e8 100644
--- a/src/tests/compiler_tests/IntermNode_test.cpp
+++ b/src/tests/compiler_tests/IntermNode_test.cpp
@@ -131,7 +131,7 @@
}
private:
- angle::PoolAllocator allocator;
+ TPoolAllocator allocator;
int mUniqueIndex;
};
diff --git a/src/tests/compiler_tests/RemovePow_test.cpp b/src/tests/compiler_tests/RemovePow_test.cpp
index ecce96e..97299ff 100644
--- a/src/tests/compiler_tests/RemovePow_test.cpp
+++ b/src/tests/compiler_tests/RemovePow_test.cpp
@@ -61,7 +61,7 @@
sh::TranslatorGLSL *mTranslatorGLSL;
TIntermNode *mASTRoot;
- angle::PoolAllocator allocator;
+ TPoolAllocator allocator;
};
// Check if there's a pow() node anywhere in the tree.
diff --git a/src/tests/compiler_tests/Type_test.cpp b/src/tests/compiler_tests/Type_test.cpp
index e09fd11..78ea3c8 100644
--- a/src/tests/compiler_tests/Type_test.cpp
+++ b/src/tests/compiler_tests/Type_test.cpp
@@ -19,7 +19,7 @@
// Verify that mangled name matches between a vector/matrix TType and a corresponding StaticType.
TEST(Type, VectorAndMatrixMangledNameConsistent)
{
- angle::PoolAllocator allocator;
+ TPoolAllocator allocator;
allocator.push();
SetGlobalPoolAllocator(&allocator);
diff --git a/src/tests/perf_tests/CompilerPerf.cpp b/src/tests/perf_tests/CompilerPerf.cpp
index 301bcde..77bc91a 100644
--- a/src/tests/perf_tests/CompilerPerf.cpp
+++ b/src/tests/perf_tests/CompilerPerf.cpp
@@ -197,7 +197,7 @@
case SH_HLSL_4_0_FL9_3_OUTPUT:
case SH_HLSL_3_0_OUTPUT:
{
- angle::PoolAllocator allocator;
+ TPoolAllocator allocator;
InitializePoolIndex();
allocator.push();
SetGlobalPoolAllocator(&allocator);
@@ -259,7 +259,7 @@
const char *mTestShader;
ShBuiltInResources mResources;
- angle::PoolAllocator mAllocator;
+ TPoolAllocator mAllocator;
sh::TCompiler *mTranslator;
};
diff --git a/src/tests/test_utils/ShaderCompileTreeTest.h b/src/tests/test_utils/ShaderCompileTreeTest.h
index 9a2cd31..a821351 100644
--- a/src/tests/test_utils/ShaderCompileTreeTest.h
+++ b/src/tests/test_utils/ShaderCompileTreeTest.h
@@ -53,7 +53,7 @@
private:
TranslatorESSL *mTranslator;
- angle::PoolAllocator mAllocator;
+ TPoolAllocator mAllocator;
};
// Returns true if the node is some kind of a zero node - either constructor or a constant union