Fixes 282230: group allocator for small fixed size, use it for MC_Chunk/sec VBit
* new files include/pub_tool_groupalloc.h and coregrind/m_groupalloc.c
implementing a group allocator (based on helgrind group alloc).
* include/Makefile.am coregrind/Makefile.am : added pub_tool_groupalloc.h
and m_groupalloc.c
* helgrind/libhb_core.c : use pub_tool_groupalloc.h/m_groupalloc.c
instead of the local implementation.
* include/pub_tool_oset.h coregrind/m_oset.c : new function
  to create an oset that will use a pool allocator;
  new function to clone an oset (so as to share the pool alloc)
* memcheck/tests/unit_oset.c drd/tests/unit_bitmap.c : modified
  so that they compile with the new m_oset.c
* memcheck/mc_main.c : use group alloc for MC_Chunk
memcheck/mc_include.h : declare the MC_Chunk group alloc
* memcheck/mc_main.c : use group alloc for the nodes of the secVBitTable OSet
* include/pub_tool_hashtable.h coregrind/m_hashtable.c : pass the free node
function in the VG_(HT_destruct).
  (needed as the hashtable user can allocate a node with its own allocator;
  the hash table destroy must then be able to free the nodes with the
  user's own free).
* coregrind/m_gdbserver/m_gdbserver.c : pass free function to VG_(HT_destruct)
* memcheck/mc_replace_strmem.c memcheck/mc_machine.c
memcheck/mc_malloc_wrappers.c memcheck/mc_leakcheck.c
memcheck/mc_errors.c memcheck/mc_translate.c : new include needed
due to group alloc.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12341 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/helgrind/libhb_core.c b/helgrind/libhb_core.c
index 9be904e..21acf36 100644
--- a/helgrind/libhb_core.c
+++ b/helgrind/libhb_core.c
@@ -31,6 +31,7 @@
*/
#include "pub_tool_basics.h"
+#include "pub_tool_poolalloc.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
@@ -3814,108 +3815,6 @@
/////////////////////////////////////////////////////////
// //
-// A simple group (memory) allocator //
-// //
-/////////////////////////////////////////////////////////
-
-//////////////// BEGIN general group allocator
-typedef
- struct {
- UWord elemSzB; /* element size */
- UWord nPerGroup; /* # elems per group */
- void* (*alloc)(HChar*, SizeT); /* group allocator */
- HChar* cc; /* group allocator's cc */
- void (*free)(void*); /* group allocator's free-er (unused) */
- /* XArray of void* (pointers to groups). The groups themselves.
- Each element is a pointer to a block of size (elemSzB *
- nPerGroup) bytes. */
- XArray* groups;
- /* next free element. Is a pointer to an element in one of the
- groups pointed to by .groups. */
- void* nextFree;
- }
- GroupAlloc;
-
-static void init_GroupAlloc ( /*MOD*/GroupAlloc* ga,
- UWord elemSzB,
- UWord nPerGroup,
- void* (*alloc)(HChar*, SizeT),
- HChar* cc,
- void (*free)(void*) )
-{
- tl_assert(0 == (elemSzB % sizeof(UWord)));
- tl_assert(elemSzB >= sizeof(UWord));
- tl_assert(nPerGroup >= 100); /* let's say */
- tl_assert(alloc);
- tl_assert(cc);
- tl_assert(free);
- tl_assert(ga);
- VG_(memset)(ga, 0, sizeof(*ga));
- ga->elemSzB = elemSzB;
- ga->nPerGroup = nPerGroup;
- ga->groups = NULL;
- ga->alloc = alloc;
- ga->cc = cc;
- ga->free = free;
- ga->groups = VG_(newXA)( alloc, cc, free, sizeof(void*) );
- ga->nextFree = NULL;
- tl_assert(ga->groups);
-}
-
-/* The freelist is empty. Allocate a new group and put all the new
- elements in it onto the freelist. */
-__attribute__((noinline))
-static void gal_add_new_group ( GroupAlloc* ga )
-{
- Word i;
- UWord* group;
- tl_assert(ga);
- tl_assert(ga->nextFree == NULL);
- group = ga->alloc( ga->cc, ga->elemSzB * ga->nPerGroup );
- tl_assert(group);
- /* extend the freelist through the new group. Place the freelist
- pointer in the first word of each element. That's why the
- element size must be at least one word. */
- for (i = ga->nPerGroup-1; i >= 0; i--) {
- UChar* elemC = ((UChar*)group) + i * ga->elemSzB;
- UWord* elem = (UWord*)elemC;
- tl_assert(0 == (((UWord)elem) % sizeof(UWord)));
- *elem = (UWord)ga->nextFree;
- ga->nextFree = elem;
- }
- /* and add to our collection of groups */
- VG_(addToXA)( ga->groups, &group );
-}
-
-inline static void* gal_Alloc ( GroupAlloc* ga )
-{
- UWord* elem;
- if (UNLIKELY(ga->nextFree == NULL)) {
- gal_add_new_group(ga);
- }
- elem = ga->nextFree;
- ga->nextFree = (void*)*elem;
- *elem = 0; /* unnecessary, but just to be on the safe side */
- return elem;
-}
-
-inline static void* gal_Alloc_w_size_check ( GroupAlloc* ga, SizeT n )
-{
- tl_assert(n == ga->elemSzB);
- return gal_Alloc( ga );
-}
-
-inline static void gal_Free ( GroupAlloc* ga, void* p )
-{
- UWord* elem = (UWord*)p;
- *elem = (UWord)ga->nextFree;
- ga->nextFree = elem;
-}
-//////////////// END general group allocator
-
-
-/////////////////////////////////////////////////////////
-// //
// Change-event map2 //
// //
/////////////////////////////////////////////////////////
@@ -3962,7 +3861,7 @@
Investigations also suggest this is very workload and scheduling
sensitive. Therefore a dynamic sizing would be better.
- However, dynamic sizing would defeat the use of a GroupAllocator
+ However, dynamic sizing would defeat the use of a PoolAllocator
for OldRef structures. And that's important for performance. So
it's not straightforward to do.
*/
@@ -4039,18 +3938,18 @@
}
-//////////// BEGIN RCEC group allocator
-static GroupAlloc rcec_group_allocator;
+//////////// BEGIN RCEC pool allocator
+static PoolAlloc* rcec_pool_allocator;
static RCEC* alloc_RCEC ( void ) {
- return gal_Alloc ( &rcec_group_allocator );
+ return VG_(allocEltPA) ( rcec_pool_allocator );
}
static void free_RCEC ( RCEC* rcec ) {
tl_assert(rcec->magic == RCEC_MAGIC);
- gal_Free( &rcec_group_allocator, rcec );
+ VG_(freeEltPA)( rcec_pool_allocator, rcec );
}
-//////////// END RCEC group allocator
+//////////// END RCEC pool allocator
/* Find 'ec' in the RCEC list whose head pointer lives at 'headp' and
@@ -4203,18 +4102,18 @@
OldRef;
-//////////// BEGIN OldRef group allocator
-static GroupAlloc oldref_group_allocator;
+//////////// BEGIN OldRef pool allocator
+static PoolAlloc* oldref_pool_allocator;
static OldRef* alloc_OldRef ( void ) {
- return gal_Alloc ( &oldref_group_allocator );
+ return VG_(allocEltPA) ( oldref_pool_allocator );
}
static void free_OldRef ( OldRef* r ) {
tl_assert(r->magic == OldRef_MAGIC);
- gal_Free( &oldref_group_allocator, r );
+ VG_(freeEltPA)( oldref_pool_allocator, r );
}
-//////////// END OldRef group allocator
+//////////// END OldRef pool allocator
static SparseWA* oldrefTree = NULL; /* SparseWA* OldRef* */
@@ -4499,13 +4398,14 @@
{
Word i;
- /* Context (RCEC) group allocator */
- init_GroupAlloc ( &rcec_group_allocator,
- sizeof(RCEC),
- 1000 /* RCECs per group */,
- HG_(zalloc),
- "libhb.event_map_init.1 (RCEC groups)",
- HG_(free) );
+ /* Context (RCEC) pool allocator */
+ rcec_pool_allocator = VG_(newPA) (
+ sizeof(RCEC),
+ 1000 /* RCECs per pool */,
+ HG_(zalloc),
+ "libhb.event_map_init.1 (RCEC pools)",
+ HG_(free)
+ );
/* Context table */
tl_assert(!contextTab);
@@ -4515,13 +4415,14 @@
for (i = 0; i < N_RCEC_TAB; i++)
contextTab[i] = NULL;
- /* Oldref group allocator */
- init_GroupAlloc ( &oldref_group_allocator,
- sizeof(OldRef),
- 1000 /* OldRefs per group */,
- HG_(zalloc),
- "libhb.event_map_init.3 (OldRef groups)",
- HG_(free) );
+ /* Oldref pool allocator */
+ oldref_pool_allocator = VG_(newPA)(
+ sizeof(OldRef),
+ 1000 /* OldRefs per pool */,
+ HG_(zalloc),
+ "libhb.event_map_init.3 (OldRef pools)",
+ HG_(free)
+ );
/* Oldref tree */
tl_assert(!oldrefTree);