Restructure source tree.

Rename the src/jemalloc_*.c sources to src/*.c, dropping the redundant
jemalloc_ prefix, and remove the internal headers from src/internal/; the
#include directives in each source file now pull the internal headers in via
the jemalloc/internal/ path instead.
diff --git a/jemalloc/src/jemalloc_arena.c b/jemalloc/src/arena.c
similarity index 99%
rename from jemalloc/src/jemalloc_arena.c
rename to jemalloc/src/arena.c
index c3dc382..53d703b 100644
--- a/jemalloc/src/jemalloc_arena.c
+++ b/jemalloc/src/arena.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_ARENA_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc_base.c b/jemalloc/src/base.c
similarity index 97%
rename from jemalloc/src/jemalloc_base.c
rename to jemalloc/src/base.c
index 607f94a..605197e 100644
--- a/jemalloc/src/jemalloc_base.c
+++ b/jemalloc/src/base.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_BASE_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc_chunk.c b/jemalloc/src/chunk.c
similarity index 98%
rename from jemalloc/src/jemalloc_chunk.c
rename to jemalloc/src/chunk.c
index 2554387..e6e3bcd 100644
--- a/jemalloc/src/jemalloc_chunk.c
+++ b/jemalloc/src/chunk.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_CHUNK_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc_chunk_dss.c b/jemalloc/src/chunk_dss.c
similarity index 99%
rename from jemalloc/src/jemalloc_chunk_dss.c
rename to jemalloc/src/chunk_dss.c
index e38b876..d9bd63c 100644
--- a/jemalloc/src/jemalloc_chunk_dss.c
+++ b/jemalloc/src/chunk_dss.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_CHUNK_DSS_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 #ifdef JEMALLOC_DSS
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc_chunk_mmap.c b/jemalloc/src/chunk_mmap.c
similarity index 98%
rename from jemalloc/src/jemalloc_chunk_mmap.c
rename to jemalloc/src/chunk_mmap.c
index d5702f2..4ab9f3d 100644
--- a/jemalloc/src/jemalloc_chunk_mmap.c
+++ b/jemalloc/src/chunk_mmap.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_CHUNK_MMAP_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc_chunk_swap.c b/jemalloc/src/chunk_swap.c
similarity index 99%
rename from jemalloc/src/jemalloc_chunk_swap.c
rename to jemalloc/src/chunk_swap.c
index c185d43..679633c 100644
--- a/jemalloc/src/jemalloc_chunk_swap.c
+++ b/jemalloc/src/chunk_swap.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_CHUNK_SWAP_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 #ifdef JEMALLOC_SWAP
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/ckh.c b/jemalloc/src/ckh.c
index c299279..fd234a4 100644
--- a/jemalloc/src/ckh.c
+++ b/jemalloc/src/ckh.c
@@ -35,7 +35,7 @@
  *
  ******************************************************************************/
 #define	CKH_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
diff --git a/jemalloc/src/jemalloc_ctl.c b/jemalloc/src/ctl.c
similarity index 99%
rename from jemalloc/src/jemalloc_ctl.c
rename to jemalloc/src/ctl.c
index 9775f72..55ad2a7 100644
--- a/jemalloc/src/jemalloc_ctl.c
+++ b/jemalloc/src/ctl.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_CTL_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc_extent.c b/jemalloc/src/extent.c
similarity index 95%
rename from jemalloc/src/jemalloc_extent.c
rename to jemalloc/src/extent.c
index 1aa96a7..7c3ac7a 100644
--- a/jemalloc/src/jemalloc_extent.c
+++ b/jemalloc/src/extent.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_EXTENT_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 
diff --git a/jemalloc/src/hash.c b/jemalloc/src/hash.c
index 6110c7e..6a13d7a 100644
--- a/jemalloc/src/hash.c
+++ b/jemalloc/src/hash.c
@@ -1,2 +1,2 @@
 #define	HASH_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/jemalloc/src/jemalloc_huge.c b/jemalloc/src/huge.c
similarity index 98%
rename from jemalloc/src/jemalloc_huge.c
rename to jemalloc/src/huge.c
index 2936cdd..d35aa5c 100644
--- a/jemalloc/src/jemalloc_huge.c
+++ b/jemalloc/src/huge.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_HUGE_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/internal/ckh.h b/jemalloc/src/internal/ckh.h
deleted file mode 100644
index c39ea5c..0000000
--- a/jemalloc/src/internal/ckh.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct ckh_s ckh_t;
-typedef struct ckhc_s ckhc_t;
-
-/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
-typedef bool ckh_keycomp_t (const void *, const void *);
-
-/* Maintain counters used to get an idea of performance. */
-/* #define	CKH_COUNT */
-/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define	CKH_VERBOSE */
-
-/*
- * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
- * one bucket per L1 cache line.
- */
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-/* Hash table cell. */
-struct ckhc_s {
-	const void	*key;
-	const void	*data;
-};
-
-struct ckh_s {
-#ifdef JEMALLOC_DEBUG
-#define	CKH_MAGIG	0x3af2489d
-	uint32_t	magic;
-#endif
-
-#ifdef CKH_COUNT
-	/* Counters used to get an idea of performance. */
-	uint64_t	ngrows;
-	uint64_t	nshrinks;
-	uint64_t	nshrinkfails;
-	uint64_t	ninserts;
-	uint64_t	nrelocs;
-#endif
-
-	/* Used for pseudo-random number generation. */
-#define	CKH_A		12345
-#define	CKH_C		12347
-	uint32_t	prn_state;
-
-	/* Total number of items. */
-	size_t		count;
-
-	/*
-	 * Minimum and current number of hash table buckets.  There are
-	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
-	 */
-	unsigned	lg_minbuckets;
-	unsigned	lg_curbuckets;
-
-	/* Hash and comparison functions. */
-	ckh_hash_t	*hash;
-	ckh_keycomp_t	*keycomp;
-
-	/* Hash table with 2^lg_curbuckets buckets. */
-	ckhc_t		*tab;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-bool	ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
-    ckh_keycomp_t *keycomp);
-void	ckh_delete(ckh_t *ckh);
-size_t	ckh_count(ckh_t *ckh);
-bool	ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool	ckh_insert(ckh_t *ckh, const void *key, const void *data);
-bool	ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
-    void **data);
-bool	ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
-void	ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
-    size_t *hash2);
-bool	ckh_string_keycomp(const void *k1, const void *k2);
-void	ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
-    size_t *hash2);
-bool	ckh_pointer_keycomp(const void *k1, const void *k2);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
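
A minimal, hypothetical sketch of how the cuckoo-hash API declared above is
used, based only on the prototypes in this header; it assumes the declarations
stay reachable through the relocated "jemalloc/internal/jemalloc_internal.h"
and follows the usual jemalloc convention that these functions return true on
failure:

#include "jemalloc/internal/jemalloc_internal.h"

/* Hypothetical example, not part of this change. */
static bool
example_ckh_usage(void)
{
	ckh_t table;
	void *key, *data;

	/* String-keyed table sized for roughly 64 items. */
	if (ckh_new(&table, 64, ckh_string_hash, ckh_string_keycomp))
		return (true);	/* Allocation failure. */

	if (ckh_insert(&table, "answer", (void *)(uintptr_t)42)) {
		ckh_delete(&table);
		return (true);
	}

	if (ckh_search(&table, "answer", &key, &data) == false)
		assert((uintptr_t)data == 42);

	ckh_delete(&table);
	return (false);
}
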
diff --git a/jemalloc/src/internal/hash.h b/jemalloc/src/internal/hash.h
deleted file mode 100644
index d12cdb8..0000000
--- a/jemalloc/src/internal/hash.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint64_t	hash(const void *key, size_t len, uint64_t seed);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(HASH_C_))
-/*
- * The following hash function is based on MurmurHash64A(), placed into the
- * public domain by Austin Appleby.  See http://murmurhash.googlepages.com/ for
- * details.
- */
-JEMALLOC_INLINE uint64_t
-hash(const void *key, size_t len, uint64_t seed)
-{
-	const uint64_t m = 0xc6a4a7935bd1e995;
-	const int r = 47;
-	uint64_t h = seed ^ (len * m);
-	const uint64_t *data = (const uint64_t *)key;
-	const uint64_t *end = data + (len/8);
-	const unsigned char *data2;
-
-	assert(((uintptr_t)key & 0x7) == 0);
-
-	while(data != end) {
-		uint64_t k = *data++;
-
-		k *= m;
-		k ^= k >> r;
-		k *= m;
-
-		h ^= k;
-		h *= m;
-	}
-
-	data2 = (const unsigned char *)data;
-	switch(len & 7) {
-		case 7: h ^= ((uint64_t)(data2[6])) << 48;
-		case 6: h ^= ((uint64_t)(data2[5])) << 40;
-		case 5: h ^= ((uint64_t)(data2[4])) << 32;
-		case 4: h ^= ((uint64_t)(data2[3])) << 24;
-		case 3: h ^= ((uint64_t)(data2[2])) << 16;
-		case 2: h ^= ((uint64_t)(data2[1])) << 8;
-		case 1: h ^= ((uint64_t)(data2[0]));
-			h *= m;
-	}
-
-	h ^= h >> r;
-	h *= m;
-	h ^= h >> r;
-
-	return h;
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
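
The inline hash() above (derived from MurmurHash64A) takes an 8-byte-aligned
key, a byte length, and a seed. A small, hypothetical caller, with a seed
chosen arbitrarily for illustration:

#include "jemalloc/internal/jemalloc_internal.h"

/* Hypothetical example, not part of this change. */
static uint64_t
example_hash_block(const uint64_t *block, size_t nbytes)
{

	/* block is naturally 8-byte aligned, satisfying hash()'s assert(). */
	return (hash(block, nbytes, UINT64_C(0x94122f33)));
}
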
diff --git a/jemalloc/src/internal/jemalloc_arena.h b/jemalloc/src/internal/jemalloc_arena.h
deleted file mode 100644
index 5596c17..0000000
--- a/jemalloc/src/internal/jemalloc_arena.h
+++ /dev/null
@@ -1,487 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Subpages are an artificially designated partitioning of pages.  Their only
- * purpose is to support subpage-spaced size classes.
- *
- * There must be at least 4 subpages per page, due to the way size classes are
- * handled.
- */
-#define	LG_SUBPAGE		8
-#define	SUBPAGE			((size_t)(1U << LG_SUBPAGE))
-#define	SUBPAGE_MASK		(SUBPAGE - 1)
-
-/* Return the smallest subpage multiple that is >= s. */
-#define	SUBPAGE_CEILING(s)						\
-	(((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
-
-#ifdef JEMALLOC_TINY
-   /* Smallest size class to support. */
-#  define LG_TINY_MIN		1
-#endif
-
-/*
- * Maximum size class that is a multiple of the quantum, but not (necessarily)
- * a power of 2.  Above this size, allocations are rounded up to the nearest
- * power of 2.
- */
-#define	LG_QSPACE_MAX_DEFAULT	7
-
-/*
- * Maximum size class that is a multiple of the cacheline, but not (necessarily)
- * a power of 2.  Above this size, allocations are rounded up to the nearest
- * power of 2.
- */
-#define	LG_CSPACE_MAX_DEFAULT	9
-
-/*
- * Maximum medium size class.  This must not be more than 1/4 of a chunk
- * (LG_MEDIUM_MAX_DEFAULT <= LG_CHUNK_DEFAULT - 2).
- */
-#define	LG_MEDIUM_MAX_DEFAULT	15
-
-/* Return the smallest medium size class that is >= s. */
-#define	MEDIUM_CEILING(s)						\
-	(((s) + mspace_mask) & ~mspace_mask)
-
-/*
- * Soft limit on the number of medium size classes.  Spacing between medium
- * size classes never exceeds pagesize, which can force more than NBINS_MAX
- * medium size classes.
- */
-#define	NMBINS_MAX	16
-
-/*
- * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
- * as small as possible such that this setting is still honored, without
- * violating other constraints.  The goal is to make runs as small as possible
- * without exceeding a per run external fragmentation threshold.
- *
- * We use binary fixed point math for overhead computations, where the binary
- * point is implicitly RUN_BFP bits to the left.
- *
- * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
- * honored for some/all object sizes, since there is one bit of header overhead
- * per object (plus a constant).  This constraint is relaxed (ignored) for runs
- * that are so small that the per-region overhead is greater than:
- *
- *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
- */
-#define	RUN_BFP			12
-/*                                    \/   Implicit binary fixed point. */
-#define	RUN_MAX_OVRHD		0x0000003dU
-#define	RUN_MAX_OVRHD_RELAX	0x00001800U
-
-/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
-#define	RUN_MAX_SMALL							\
-	(arena_maxclass <= (1U << (CHUNK_MAP_LG_PG_RANGE + PAGE_SHIFT))	\
-	    ? arena_maxclass : (1U << (CHUNK_MAP_LG_PG_RANGE +		\
-	    PAGE_SHIFT)))
-
-/*
- * The minimum ratio of active:dirty pages per arena is computed as:
- *
- *   (nactive >> opt_lg_dirty_mult) >= ndirty
- *
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
- */
-#define	LG_DIRTY_MULT_DEFAULT	5
-
-typedef struct arena_chunk_map_s arena_chunk_map_t;
-typedef struct arena_chunk_s arena_chunk_t;
-typedef struct arena_run_s arena_run_t;
-typedef struct arena_bin_s arena_bin_t;
-typedef struct arena_s arena_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-/* Each element of the chunk map corresponds to one page within the chunk. */
-struct arena_chunk_map_s {
-	/*
-	 * Linkage for run trees.  There are two disjoint uses:
-	 *
-	 * 1) arena_t's runs_avail tree.
-	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
-	 *    runs, rather than directly embedding linkage.
-	 */
-	rb_node(arena_chunk_map_t)	link;
-
-#ifdef JEMALLOC_PROF
-	/* Profile counters, used for large object runs. */
-	prof_thr_cnt_t			*prof_cnt;
-#endif
-
-	/*
-	 * Run address (or size) and various flags are stored together.  The bit
-	 * layout looks like (assuming 32-bit system):
-	 *
-	 *   ???????? ???????? ????cccc ccccdzla
-	 *
-	 * ? : Unallocated: Run address for first/last pages, unset for internal
-	 *                  pages.
-	 *     Small/medium: Don't care.
-	 *     Large: Run size for first page, unset for trailing pages.
-	 * - : Unused.
-	 * c : refcount (could overflow for PAGE_SIZE >= 128 KiB)
-	 * d : dirty?
-	 * z : zeroed?
-	 * l : large?
-	 * a : allocated?
-	 *
-	 * Following are example bit patterns for the three types of runs.
-	 *
-	 * p : run page offset
-	 * s : run size
-	 * x : don't care
-	 * - : 0
-	 * [dzla] : bit set
-	 *
-	 *   Unallocated:
-	 *     ssssssss ssssssss ssss---- --------
-	 *     xxxxxxxx xxxxxxxx xxxx---- ----d---
-	 *     ssssssss ssssssss ssss---- -----z--
-	 *
-	 *   Small/medium:
-	 *     pppppppp ppppcccc cccccccc cccc---a
-	 *     pppppppp ppppcccc cccccccc cccc---a
-	 *     pppppppp ppppcccc cccccccc cccc---a
-	 *
-	 *   Large:
-	 *     ssssssss ssssssss ssss---- ------la
-	 *     -------- -------- -------- ------la
-	 *     -------- -------- -------- ------la
-	 */
-	size_t				bits;
-#define	CHUNK_MAP_PG_MASK	((size_t)0xfff00000U)
-#define	CHUNK_MAP_PG_SHIFT	20
-#define	CHUNK_MAP_LG_PG_RANGE	12
-
-#define	CHUNK_MAP_RC_MASK	((size_t)0xffff0U)
-#define	CHUNK_MAP_RC_ONE	((size_t)0x00010U)
-
-#define	CHUNK_MAP_FLAGS_MASK	((size_t)0xfU)
-#define	CHUNK_MAP_DIRTY		((size_t)0x8U)
-#define	CHUNK_MAP_ZEROED	((size_t)0x4U)
-#define	CHUNK_MAP_LARGE		((size_t)0x2U)
-#define	CHUNK_MAP_ALLOCATED	((size_t)0x1U)
-#define	CHUNK_MAP_KEY		(CHUNK_MAP_DIRTY | CHUNK_MAP_ALLOCATED)
-};
-typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
-typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
-
-/* Arena chunk header. */
-struct arena_chunk_s {
-	/* Arena that owns the chunk. */
-	arena_t		*arena;
-
-	/* Linkage for the arena's chunks_dirty tree. */
-	rb_node(arena_chunk_t) link_dirty;
-
-	/*
-	 * True if the chunk is currently in the chunks_dirty tree, due to
-	 * having at some point contained one or more dirty pages.  Removal
-	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
-	 */
-	bool		dirtied;
-
-	/* Number of dirty pages. */
-	size_t		ndirty;
-
-	/* Map of pages within chunk that keeps track of free/large/small. */
-	arena_chunk_map_t map[1]; /* Dynamically sized. */
-};
-typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
-
-struct arena_run_s {
-#ifdef JEMALLOC_DEBUG
-	uint32_t	magic;
-#  define ARENA_RUN_MAGIC 0x384adf93
-#endif
-
-	/* Bin this run is associated with. */
-	arena_bin_t	*bin;
-
-	/* Index of first element that might have a free region. */
-	unsigned	regs_minelm;
-
-	/* Number of free regions in run. */
-	unsigned	nfree;
-
-	/* Bitmask of in-use regions (0: in use, 1: free). */
-	unsigned	regs_mask[1]; /* Dynamically sized. */
-};
-
-struct arena_bin_s {
-	/*
-	 * Current run being used to service allocations of this bin's size
-	 * class.
-	 */
-	arena_run_t	*runcur;
-
-	/*
-	 * Tree of non-full runs.  This tree is used when looking for an
-	 * existing run when runcur is no longer usable.  We choose the
-	 * non-full run that is lowest in memory; this policy tends to keep
-	 * objects packed well, and it can also help reduce the number of
-	 * almost-empty chunks.
-	 */
-	arena_run_tree_t runs;
-
-	/* Size of regions in a run for this bin's size class. */
-	size_t		reg_size;
-
-	/* Total size of a run for this bin's size class. */
-	size_t		run_size;
-
-	/* Total number of regions in a run for this bin's size class. */
-	uint32_t	nregs;
-
-	/* Number of elements in a run's regs_mask for this bin's size class. */
-	uint32_t	regs_mask_nelms;
-
-#ifdef JEMALLOC_PROF
-	/*
-	 * Offset of first (prof_cnt_t *) in a run header for this bin's size
-	 * class, or 0 if (opt_prof == false).
-	 */
-	uint32_t	cnt0_offset;
-#endif
-
-	/* Offset of first region in a run for this bin's size class. */
-	uint32_t	reg0_offset;
-
-#ifdef JEMALLOC_STATS
-	/* Bin statistics. */
-	malloc_bin_stats_t stats;
-#endif
-};
-
-struct arena_s {
-#ifdef JEMALLOC_DEBUG
-	uint32_t		magic;
-#  define ARENA_MAGIC 0x947d3d24
-#endif
-
-	/* This arena's index within the arenas array. */
-	unsigned		ind;
-
-	/* All operations on this arena require that lock be locked. */
-	malloc_mutex_t		lock;
-
-#ifdef JEMALLOC_STATS
-	arena_stats_t		stats;
-#  ifdef JEMALLOC_TCACHE
-	/*
-	 * List of tcaches for extant threads associated with this arena.
-	 * Stats from these are merged incrementally, and at exit.
-	 */
-	ql_head(tcache_t)	tcache_ql;
-#  endif
-#endif
-
-#ifdef JEMALLOC_PROF
-	uint64_t		prof_accumbytes;
-#endif
-
-	/* Tree of dirty-page-containing chunks this arena manages. */
-	arena_chunk_tree_t	chunks_dirty;
-
-	/*
-	 * In order to avoid rapid chunk allocation/deallocation when an arena
-	 * oscillates right on the cusp of needing a new chunk, cache the most
-	 * recently freed chunk.  The spare is left in the arena's chunk trees
-	 * until it is deleted.
-	 *
-	 * There is one spare chunk per arena, rather than one spare total, in
-	 * order to avoid interactions between multiple threads that could make
-	 * a single spare inadequate.
-	 */
-	arena_chunk_t		*spare;
-
-	/* Number of pages in active runs. */
-	size_t			nactive;
-
-	/*
-	 * Current count of pages within unused runs that are potentially
-	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
-	 * By tracking this, we can institute a limit on how much dirty unused
-	 * memory is mapped for each arena.
-	 */
-	size_t			ndirty;
-
-	/*
-	 * Size/address-ordered tree of this arena's available runs.  This tree
-	 * is used for first-best-fit run allocation.
-	 */
-	arena_avail_tree_t	runs_avail;
-
-	/*
-	 * bins is used to store trees of free regions of the following sizes,
-	 * assuming a 16-byte quantum, 4 KiB page size, and default
-	 * JEMALLOC_OPTIONS.
-	 *
-	 *   bins[i] |   size |
-	 *   --------+--------+
-	 *        0  |      2 |
-	 *        1  |      4 |
-	 *        2  |      8 |
-	 *   --------+--------+
-	 *        3  |     16 |
-	 *        4  |     32 |
-	 *        5  |     48 |
-	 *           :        :
-	 *        8  |     96 |
-	 *        9  |    112 |
-	 *       10  |    128 |
-	 *   --------+--------+
-	 *       11  |    192 |
-	 *       12  |    256 |
-	 *       13  |    320 |
-	 *       14  |    384 |
-	 *       15  |    448 |
-	 *       16  |    512 |
-	 *   --------+--------+
-	 *       17  |    768 |
-	 *       18  |   1024 |
-	 *       19  |   1280 |
-	 *           :        :
-	 *       27  |   3328 |
-	 *       28  |   3584 |
-	 *       29  |   3840 |
-	 *   --------+--------+
-	 *       30  |  4 KiB |
-	 *       31  |  6 KiB |
-	 *       33  |  8 KiB |
-	 *           :        :
-	 *       43  | 28 KiB |
-	 *       44  | 30 KiB |
-	 *       45  | 32 KiB |
-	 *   --------+--------+
-	 */
-	arena_bin_t		bins[1]; /* Dynamically sized. */
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t	opt_lg_qspace_max;
-extern size_t	opt_lg_cspace_max;
-extern size_t	opt_lg_medium_max;
-extern ssize_t		opt_lg_dirty_mult;
-extern uint8_t const	*small_size2bin;
-
-/* Various bin-related settings. */
-#ifdef JEMALLOC_TINY		/* Number of (2^n)-spaced tiny bins. */
-#  define		ntbins	((unsigned)(LG_QUANTUM - LG_TINY_MIN))
-#else
-#  define		ntbins	0
-#endif
-extern unsigned		nqbins; /* Number of quantum-spaced bins. */
-extern unsigned		ncbins; /* Number of cacheline-spaced bins. */
-extern unsigned		nsbins; /* Number of subpage-spaced bins. */
-extern unsigned		nmbins; /* Number of medium bins. */
-extern unsigned		nbins;
-extern unsigned		mbin0; /* mbin offset (nbins - nmbins). */
-#ifdef JEMALLOC_TINY
-#  define		tspace_max	((size_t)(QUANTUM >> 1))
-#endif
-#define			qspace_min	QUANTUM
-extern size_t		qspace_max;
-extern size_t		cspace_min;
-extern size_t		cspace_max;
-extern size_t		sspace_min;
-extern size_t		sspace_max;
-#define			small_maxclass	sspace_max
-#define			medium_min	PAGE_SIZE
-extern size_t		medium_max;
-#define			bin_maxclass	medium_max
-
-/* Spacing between medium size classes. */
-extern size_t		lg_mspace;
-extern size_t		mspace_mask;
-
-#define			nlclasses	((chunksize - PAGE_SIZE) >> PAGE_SHIFT)
-
-#ifdef JEMALLOC_TCACHE
-void	arena_tcache_fill(arena_t *arena, tcache_bin_t *tbin, size_t binind
-#  ifdef JEMALLOC_PROF
-    , uint64_t prof_accumbytes
-#  endif
-    );
-#endif
-#ifdef JEMALLOC_PROF
-void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-#endif
-void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
-void	*arena_malloc_medium(arena_t *arena, size_t size, bool zero);
-void	*arena_malloc(size_t size, bool zero);
-void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
-    size_t alloc_size);
-size_t	arena_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
-prof_thr_cnt_t	*arena_prof_cnt_get(const void *ptr);
-void	arena_prof_cnt_set(const void *ptr, prof_thr_cnt_t *cnt);
-#endif
-void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_t *mapelm);
-void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#ifdef JEMALLOC_STATS
-void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats);
-#endif
-void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
-bool	arena_new(arena_t *arena, unsigned ind);
-bool	arena_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
-JEMALLOC_INLINE void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
-	size_t pageind;
-	arena_chunk_map_t *mapelm;
-
-	assert(arena != NULL);
-	assert(arena->magic == ARENA_MAGIC);
-	assert(chunk->arena == arena);
-	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
-	mapelm = &chunk->map[pageind];
-	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
-	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
-		/* Small allocation. */
-#ifdef JEMALLOC_TCACHE
-		tcache_t *tcache;
-
-		if ((tcache = tcache_get()) != NULL)
-			tcache_dalloc(tcache, ptr);
-		else {
-#endif
-			malloc_mutex_lock(&arena->lock);
-			arena_dalloc_bin(arena, chunk, ptr, mapelm);
-			malloc_mutex_unlock(&arena->lock);
-#ifdef JEMALLOC_TCACHE
-		}
-#endif
-	} else
-		arena_dalloc_large(arena, chunk, ptr);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
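
The CHUNK_MAP_* constants above encode the per-page state documented in the
bit-layout comment. Two hypothetical helpers, shown only to make the decoding
concrete (the real code indexes chunk->map[] and tests the bits inline):

/* Hypothetical examples, not part of this change. */
static inline bool
example_mapbits_large(size_t bits)
{

	return ((bits & CHUNK_MAP_LARGE) != 0);
}

static inline size_t
example_mapbits_pageind(size_t bits)
{

	/* Small/medium runs keep the run's page offset in the high bits. */
	return ((bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT);
}
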
diff --git a/jemalloc/src/internal/jemalloc_base.h b/jemalloc/src/internal/jemalloc_base.h
deleted file mode 100644
index e353f30..0000000
--- a/jemalloc/src/internal/jemalloc_base.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern malloc_mutex_t	base_mtx;
-
-void	*base_alloc(size_t size);
-extent_node_t *base_node_alloc(void);
-void	base_node_dealloc(extent_node_t *node);
-bool	base_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/jemalloc/src/internal/jemalloc_chunk.h b/jemalloc/src/internal/jemalloc_chunk.h
deleted file mode 100644
index 00b2e1d..0000000
--- a/jemalloc/src/internal/jemalloc_chunk.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Size and alignment of memory chunks that are allocated by the OS's virtual
- * memory system.
- */
-#define	LG_CHUNK_DEFAULT	22
-
-/* Return the chunk address for allocation address a. */
-#define	CHUNK_ADDR2BASE(a)						\
-	((void *)((uintptr_t)(a) & ~chunksize_mask))
-
-/* Return the chunk offset of address a. */
-#define	CHUNK_ADDR2OFFSET(a)						\
-	((size_t)((uintptr_t)(a) & chunksize_mask))
-
-/* Return the smallest chunk multiple that is >= s. */
-#define	CHUNK_CEILING(s)						\
-	(((s) + chunksize_mask) & ~chunksize_mask)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t		opt_lg_chunk;
-#ifdef JEMALLOC_SWAP
-extern bool		opt_overcommit;
-#endif
-
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t	chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t	stats_chunks;
-#endif
-
-extern size_t		chunksize;
-extern size_t		chunksize_mask; /* (chunksize - 1). */
-extern size_t		chunk_npages;
-extern size_t		arena_chunk_header_npages;
-extern size_t		arena_maxclass; /* Max size class for arenas. */
-
-void	*chunk_alloc(size_t size, bool *zero);
-void	chunk_dealloc(void *chunk, size_t size);
-bool	chunk_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
-#include "internal/jemalloc_chunk_swap.h"
-#include "internal/jemalloc_chunk_dss.h"
-#include "internal/jemalloc_chunk_mmap.h"
diff --git a/jemalloc/src/internal/jemalloc_chunk_dss.h b/jemalloc/src/internal/jemalloc_chunk_dss.h
deleted file mode 100644
index 6be4ad1..0000000
--- a/jemalloc/src/internal/jemalloc_chunk_dss.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifdef JEMALLOC_DSS
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-/*
- * Protects sbrk() calls.  This avoids malloc races among threads, though it
- * does not protect against races with threads that call sbrk() directly.
- */
-extern malloc_mutex_t	dss_mtx;
-
-void	*chunk_alloc_dss(size_t size, bool *zero);
-bool	chunk_dealloc_dss(void *chunk, size_t size);
-bool	chunk_dss_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-#endif /* JEMALLOC_DSS */
diff --git a/jemalloc/src/internal/jemalloc_chunk_mmap.h b/jemalloc/src/internal/jemalloc_chunk_mmap.h
deleted file mode 100644
index 8fb90b7..0000000
--- a/jemalloc/src/internal/jemalloc_chunk_mmap.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void	*chunk_alloc_mmap(size_t size);
-void	chunk_dealloc_mmap(void *chunk, size_t size);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/jemalloc/src/internal/jemalloc_chunk_swap.h b/jemalloc/src/internal/jemalloc_chunk_swap.h
deleted file mode 100644
index d50cb19..0000000
--- a/jemalloc/src/internal/jemalloc_chunk_swap.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifdef JEMALLOC_SWAP
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern malloc_mutex_t	swap_mtx;
-extern bool		swap_enabled;
-extern bool		swap_prezeroed;
-extern size_t		swap_nfds;
-extern int		*swap_fds;
-#ifdef JEMALLOC_STATS
-extern size_t		swap_avail;
-#endif
-
-void	*chunk_alloc_swap(size_t size, bool *zero);
-bool	chunk_dealloc_swap(void *chunk, size_t size);
-bool	chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
-bool	chunk_swap_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
diff --git a/jemalloc/src/internal/jemalloc_ctl.h b/jemalloc/src/internal/jemalloc_ctl.h
deleted file mode 100644
index 64a620a..0000000
--- a/jemalloc/src/internal/jemalloc_ctl.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct ctl_node_s ctl_node_t;
-typedef struct ctl_arena_stats_s ctl_arena_stats_t;
-typedef struct ctl_stats_s ctl_stats_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct ctl_node_s {
-	bool			named;
-	union {
-		struct {
-			const char	*name;
-			/* If (nchildren == 0), this is a terminal node. */
-			unsigned	nchildren;
-			const	ctl_node_t *children;
-		} named;
-		struct {
-			const ctl_node_t *(*index)(const size_t *, size_t,
-			    size_t);
-		} indexed;
-	} u;
-	int	(*ctl)(const size_t *, size_t, void *, size_t *, void *,
-	    size_t);
-};
-
-struct ctl_arena_stats_s {
-	bool			initialized;
-	size_t			pactive;
-	size_t			pdirty;
-#ifdef JEMALLOC_STATS
-	arena_stats_t		astats;
-	malloc_bin_stats_t	*bstats;	/* nbins elements. */
-	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
-#endif
-};
-
-struct ctl_stats_s {
-#ifdef JEMALLOC_STATS
-	size_t			allocated;
-	size_t			active;
-	size_t			mapped;
-	struct {
-		size_t		current;	/* stats_chunks.curchunks */
-		uint64_t	total;		/* stats_chunks.nchunks */
-		size_t		high;		/* stats_chunks.highchunks */
-	} chunks;
-	struct {
-		size_t		allocated;	/* huge_allocated */
-		uint64_t	nmalloc;	/* huge_nmalloc */
-		uint64_t	ndalloc;	/* huge_ndalloc */
-	} huge;
-#endif
-	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
-#ifdef JEMALLOC_SWAP
-	size_t			swap_avail;
-#endif
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-int	ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
-    size_t newlen);
-int	ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-
-int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
-bool	ctl_boot(void);
-
-#define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
-	if (mallctl(name, oldp, oldlenp, newp, newlen) != 0) {		\
-		malloc_write4("<jemalloc>: Invalid xmallctl(\"", name,	\
-		    "\", ...) call\n", "");				\
-		abort();						\
-	}								\
-} while (0)
-
-#define	xmallctlnametomib(name, mibp, miblenp) do {			\
-	if (mallctlnametomib(name, mibp, miblenp) != 0) {		\
-		malloc_write4(						\
-		    "<jemalloc>: Invalid xmallctlnametomib(\"", name,	\
-		    "\", ...) call\n", "");				\
-		abort();						\
-	}								\
-} while (0)
-
-#define	xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
-	if (mallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen)	\
-	    != 0) {							\
-		malloc_write4(						\
-		    "<jemalloc>: Invalid xmallctlbymib() call\n", "",	\
-		    "", "");						\
-		abort();						\
-	}								\
-} while (0)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
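
The xmallctl*() macros above abort on any mallctl() failure, which spares
internal callers from checking return values. A hypothetical read of a
statistic via xmallctl(); the "stats.allocated" name is assumed here purely
for illustration and is not introduced by this change:

/* Hypothetical example; the ctl name is an assumption. */
static size_t
example_read_allocated(void)
{
	size_t allocated, sz = sizeof(allocated);

	xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
	return (allocated);
}
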
diff --git a/jemalloc/src/internal/jemalloc_extent.h b/jemalloc/src/internal/jemalloc_extent.h
deleted file mode 100644
index 33a4e9a..0000000
--- a/jemalloc/src/internal/jemalloc_extent.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct extent_node_s extent_node_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-/* Tree of extents. */
-struct extent_node_s {
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-	/* Linkage for the size/address-ordered tree. */
-	rb_node(extent_node_t)	link_szad;
-#endif
-
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_node_t)	link_ad;
-
-#ifdef JEMALLOC_PROF
-	/* Profile counters, used for huge objects. */
-	prof_thr_cnt_t		*prof_cnt;
-#endif
-
-	/* Pointer to the extent that this tree node is responsible for. */
-	void			*addr;
-
-	/* Total region size. */
-	size_t			size;
-};
-typedef rb_tree(extent_node_t) extent_tree_t;
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
-#endif
-
-rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
diff --git a/jemalloc/src/internal/jemalloc_huge.h b/jemalloc/src/internal/jemalloc_huge.h
deleted file mode 100644
index 3cf32f7..0000000
--- a/jemalloc/src/internal/jemalloc_huge.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_STATS
-/* Huge allocation statistics. */
-extern uint64_t		huge_nmalloc;
-extern uint64_t		huge_ndalloc;
-extern size_t		huge_allocated;
-#endif
-
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t	huge_mtx;
-
-void	*huge_malloc(size_t size, bool zero);
-void	*huge_palloc(size_t alignment, size_t size);
-void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
-void	huge_dalloc(void *ptr);
-size_t	huge_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
-prof_thr_cnt_t	*huge_prof_cnt_get(const void *ptr);
-void	huge_prof_cnt_set(const void *ptr, prof_thr_cnt_t *cnt);
-#endif
-bool	huge_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/jemalloc/src/internal/jemalloc_internal.h.in b/jemalloc/src/internal/jemalloc_internal.h.in
deleted file mode 100644
index d35cdaf..0000000
--- a/jemalloc/src/internal/jemalloc_internal.h.in
+++ /dev/null
@@ -1,554 +0,0 @@
-#include <sys/mman.h>
-#include <sys/param.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <sys/uio.h>
-
-#include <errno.h>
-#include <limits.h>
-#ifndef SIZE_T_MAX
-#  define SIZE_T_MAX	SIZE_MAX
-#endif
-#include <pthread.h>
-#include <sched.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <string.h>
-#include <strings.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <pthread.h>
-
-#define	JEMALLOC_MANGLE
-#include "../jemalloc@install_suffix@.h"
-
-#ifdef JEMALLOC_LAZY_LOCK
-#include <dlfcn.h>
-#endif
-
-#include "internal/rb.h"
-#include "internal/qr.h"
-#include "internal/ql.h"
-
-extern void	(*JEMALLOC_P(malloc_message))(void *w4opaque, const char *p1,
-    const char *p2, const char *p3, const char *p4);
-
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifdef JEMALLOC_DEBUG
-#  define assert(e) do {						\
-	if (!(e)) {							\
-		char line_buf[UMAX2S_BUFSIZE];				\
-		malloc_write4("<jemalloc>: ", __FILE__, ":",		\
-		    umax2s(__LINE__, 10, line_buf));			\
-		malloc_write4(": Failed assertion: ", "\"", #e,		\
-		    "\"\n");						\
-		abort();						\
-	}								\
-} while (0)
-#else
-#define assert(e)
-#endif
-
-/*
- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
- * but there are circular dependencies that cannot be broken without
- * substantial performance degradation.  In order to reduce the effect on
- * visual code flow, read the header files in multiple passes, with one of the
- * following cpp variables defined during each pass:
- *
- *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and psuedo-opaque data
- *                        types.
- *   JEMALLOC_H_STRUCTS : Data structures.
- *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
- *   JEMALLOC_H_INLINES : Inline functions.
- */
-/******************************************************************************/
-#define JEMALLOC_H_TYPES
-
-#define	ZU(z)	((size_t)z)
-
-#ifndef __DECONST
-#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
-#endif
-
-#ifdef JEMALLOC_DEBUG
-   /* Disable inlining to make debugging easier. */
-#  define JEMALLOC_INLINE
-#  define inline
-#else
-#  define JEMALLOC_ENABLE_INLINE
-#  define JEMALLOC_INLINE static inline
-#endif
-
-/* Size of stack-allocated buffer passed to strerror_r(). */
-#define	STRERROR_BUF		64
-
-/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
-#ifdef __i386__
-#  define LG_QUANTUM		4
-#endif
-#ifdef __ia64__
-#  define LG_QUANTUM		4
-#endif
-#ifdef __alpha__
-#  define LG_QUANTUM		4
-#endif
-#ifdef __sparc64__
-#  define LG_QUANTUM		4
-#endif
-#if (defined(__amd64__) || defined(__x86_64__))
-#  define LG_QUANTUM		4
-#endif
-#ifdef __arm__
-#  define LG_QUANTUM		3
-#endif
-#ifdef __mips__
-#  define LG_QUANTUM		3
-#endif
-#ifdef __powerpc__
-#  define LG_QUANTUM		4
-#endif
-#ifdef __s390x__
-#  define LG_QUANTUM		4
-#endif
-
-#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
-#define	QUANTUM_MASK		(QUANTUM - 1)
-
-/* Return the smallest quantum multiple that is >= a. */
-#define	QUANTUM_CEILING(a)						\
-	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
-
-#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
-
-/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
-#if (!defined(PIC) && !defined(NO_TLS))
-#  define NO_TLS
-#endif
-
-/*
- * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
- * In addition, this controls the spacing of cacheline-spaced size classes.
- */
-#define	LG_CACHELINE		6
-#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
-#define	CACHELINE_MASK		(CACHELINE - 1)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define	CACHELINE_CEILING(s)						\
-	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-
-/*
- * Page size.  STATIC_PAGE_SHIFT is determined by the configure script.  If
- * DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
- * compile-time values are required for the purposes of defining data
- * structures.
- */
-#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
-#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
-
-#ifdef DYNAMIC_PAGE_SHIFT
-#  define PAGE_SHIFT	lg_pagesize
-#  define PAGE_SIZE	pagesize
-#  define PAGE_MASK	pagesize_mask
-#else
-#  define PAGE_SHIFT	STATIC_PAGE_SHIFT
-#  define PAGE_SIZE	STATIC_PAGE_SIZE
-#  define PAGE_MASK	STATIC_PAGE_MASK
-#endif
-
-/* Return the smallest pagesize multiple that is >= s. */
-#define	PAGE_CEILING(s)							\
-	(((s) + PAGE_MASK) & ~PAGE_MASK)
-
-#include "internal/prn.h"
-#include "internal/mb.h"
-#include "internal/ckh.h"
-#include "internal/jemalloc_stats.h"
-#include "internal/jemalloc_ctl.h"
-#include "internal/jemalloc_mutex.h"
-#include "internal/jemalloc_extent.h"
-#include "internal/jemalloc_arena.h"
-#include "internal/jemalloc_base.h"
-#include "internal/jemalloc_chunk.h"
-#include "internal/jemalloc_huge.h"
-#include "internal/jemalloc_tcache.h"
-#include "internal/hash.h"
-#include "internal/prof.h"
-
-#undef JEMALLOC_H_TYPES
-/******************************************************************************/
-#define JEMALLOC_H_STRUCTS
-
-#include "internal/prn.h"
-#include "internal/mb.h"
-#include "internal/ckh.h"
-#include "internal/jemalloc_stats.h"
-#include "internal/jemalloc_ctl.h"
-#include "internal/jemalloc_mutex.h"
-#include "internal/jemalloc_extent.h"
-#include "internal/jemalloc_arena.h"
-#include "internal/jemalloc_base.h"
-#include "internal/jemalloc_chunk.h"
-#include "internal/jemalloc_huge.h"
-#include "internal/jemalloc_tcache.h"
-#include "internal/hash.h"
-#include "internal/prof.h"
-
-#undef JEMALLOC_H_STRUCTS
-/******************************************************************************/
-#define JEMALLOC_H_EXTERNS
-
-extern bool	opt_abort;
-#ifdef JEMALLOC_FILL
-extern bool	opt_junk;
-#endif
-#ifdef JEMALLOC_SYSV
-extern bool	opt_sysv;
-#endif
-#ifdef JEMALLOC_XMALLOC
-extern bool	opt_xmalloc;
-#endif
-#ifdef JEMALLOC_FILL
-extern bool	opt_zero;
-#endif
-
-#ifdef DYNAMIC_PAGE_SHIFT
-extern size_t		pagesize;
-extern size_t		pagesize_mask;
-extern size_t		lg_pagesize;
-#endif
-
-/* Number of CPUs. */
-extern unsigned		ncpus;
-
-extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
-#ifndef NO_TLS
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-extern __thread arena_t	*arenas_map JEMALLOC_ATTR(tls_model("initial-exec"));
-#endif
-/*
- * Arenas that are used to service external requests.  Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t		**arenas;
-extern unsigned		narenas;
-
-arena_t	*arenas_extend(unsigned ind);
-#ifndef NO_TLS
-arena_t	*choose_arena_hard(void);
-#endif
-
-#include "internal/prn.h"
-#include "internal/mb.h"
-#include "internal/ckh.h"
-#include "internal/jemalloc_stats.h"
-#include "internal/jemalloc_ctl.h"
-#include "internal/jemalloc_mutex.h"
-#include "internal/jemalloc_extent.h"
-#include "internal/jemalloc_arena.h"
-#include "internal/jemalloc_base.h"
-#include "internal/jemalloc_chunk.h"
-#include "internal/jemalloc_huge.h"
-#include "internal/jemalloc_tcache.h"
-#include "internal/hash.h"
-#include "internal/prof.h"
-
-#undef JEMALLOC_H_EXTERNS
-/******************************************************************************/
-#define JEMALLOC_H_INLINES
-
-#include "internal/prn.h"
-#include "internal/mb.h"
-#include "internal/ckh.h"
-#include "internal/jemalloc_stats.h"
-#include "internal/jemalloc_ctl.h"
-#include "internal/jemalloc_mutex.h"
-#include "internal/jemalloc_extent.h"
-#include "internal/jemalloc_base.h"
-#include "internal/jemalloc_chunk.h"
-#include "internal/jemalloc_huge.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	malloc_write4(const char *p1, const char *p2, const char *p3,
-    const char *p4);
-arena_t	*choose_arena(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-/*
- * Wrapper around malloc_message() that avoids the need for
- * JEMALLOC_P(malloc_message)(...) throughout the code.
- */
-JEMALLOC_INLINE void
-malloc_write4(const char *p1, const char *p2, const char *p3, const char *p4)
-{
-
-	JEMALLOC_P(malloc_message)(NULL, p1, p2, p3, p4);
-}
-
-/*
- * Choose an arena based on a per-thread value (fast-path code, calls slow-path
- * code if necessary).
- */
-JEMALLOC_INLINE arena_t *
-choose_arena(void)
-{
-	arena_t *ret;
-
-	/*
-	 * We can only use TLS if this is a PIC library, since for the static
-	 * library version, libc's malloc is used by TLS allocation, which
-	 * introduces a bootstrapping issue.
-	 */
-#ifndef NO_TLS
-	ret = arenas_map;
-	if (ret == NULL) {
-		ret = choose_arena_hard();
-		assert(ret != NULL);
-	}
-#else
-	if (isthreaded && narenas > 1) {
-		unsigned long ind;
-
-		/*
-		 * Hash pthread_self() to one of the arenas.  There is a prime
-		 * number of arenas, so this has a reasonable chance of
-		 * working.  Even so, the hashing can be easily thwarted by
-		 * inconvenient pthread_self() values.  Without specific
-		 * knowledge of how pthread_self() calculates values, we can't
-		 * easily do much better than this.
-		 */
-		ind = (unsigned long) pthread_self() % narenas;
-
-		/*
-		 * Optimistially assume that arenas[ind] has been initialized.
-		 * At worst, we find out that some other thread has already
-		 * done so, after acquiring the lock in preparation.  Note that
-		 * this lazy locking also has the effect of lazily forcing
-		 * cache coherency; without the lock acquisition, there's no
-		 * guarantee that modification of arenas[ind] by another thread
-		 * would be seen on this CPU for an arbitrary amount of time.
-		 *
-		 * In general, this approach to modifying a synchronized value
-		 * isn't a good idea, but in this case we only ever modify the
-		 * value once, so things work out well.
-		 */
-		ret = arenas[ind];
-		if (ret == NULL) {
-			/*
-			 * Avoid races with another thread that may have already
-			 * initialized arenas[ind].
-			 */
-			malloc_mutex_lock(&arenas_lock);
-			if (arenas[ind] == NULL)
-				ret = arenas_extend((unsigned)ind);
-			else
-				ret = arenas[ind];
-			malloc_mutex_unlock(&arenas_lock);
-		}
-	} else
-		ret = arenas[0];
-#endif
-
-	assert(ret != NULL);
-	return (ret);
-}
-#endif
-
-#include "internal/jemalloc_tcache.h"
-#include "internal/jemalloc_arena.h"
-#include "internal/hash.h"
-#include "internal/prof.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	*imalloc(size_t size);
-void	*icalloc(size_t size);
-void	*ipalloc(size_t alignment, size_t size);
-size_t	isalloc(const void *ptr);
-void	*iralloc(void *ptr, size_t size);
-void	idalloc(void *ptr);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE void *
-imalloc(size_t size)
-{
-
-	assert(size != 0);
-
-	if (size <= arena_maxclass)
-		return (arena_malloc(size, false));
-	else
-		return (huge_malloc(size, false));
-}
-
-JEMALLOC_INLINE void *
-icalloc(size_t size)
-{
-
-	if (size <= arena_maxclass)
-		return (arena_malloc(size, true));
-	else
-		return (huge_malloc(size, true));
-}
-
-JEMALLOC_INLINE void *
-ipalloc(size_t alignment, size_t size)
-{
-	void *ret;
-	size_t ceil_size;
-
-	/*
-	 * Round size up to the nearest multiple of alignment.
-	 *
-	 * This done, we can take advantage of the fact that for each small
-	 * size class, every object is aligned at the smallest power of two
-	 * that is non-zero in the base two representation of the size.  For
-	 * example:
-	 *
-	 *   Size |   Base 2 | Minimum alignment
-	 *   -----+----------+------------------
-	 *     96 |  1100000 |  32
-	 *    144 | 10100000 |  32
-	 *    192 | 11000000 |  64
-	 *
-	 * Depending on runtime settings, it is possible that arena_malloc()
-	 * will further round up to a power of two, but that never causes
-	 * correctness issues.
-	 */
-	ceil_size = (size + (alignment - 1)) & (-alignment);
-	/*
-	 * (ceil_size < size) protects against the combination of maximal
-	 * alignment and size greater than maximal alignment.
-	 */
-	if (ceil_size < size) {
-		/* size_t overflow. */
-		return (NULL);
-	}
-
-	if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE
-	    && ceil_size <= arena_maxclass))
-		ret = arena_malloc(ceil_size, false);
-	else {
-		size_t run_size;
-
-		/*
-		 * We can't achieve subpage alignment, so round up alignment
-		 * permanently; it makes later calculations simpler.
-		 */
-		alignment = PAGE_CEILING(alignment);
-		ceil_size = PAGE_CEILING(size);
-		/*
-		 * (ceil_size < size) protects against very large sizes within
-		 * PAGE_SIZE of SIZE_T_MAX.
-		 *
-		 * (ceil_size + alignment < ceil_size) protects against the
-		 * combination of maximal alignment and ceil_size large enough
-		 * to cause overflow.  This is similar to the first overflow
-		 * check above, but it needs to be repeated due to the new
-		 * ceil_size value, which may now be *equal* to maximal
-		 * alignment, whereas before we only detected overflow if the
-		 * original size was *greater* than maximal alignment.
-		 */
-		if (ceil_size < size || ceil_size + alignment < ceil_size) {
-			/* size_t overflow. */
-			return (NULL);
-		}
-
-		/*
-		 * Calculate the size of the over-size run that arena_palloc()
-		 * would need to allocate in order to guarantee the alignment.
-		 */
-		if (ceil_size >= alignment)
-			run_size = ceil_size + alignment - PAGE_SIZE;
-		else {
-			/*
-			 * It is possible that (alignment << 1) will cause
-			 * overflow, but it doesn't matter because we also
-			 * subtract PAGE_SIZE, which in the case of overflow
-			 * leaves us with a very large run_size.  That causes
-			 * the first conditional below to fail, which means
-			 * that the bogus run_size value never gets used for
-			 * anything important.
-			 */
-			run_size = (alignment << 1) - PAGE_SIZE;
-		}
-
-		if (run_size <= arena_maxclass) {
-			ret = arena_palloc(choose_arena(), alignment, ceil_size,
-			    run_size);
-		} else if (alignment <= chunksize)
-			ret = huge_malloc(ceil_size, false);
-		else
-			ret = huge_palloc(alignment, ceil_size);
-	}
-
-	assert(((uintptr_t)ret & (alignment - 1)) == 0);
-	return (ret);
-}
-
-JEMALLOC_INLINE size_t
-isalloc(const void *ptr)
-{
-	size_t ret;
-	arena_chunk_t *chunk;
-
-	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr) {
-		/* Region. */
-		assert(chunk->arena->magic == ARENA_MAGIC);
-
-		ret = arena_salloc(ptr);
-	} else
-		ret = huge_salloc(ptr);
-
-	return (ret);
-}
-
-JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size)
-{
-	size_t oldsize;
-
-	assert(ptr != NULL);
-	assert(size != 0);
-
-	oldsize = isalloc(ptr);
-
-	if (size <= arena_maxclass)
-		return (arena_ralloc(ptr, size, oldsize));
-	else
-		return (huge_ralloc(ptr, size, oldsize));
-}
-
-JEMALLOC_INLINE void
-idalloc(void *ptr)
-{
-	arena_chunk_t *chunk;
-
-	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
-		arena_dalloc(chunk->arena, chunk, ptr);
-	else
-		huge_dalloc(ptr);
-}
-#endif
-
-#undef JEMALLOC_H_INLINES
-/******************************************************************************/
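
The alignment arithmetic in ipalloc() above rounds size up to a multiple of
alignment with ceil_size = (size + (alignment - 1)) & (-alignment), relying on
alignment being a power of two so that -alignment equals ~(alignment - 1) in
unsigned arithmetic. A hypothetical self-check of that identity:

#include <assert.h>
#include <stddef.h>

/* Hypothetical example, not part of this change. */
static size_t
example_align_ceiling(size_t size, size_t alignment)
{

	return ((size + (alignment - 1)) & (-alignment));
}

static void
example_align_check(void)
{

	assert(example_align_ceiling(100, 64) == 128);	/* (100+63) & ~63. */
	assert(example_align_ceiling(128, 64) == 128);	/* Already aligned. */
}
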
diff --git a/jemalloc/src/internal/jemalloc_mutex.h b/jemalloc/src/internal/jemalloc_mutex.h
deleted file mode 100644
index 6761282..0000000
--- a/jemalloc/src/internal/jemalloc_mutex.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef pthread_mutex_t malloc_mutex_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_LAZY_LOCK
-extern bool isthreaded;
-#else
-#  define isthreaded true
-#endif
-
-bool	malloc_mutex_init(malloc_mutex_t *mutex);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	malloc_mutex_lock(malloc_mutex_t *mutex);
-void	malloc_mutex_unlock(malloc_mutex_t *mutex);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE void
-malloc_mutex_lock(malloc_mutex_t *mutex)
-{
-
-	if (isthreaded)
-		pthread_mutex_lock(mutex);
-}
-
-JEMALLOC_INLINE void
-malloc_mutex_unlock(malloc_mutex_t *mutex)
-{
-
-	if (isthreaded)
-		pthread_mutex_unlock(mutex);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/jemalloc/src/internal/jemalloc_stats.h b/jemalloc/src/internal/jemalloc_stats.h
deleted file mode 100644
index 36dc5fe..0000000
--- a/jemalloc/src/internal/jemalloc_stats.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#define	UMAX2S_BUFSIZE	65
-
-#ifdef JEMALLOC_STATS
-typedef struct tcache_bin_stats_s tcache_bin_stats_t;
-typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_large_stats_s malloc_large_stats_t;
-typedef struct arena_stats_s arena_stats_t;
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-typedef struct chunk_stats_s chunk_stats_t;
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#ifdef JEMALLOC_STATS
-
-#ifdef JEMALLOC_TCACHE
-struct tcache_bin_stats_s {
-	/*
-	 * Number of allocation requests that corresponded to the size of this
-	 * bin.
-	 */
-	uint64_t	nrequests;
-};
-#endif
-
-struct malloc_bin_stats_s {
-	/*
-	 * Number of allocation requests that corresponded to the size of this
-	 * bin.
-	 */
-	uint64_t	nrequests;
-
-#ifdef JEMALLOC_TCACHE
-	/* Number of tcache fills from this bin. */
-	uint64_t	nfills;
-
-	/* Number of tcache flushes to this bin. */
-	uint64_t	nflushes;
-#endif
-
-	/* Total number of runs created for this bin's size class. */
-	uint64_t	nruns;
-
-	/*
-	 * Total number of runs reused by extracting them from the runs tree for
-	 * this bin's size class.
-	 */
-	uint64_t	reruns;
-
-	/* High-water mark for this bin. */
-	size_t		highruns;
-
-	/* Current number of runs in this bin. */
-	size_t		curruns;
-};
-
-struct malloc_large_stats_s {
-	/*
-	 * Number of allocation requests that corresponded to this size class.
-	 */
-	uint64_t	nrequests;
-
-	/* High-water mark for this size class. */
-	size_t		highruns;
-
-	/* Current number of runs of this size class. */
-	size_t		curruns;
-};
-
-struct arena_stats_s {
-	/* Number of bytes currently mapped. */
-	size_t		mapped;
-
-	/*
-	 * Total number of purge sweeps, total number of madvise calls made,
-	 * and total pages purged in order to keep dirty unused memory under
-	 * control.
-	 */
-	uint64_t	npurge;
-	uint64_t	nmadvise;
-	uint64_t	purged;
-
-	/* Per-size-category statistics. */
-	size_t		allocated_small;
-	uint64_t	nmalloc_small;
-	uint64_t	ndalloc_small;
-
-	size_t		allocated_medium;
-	uint64_t	nmalloc_medium;
-	uint64_t	ndalloc_medium;
-
-	size_t		allocated_large;
-	uint64_t	nmalloc_large;
-	uint64_t	ndalloc_large;
-
-	/*
-	 * One element for each possible size class, including sizes that
-	 * overlap with bin size classes.  This is necessary because ipalloc()
-	 * sometimes has to use such large objects in order to assure proper
-	 * alignment.
-	 */
-	malloc_large_stats_t	*lstats;
-};
-#endif /* JEMALLOC_STATS */
-
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-struct chunk_stats_s {
-#  ifdef JEMALLOC_STATS
-	/* Number of chunks that were allocated. */
-	uint64_t	nchunks;
-#  endif
-
-	/* High-water mark for number of chunks allocated. */
-	size_t		highchunks;
-
-	/*
-	 * Current number of chunks allocated.  This value isn't maintained for
-	 * any other purpose, so keep track of it in order to be able to set
-	 * highchunks.
-	 */
-	size_t		curchunks;
-};
-#endif /* JEMALLOC_STATS */
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern bool	opt_stats_print;
-
-char	*umax2s(uintmax_t x, unsigned base, char *s);
-#ifdef JEMALLOC_STATS
-void malloc_cprintf(void (*write4)(void *, const char *, const char *,
-    const char *, const char *), void *w4opaque, const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 3, 4));
-void	malloc_printf(const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 1, 2));
-#endif
-void	stats_print(void (*write4)(void *, const char *, const char *,
-    const char *, const char *), void *w4opaque, const char *opts);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_STATS
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-#endif /* JEMALLOC_STATS */
-/******************************************************************************/
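UMAX2S_BUFSIZE is 65 because a 64-bit value needs up to 64 digits in base 2 plus a terminating NUL. Below is a stand-in for the umax2s() helper declared above, shown only to illustrate the calling convention (caller-provided buffer, pointer into it returned); this is not jemalloc's implementation:

#include <stdint.h>
#include <stdio.h>

#define	DEMO_UMAX2S_BUFSIZE	65

static char *
demo_umax2s(uintmax_t x, unsigned base, char *s)
{
	static const char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
	unsigned i = DEMO_UMAX2S_BUFSIZE - 1;

	s[i] = '\0';
	do {
		s[--i] = digits[x % base];
		x /= base;
	} while (x > 0);
	return (&s[i]);
}

int
main(void)
{
	char buf[DEMO_UMAX2S_BUFSIZE];

	printf("%s\n", demo_umax2s(4096, 10, buf));	/* "4096" */
	printf("%s\n", demo_umax2s(4096, 16, buf));	/* "1000" */
	return (0);
}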
diff --git a/jemalloc/src/internal/jemalloc_tcache.h b/jemalloc/src/internal/jemalloc_tcache.h
deleted file mode 100644
index b499f52..0000000
--- a/jemalloc/src/internal/jemalloc_tcache.h
+++ /dev/null
@@ -1,286 +0,0 @@
-#ifdef JEMALLOC_TCACHE
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct tcache_bin_s tcache_bin_t;
-typedef struct tcache_s tcache_t;
-
-/*
- * Default number of cache slots for each bin in the thread cache (0:
- * disabled).
- */
-#define LG_TCACHE_NSLOTS_DEFAULT	7
-/*
- * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
- * events between full GC sweeps (-1: disabled).  Integer rounding may cause
- * the actual number to be slightly higher, since GC is performed
- * incrementally.
- */
-#define LG_TCACHE_GC_SWEEP_DEFAULT	13
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct tcache_bin_s {
-#  ifdef JEMALLOC_STATS
-	tcache_bin_stats_t tstats;
-#  endif
-	unsigned	low_water;	/* Min # cached since last GC. */
-	unsigned	high_water;	/* Max # cached since last GC. */
-	unsigned	ncached;	/* # of cached objects. */
-	void		*slots[1];	/* Dynamically sized. */
-};
-
-struct tcache_s {
-#  ifdef JEMALLOC_STATS
-	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
-#  endif
-#  ifdef JEMALLOC_PROF
-	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
-#  endif
-	arena_t		*arena;		/* This thread's arena. */
-	unsigned	ev_cnt;		/* Event count since incremental GC. */
-	unsigned	next_gc_bin;	/* Next bin to GC. */
-	tcache_bin_t	*tbins[1];	/* Dynamically sized. */
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t	opt_lg_tcache_nslots;
-extern ssize_t	opt_lg_tcache_gc_sweep;
-
-/* Map of thread-specific caches. */
-extern __thread tcache_t	*tcache_tls
-    JEMALLOC_ATTR(tls_model("initial-exec"));
-
-/*
- * Number of cache slots for each bin in the thread cache, or 0 if tcache is
- * disabled.
- */
-extern size_t			tcache_nslots;
-
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-extern unsigned			tcache_gc_incr;
-
-void	tcache_bin_flush(tcache_bin_t *tbin, size_t binind, unsigned rem
-#ifdef JEMALLOC_PROF
-    , tcache_t *tcache
-#endif
-    );
-tcache_t *tcache_create(arena_t *arena);
-void	tcache_bin_destroy(tcache_t *tcache, tcache_bin_t *tbin,
-    unsigned binind);
-void	*tcache_alloc_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind);
-tcache_bin_t *tcache_bin_create(arena_t *arena);
-void	tcache_destroy(tcache_t *tcache);
-#ifdef JEMALLOC_STATS
-void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-#endif
-void	tcache_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	tcache_event(tcache_t *tcache);
-tcache_t *tcache_get(void);
-void	*tcache_bin_alloc(tcache_bin_t *tbin);
-void	*tcache_alloc(tcache_t *tcache, size_t size, bool zero);
-void	tcache_dalloc(tcache_t *tcache, void *ptr);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-JEMALLOC_INLINE tcache_t *
-tcache_get(void)
-{
-	tcache_t *tcache;
-
-	if (isthreaded == false || tcache_nslots == 0)
-		return (NULL);
-
-	tcache = tcache_tls;
-	if ((uintptr_t)tcache <= (uintptr_t)1) {
-		if (tcache == NULL) {
-			tcache = tcache_create(choose_arena());
-			if (tcache == NULL)
-				return (NULL);
-		} else
-			return (NULL);
-	}
-
-	return (tcache);
-}
-
-JEMALLOC_INLINE void
-tcache_event(tcache_t *tcache)
-{
-
-	if (tcache_gc_incr == 0)
-		return;
-
-	tcache->ev_cnt++;
-	assert(tcache->ev_cnt <= tcache_gc_incr);
-	if (tcache->ev_cnt >= tcache_gc_incr) {
-		size_t binind = tcache->next_gc_bin;
-		tcache_bin_t *tbin = tcache->tbins[binind];
-
-		if (tbin != NULL) {
-			if (tbin->high_water == 0) {
-				/*
-				 * This bin went completely unused for an
-				 * entire GC cycle, so throw away the tbin.
-				 */
-				assert(tbin->ncached == 0);
-				tcache_bin_destroy(tcache, tbin, binind);
-				tcache->tbins[binind] = NULL;
-			} else {
-				if (tbin->low_water > 0) {
-					/*
-					 * Flush (ceiling) half of the objects
-					 * below the low water mark.
-					 */
-					tcache_bin_flush(tbin, binind,
-					    tbin->ncached - (tbin->low_water >>
-					    1) - (tbin->low_water & 1)
-#ifdef JEMALLOC_PROF
-					    , tcache
-#endif
-					    );
-				}
-				tbin->low_water = tbin->ncached;
-				tbin->high_water = tbin->ncached;
-			}
-		}
-
-		tcache->next_gc_bin++;
-		if (tcache->next_gc_bin == nbins)
-			tcache->next_gc_bin = 0;
-		tcache->ev_cnt = 0;
-	}
-}
-
-JEMALLOC_INLINE void *
-tcache_bin_alloc(tcache_bin_t *tbin)
-{
-
-	if (tbin->ncached == 0)
-		return (NULL);
-	tbin->ncached--;
-	if (tbin->ncached < tbin->low_water)
-		tbin->low_water = tbin->ncached;
-	return (tbin->slots[tbin->ncached]);
-}
-
-JEMALLOC_INLINE void *
-tcache_alloc(tcache_t *tcache, size_t size, bool zero)
-{
-	void *ret;
-	tcache_bin_t *tbin;
-	size_t binind;
-
-	if (size <= small_maxclass)
-		binind = small_size2bin[size];
-	else {
-		binind = mbin0 + ((MEDIUM_CEILING(size) - medium_min) >>
-		    lg_mspace);
-	}
-	assert(binind < nbins);
-	tbin = tcache->tbins[binind];
-	if (tbin == NULL) {
-		tbin = tcache_bin_create(tcache->arena);
-		if (tbin == NULL)
-			return (NULL);
-		tcache->tbins[binind] = tbin;
-	}
-
-	ret = tcache_bin_alloc(tbin);
-	if (ret == NULL) {
-		ret = tcache_alloc_hard(tcache, tbin, binind);
-		if (ret == NULL)
-			return (NULL);
-	}
-
-	if (zero == false) {
-#ifdef JEMALLOC_FILL
-		if (opt_junk)
-			memset(ret, 0xa5, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-#endif
-	} else
-		memset(ret, 0, size);
-
-#ifdef JEMALLOC_STATS
-	tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-	tcache->prof_accumbytes += tcache->arena->bins[binind].reg_size;
-#endif
-	tcache_event(tcache);
-	return (ret);
-}
-
-JEMALLOC_INLINE void
-tcache_dalloc(tcache_t *tcache, void *ptr)
-{
-	arena_t *arena;
-	arena_chunk_t *chunk;
-	arena_run_t *run;
-	arena_bin_t *bin;
-	tcache_bin_t *tbin;
-	size_t pageind, binind;
-	arena_chunk_map_t *mapelm;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	arena = chunk->arena;
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
-	mapelm = &chunk->map[pageind];
-	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
-	    ((mapelm->bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT)) <<
-	    PAGE_SHIFT));
-	assert(run->magic == ARENA_RUN_MAGIC);
-	bin = run->bin;
-	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
-	    sizeof(arena_bin_t);
-	assert(binind < nbins);
-
-#ifdef JEMALLOC_FILL
-	if (opt_junk)
-		memset(ptr, 0x5a, arena->bins[binind].reg_size);
-#endif
-
-	tbin = tcache->tbins[binind];
-	if (tbin == NULL) {
-		tbin = tcache_bin_create(choose_arena());
-		if (tbin == NULL) {
-			malloc_mutex_lock(&arena->lock);
-			arena_dalloc_bin(arena, chunk, ptr, mapelm);
-			malloc_mutex_unlock(&arena->lock);
-			return;
-		}
-		tcache->tbins[binind] = tbin;
-	}
-
-	if (tbin->ncached == tcache_nslots)
-		tcache_bin_flush(tbin, binind, (tcache_nslots >> 1)
-#ifdef JEMALLOC_PROF
-		    , tcache
-#endif
-		    );
-	assert(tbin->ncached < tcache_nslots);
-	tbin->slots[tbin->ncached] = ptr;
-	tbin->ncached++;
-	if (tbin->ncached > tbin->high_water)
-		tbin->high_water = tbin->ncached;
-
-	tcache_event(tcache);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-#endif /* JEMALLOC_TCACHE */
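Each tcache bin above is a LIFO of cached pointers whose low_water mark records how far the bin drained since the last GC pass; tcache_event() then flushes (ceiling) half of whatever sat unused for a full cycle. A self-contained toy that mimics just that bookkeeping; the malloc()/free() calls stand in for the real arena refill and flush paths:

#include <stdio.h>
#include <stdlib.h>

#define	DEMO_NSLOTS	8

struct demo_bin {
	unsigned	ncached;	/* Number of cached pointers. */
	unsigned	low_water;	/* Minimum ncached since last GC. */
	void		*slots[DEMO_NSLOTS];
};

static void *
demo_alloc(struct demo_bin *bin)
{

	if (bin->ncached == 0)
		return (malloc(64));	/* Stand-in for an arena refill. */
	bin->ncached--;
	if (bin->ncached < bin->low_water)
		bin->low_water = bin->ncached;
	return (bin->slots[bin->ncached]);
}

static void
demo_dalloc(struct demo_bin *bin, void *ptr)
{

	if (bin->ncached == DEMO_NSLOTS)
		free(bin->slots[--bin->ncached]);	/* Stand-in for a flush. */
	bin->slots[bin->ncached++] = ptr;
}

static void
demo_gc(struct demo_bin *bin)
{

	if (bin->low_water > 0) {
		/* Flush (ceiling) half of the objects below the low water mark. */
		unsigned rem = bin->ncached - (bin->low_water >> 1) -
		    (bin->low_water & 1);

		while (bin->ncached > rem)
			free(bin->slots[--bin->ncached]);
	}
	bin->low_water = bin->ncached;
}

int
main(void)
{
	struct demo_bin bin = {0};

	for (int i = 0; i < 6; i++)
		demo_dalloc(&bin, malloc(64));
	bin.low_water = bin.ncached;		/* Start of a GC cycle. */
	free(demo_alloc(&bin));			/* One object gets reused. */
	demo_gc(&bin);
	printf("cached after GC: %u\n", bin.ncached);
	while (bin.ncached > 0)			/* Cleanup. */
		free(bin.slots[--bin.ncached]);
	return (0);
}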
diff --git a/jemalloc/src/internal/mb.h b/jemalloc/src/internal/mb.h
deleted file mode 100644
index 1707aa9..0000000
--- a/jemalloc/src/internal/mb.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	mb_write(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MB_C_))
-#ifdef __i386__
-/*
- * According to the Intel Architecture Software Developer's Manual, current
- * processors execute instructions in order from the perspective of other
- * processors in a multiprocessor system, but 1) Intel reserves the right to
- * change that, and 2) the compiler's optimizer could re-order instructions if
- * there weren't some form of barrier.  Therefore, even if running on an
- * architecture that does not need memory barriers (everything through at least
- * i686), an "optimizer barrier" is necessary.
- */
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
-#  if 0
-	/* This is a true memory barrier. */
-	asm volatile ("pusha;"
-	    "xor  %%eax,%%eax;"
-	    "cpuid;"
-	    "popa;"
-	    : /* Outputs. */
-	    : /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-#else
-	/*
-	 * This is hopefully enough to keep the compiler from reordering
-	 * instructions around this one.
-	 */
-	asm volatile ("nop;"
-	    : /* Outputs. */
-	    : /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-#endif
-}
-#elif (defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
-	asm volatile ("sfence"
-	    : /* Outputs. */
-	    : /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-#elif defined(__powerpc__)
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
-	asm volatile ("eieio"
-	    : /* Outputs. */
-	    : /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-#elif defined(__sparc64__)
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
-	asm volatile ("membar #StoreStore"
-	    : /* Outputs. */
-	    : /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-#else
-/*
- * This is much slower than a simple memory barrier, but the semantics of mutex
- * unlock make this work.
- */
-JEMALLOC_INLINE void
-mb_write(void)
-{
-	malloc_mutex_t mtx;
-
-	malloc_mutex_init(&mtx);
-	malloc_mutex_lock(&mtx);
-	malloc_mutex_unlock(&mtx);
-}
-#endif
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
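mb_write() above is a store/store barrier: it keeps the stores that fill in an object from being reordered after the store that publishes it. A sketch of the same pattern using a C11 fence instead of the per-architecture asm; it runs single-threaded, so it only illustrates the ordering intent:

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int published;

static void
producer(void)
{

	payload = 42;
	/* Plays the role of mb_write(): order the payload store first. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&published, 1, memory_order_relaxed);
}

static void
consumer(void)
{

	if (atomic_load_explicit(&published, memory_order_acquire))
		printf("payload = %d\n", payload);
}

int
main(void)
{

	producer();
	consumer();
	return (0);
}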
diff --git a/jemalloc/src/internal/prn.h b/jemalloc/src/internal/prn.h
deleted file mode 100644
index 502733c..0000000
--- a/jemalloc/src/internal/prn.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Simple linear congruential pseudo-random number generator:
- *
- *   prn(x) = (a*x + c) % m
- *
- * where the following constants ensure maximal period:
- *
- *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
- *   c == Odd number (relatively prime to 2^n).
- *   m == 2^32
- *
- * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
- *
- * This choice of m has the disadvantage that the quality of the bits is
- * proportional to bit position.  For example, the lowest bit has a cycle of 2,
- * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
- * bits.
- *
- * Macro parameters:
- *   uint32_t r          : Result.
- *   unsigned lg_range   : (0..32], number of least significant bits to return.
- *   uint32_t state      : Seed value.
- *   const uint32_t a, c : See above discussion.
- */
-#define prn(r, lg_range, state, a, c) do {				\
-	assert(lg_range > 0);						\
-	assert(lg_range <= 32);						\
-									\
-	r = (state * (a)) + (c);					\
-	state = r;							\
-	r >>= (32 - lg_range);						\
-} while (false)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
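A short program exercising the recurrence: the macro below restates prn() in standalone form, and the constants are chosen to satisfy the stated constraints (a odd with a-1 divisible by 4, c odd); the particular values are illustrative, not ones jemalloc uses.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	demo_prn(r, lg_range, state, a, c) do {				\
	assert((lg_range) > 0 && (lg_range) <= 32);			\
	r = ((state) * (a)) + (c);					\
	state = r;							\
	r >>= (32 - (lg_range));					\
} while (0)

int
main(void)
{
	uint32_t state = 42;	/* Seed. */

	for (int i = 0; i < 4; i++) {
		uint32_t r;

		/* Take the top 8 bits, which have the longest cycle. */
		demo_prn(r, 8, state, 1103515245U, 12345U);
		printf("%u\n", r);
	}
	return (0);
}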
diff --git a/jemalloc/src/internal/prof.h b/jemalloc/src/internal/prof.h
deleted file mode 100644
index 44e11cb..0000000
--- a/jemalloc/src/internal/prof.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifdef JEMALLOC_PROF
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_cnt_s prof_cnt_t;
-typedef struct prof_thr_cnt_s prof_thr_cnt_t;
-typedef struct prof_ctx_s prof_ctx_t;
-typedef struct prof_s prof_t;
-
-#define	LG_PROF_INTERVAL_DEFAULT	30
-
-/*
- * Hard limit on stack backtrace depth.  Note that the version of
- * prof_backtrace() that is based on __builtin_return_address() necessarily has
- * a hard-coded number of backtrace frame handlers, so increasing
- * LG_PROF_BT_MAX requires changing prof_backtrace().
- */
-#define	LG_PROF_BT_MAX		7
-#define	PROF_BT_MAX		(1U << LG_PROF_BT_MAX)
-
-/* Initial hash table size. */
-#define	PROF_CKH_MINITEMS	64
-
-/* Size of memory buffer to use when writing dump files. */
-#define	PROF_DUMP_BUF_SIZE	65536
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct prof_bt_s {
-	/* Backtrace, stored as len program counters. */
-	void			**vec;
-	unsigned		len;
-};
-
-#ifdef JEMALLOC_PROF_LIBGCC
-/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
-typedef struct {
-	prof_bt_t *bt;
-	unsigned nignore;
-	unsigned max;
-} prof_unwind_data_t;
-#endif
-
-struct prof_cnt_s {
-	/*
-	 * Profiling counters.  An allocation/deallocation pair can operate on
-	 * different prof_thr_cnt_t objects that are linked into the same
-	 * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
-	 * negative.  In principle it is possible for the *bytes counters to
-	 * overflow/underflow, but a general solution would require some form
-	 * of 128-bit counter solution; this implementation doesn't bother to
-	 * solve that problem.
-	 */
-	int64_t		curobjs;
-	int64_t		curbytes;
-	uint64_t	accumobjs;
-	uint64_t	accumbytes;
-};
-
-struct prof_thr_cnt_s {
-	/* Linkage into prof_ctx_t's cnts_ql. */
-	ql_elm(prof_thr_cnt_t)	link;
-
-	/*
-	 * Associated context.  If a thread frees an object that it did not
-	 * allocate, it is possible that the context is not cached in the
-	 * thread's hash table, in which case it must be able to look up the
-	 * context, insert a new prof_thr_cnt_t into the thread's hash table,
-	 * and link it into the prof_ctx_t's cnts_ql.
-	 */
-	prof_ctx_t		*ctx;
-
-	/*
-	 * Threads use memory barriers to update the counters.  Since there is
-	 * only ever one writer, the only challenge is for the reader to get a
-	 * consistent read of the counters.
-	 *
-	 * The writer uses this series of operations:
-	 *
-	 * 1) Increment epoch to an odd number.
-	 * 2) Update counters.
-	 * 3) Increment epoch to an even number.
-	 *
-	 * The reader must assure 1) that the epoch is even while it reads the
-	 * counters, and 2) that the epoch doesn't change between the time it
-	 * starts and finishes reading the counters.
-	 */
-	unsigned		epoch;
-
-	/* Profiling counters. */
-	prof_cnt_t		cnts;
-};
-
-struct prof_ctx_s {
-	/* Protects cnt_merged and cnts_ql. */
-	malloc_mutex_t		lock;
-
-	/* Temporary storage for aggregation during dump. */
-	prof_cnt_t		cnt_dump;
-
-	/* When threads exit, they merge their stats into cnt_merged. */
-	prof_cnt_t		cnt_merged;
-
-	/*
-	 * List of profile counters, one for each thread that has allocated in
-	 * this context.
-	 */
-	ql_head(prof_thr_cnt_t)	cnts_ql;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern bool	opt_prof;
-extern size_t	opt_lg_prof_bt_max; /* Maximum backtrace depth. */
-extern size_t	opt_lg_prof_interval;
-extern bool	opt_prof_udump; /* High-water memory dumping. */
-extern bool	opt_prof_leak; /* Dump leak summary at exit. */
-
-/*
- * Profile dump interval, measured in bytes allocated.  Each arena triggers a
- * profile dump when it reaches this threshold.  The effect is that the
- * interval between profile dumps averages prof_interval, though the actual
- * interval between dumps will tend to be sporadic, and the interval will be a
- * maximum of approximately (prof_interval * narenas).
- */
-extern uint64_t	prof_interval;
-
-bool	prof_init(prof_t *prof, bool master);
-void	prof_destroy(prof_t *prof);
-
-prof_thr_cnt_t	*prof_alloc_prep(void);
-prof_thr_cnt_t	*prof_cnt_get(const void *ptr);
-void	prof_malloc(const void *ptr, prof_thr_cnt_t *cnt);
-void	prof_realloc(const void *ptr, prof_thr_cnt_t *cnt, const void *old_ptr,
-    size_t old_size, prof_thr_cnt_t *old_cnt);
-void	prof_free(const void *ptr);
-void	prof_idump(void);
-void	prof_mdump(void);
-void	prof_udump(void);
-void	prof_boot0(void);
-bool	prof_boot1(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-#endif /* JEMALLOC_PROF */
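The even/odd epoch protocol described for prof_thr_cnt_s is essentially a single-writer seqlock. A stripped-down sketch of the two sides follows; it is single-threaded and omits the memory barriers (such as mb_write()) that a concurrent version needs between the steps:

#include <stdint.h>
#include <stdio.h>

struct demo_cnt {
	uint64_t	curobjs;
	uint64_t	curbytes;
};

struct demo_thr_cnt {
	unsigned	epoch;
	struct demo_cnt	cnts;
};

static void
writer_update(struct demo_thr_cnt *c, uint64_t objs, uint64_t bytes)
{

	c->epoch++;		/* Odd: counters are in flux. */
	c->cnts.curobjs += objs;
	c->cnts.curbytes += bytes;
	c->epoch++;		/* Even again: counters are consistent. */
}

static struct demo_cnt
reader_snapshot(const struct demo_thr_cnt *c)
{
	struct demo_cnt snap;
	unsigned e0, e1;

	do {
		e0 = c->epoch;
		snap = c->cnts;
		e1 = c->epoch;
	} while ((e0 & 1) != 0 || e0 != e1);
	return (snap);
}

int
main(void)
{
	struct demo_thr_cnt c = {0, {0, 0}};
	struct demo_cnt snap;

	writer_update(&c, 1, 4096);
	snap = reader_snapshot(&c);
	printf("curobjs=%llu curbytes=%llu\n",
	    (unsigned long long)snap.curobjs,
	    (unsigned long long)snap.curbytes);
	return (0);
}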
diff --git a/jemalloc/src/internal/ql.h b/jemalloc/src/internal/ql.h
deleted file mode 100644
index a9ed239..0000000
--- a/jemalloc/src/internal/ql.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * List definitions.
- */
-#define ql_head(a_type)							\
-struct {								\
-	a_type *qlh_first;						\
-}
-
-#define ql_head_initializer(a_head) {NULL}
-
-#define ql_elm(a_type)	qr(a_type)
-
-/* List functions. */
-#define ql_new(a_head) do {						\
-	(a_head)->qlh_first = NULL;					\
-} while (0)
-
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-
-#define ql_first(a_head) ((a_head)->qlh_first)
-
-#define ql_last(a_head, a_field)					\
-	((ql_first(a_head) != NULL)					\
-	    ? qr_prev(ql_first(a_head), a_field) : NULL)
-
-#define ql_next(a_head, a_elm, a_field)					\
-	((ql_last(a_head, a_field) != (a_elm))				\
-	    ? qr_next((a_elm), a_field)	: NULL)
-
-#define ql_prev(a_head, a_elm, a_field)					\
-	((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field)	\
-				       : NULL)
-
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do {		\
-	qr_before_insert((a_qlelm), (a_elm), a_field);			\
-	if (ql_first(a_head) == (a_qlelm)) {				\
-		ql_first(a_head) = (a_elm);				\
-	}								\
-} while (0)
-
-#define ql_after_insert(a_qlelm, a_elm, a_field)			\
-	qr_after_insert((a_qlelm), (a_elm), a_field)
-
-#define ql_head_insert(a_head, a_elm, a_field) do {			\
-	if (ql_first(a_head) != NULL) {					\
-		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
-	}								\
-	ql_first(a_head) = (a_elm);					\
-} while (0)
-
-#define ql_tail_insert(a_head, a_elm, a_field) do {			\
-	if (ql_first(a_head) != NULL) {					\
-		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
-	}								\
-	ql_first(a_head) = qr_next((a_elm), a_field);			\
-} while (0)
-
-#define ql_remove(a_head, a_elm, a_field) do {				\
-	if (ql_first(a_head) == (a_elm)) {				\
-		ql_first(a_head) = qr_next(ql_first(a_head), a_field);	\
-	}								\
-	if (ql_first(a_head) != (a_elm)) {				\
-		qr_remove((a_elm), a_field);				\
-	} else {							\
-		ql_first(a_head) = NULL;				\
-	}								\
-} while (0)
-
-#define ql_head_remove(a_head, a_type, a_field) do {			\
-	a_type *t = ql_first(a_head);					\
-	ql_remove((a_head), t, a_field);				\
-} while (0)
-
-#define ql_tail_remove(a_head, a_type, a_field) do {			\
-	a_type *t = ql_last(a_head, a_field);				\
-	ql_remove((a_head), t, a_field);				\
-} while (0)
-
-#define ql_foreach(a_var, a_head, a_field)				\
-	qr_foreach((a_var), ql_first(a_head), a_field)
-
-#define ql_reverse_foreach(a_var, a_head, a_field)			\
-	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
diff --git a/jemalloc/src/internal/qr.h b/jemalloc/src/internal/qr.h
deleted file mode 100644
index fe22352..0000000
--- a/jemalloc/src/internal/qr.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Ring definitions. */
-#define qr(a_type)							\
-struct {								\
-	a_type	*qre_next;						\
-	a_type	*qre_prev;						\
-}
-
-/* Ring functions. */
-#define qr_new(a_qr, a_field) do {					\
-	(a_qr)->a_field.qre_next = (a_qr);				\
-	(a_qr)->a_field.qre_prev = (a_qr);				\
-} while (0)
-
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-
-#define qr_before_insert(a_qrelm, a_qr, a_field) do {			\
-	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev;		\
-	(a_qr)->a_field.qre_next = (a_qrelm);				\
-	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr);		\
-	(a_qrelm)->a_field.qre_prev = (a_qr);				\
-} while (0)
-
-#define qr_after_insert(a_qrelm, a_qr, a_field)				\
-    do									\
-    {									\
-	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next;		\
-	(a_qr)->a_field.qre_prev = (a_qrelm);				\
-	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr);		\
-	(a_qrelm)->a_field.qre_next = (a_qr);				\
-    } while (0)
-
-#define qr_meld(a_qr_a, a_qr_b, a_field) do {				\
-	void *t;							\
-	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b);	\
-	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a);	\
-	t = (a_qr_a)->a_field.qre_prev;					\
-	(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev;	\
-	(a_qr_b)->a_field.qre_prev = t;					\
-} while (0)
-
-/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code. */
-#define qr_split(a_qr_a, a_qr_b, a_field)				\
-	qr_meld((a_qr_a), (a_qr_b), a_field)
-
-#define qr_remove(a_qr, a_field) do {					\
-	(a_qr)->a_field.qre_prev->a_field.qre_next			\
-	    = (a_qr)->a_field.qre_next;					\
-	(a_qr)->a_field.qre_next->a_field.qre_prev			\
-	    = (a_qr)->a_field.qre_prev;					\
-	(a_qr)->a_field.qre_next = (a_qr);				\
-	(a_qr)->a_field.qre_prev = (a_qr);				\
-} while (0)
-
-#define qr_foreach(var, a_qr, a_field)					\
-	for ((var) = (a_qr);						\
-	    (var) != NULL;						\
-	    (var) = (((var)->a_field.qre_next != (a_qr))		\
-	    ? (var)->a_field.qre_next : NULL))
-
-#define qr_reverse_foreach(var, a_qr, a_field)				\
-	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL;	\
-	    (var) != NULL;						\
-	    (var) = (((var) != (a_qr))					\
-	    ? (var)->a_field.qre_prev : NULL))
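The ql_/qr_ macros above implement intrusive circular lists: a node carries its own linkage and a head just points at the first element. A small usage sketch, assuming the two headers are reachable on the include path (the plain file names here are illustrative):

#include <stdio.h>

#include "qr.h"
#include "ql.h"

typedef struct widget_s widget_t;
struct widget_s {
	int		id;
	ql_elm(widget_t) link;		/* Intrusive linkage. */
};

int
main(void)
{
	ql_head(widget_t) head = ql_head_initializer(head);
	widget_t a = {1}, b = {2}, c = {3};
	widget_t *w;

	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_elm_new(&c, link);
	ql_tail_insert(&head, &a, link);
	ql_tail_insert(&head, &b, link);
	ql_tail_insert(&head, &c, link);

	ql_foreach(w, &head, link)
		printf("widget %d\n", w->id);
	return (0);
}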
diff --git a/jemalloc/src/internal/rb.h b/jemalloc/src/internal/rb.h
deleted file mode 100644
index 21d06ea..0000000
--- a/jemalloc/src/internal/rb.h
+++ /dev/null
@@ -1,945 +0,0 @@
-/*-
- * cpp macro implementation of left-leaning red-black trees.
- *
- * Usage:
- *
- *   (Optional, see assert(3).)
- *   #define NDEBUG
- *
- *   (Required.)
- *   #include <assert.h>
- *   #include <rb.h>
- *   ...
- *
- * All operations are done non-recursively.  Parent pointers are not used, and
- * color bits are stored in the least significant bit of right-child pointers,
- * thus making node linkage as compact as is possible for red-black trees.
- *
- * Some macros use a comparison function pointer, which is expected to have the
- * following prototype:
- *
- *   int (*a_cmp)(a_type *a_node, a_type *a_other);
- *                          ^^^^^^
- *                       or a_key
- *
- * Interpretation of comparison function return values:
- *
- *   -1 : a_node <  a_other
- *    0 : a_node == a_other
- *    1 : a_node >  a_other
- *
- * In all cases, the a_node or a_key macro argument is the first argument to the
- * comparison function, which makes it possible to write comparison functions
- * that treat the first argument specially.
- *
- ******************************************************************************/
-
-#ifndef RB_H_
-#define	RB_H_
-
-#if 0
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/rb.h,v 1.4 2008/05/14 18:33:13 jasone Exp $");
-#endif
-
-/* Node structure. */
-#define	rb_node(a_type)							\
-struct {								\
-    a_type *rbn_left;							\
-    a_type *rbn_right_red;						\
-}
-
-/* Root structure. */
-#define	rb_tree(a_type)							\
-struct {								\
-    a_type *rbt_root;							\
-    a_type rbt_nil;							\
-}
-
-/* Left accessors. */
-#define	rbp_left_get(a_type, a_field, a_node)				\
-    ((a_node)->a_field.rbn_left)
-#define	rbp_left_set(a_type, a_field, a_node, a_left) do {		\
-    (a_node)->a_field.rbn_left = a_left;				\
-} while (0)
-
-/* Right accessors. */
-#define	rbp_right_get(a_type, a_field, a_node)				\
-    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red)		\
-      & ((ssize_t)-2)))
-#define	rbp_right_set(a_type, a_field, a_node, a_right) do {		\
-    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right)	\
-      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1)));	\
-} while (0)
-
-/* Color accessors. */
-#define	rbp_red_get(a_type, a_field, a_node)				\
-    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red)		\
-      & ((size_t)1)))
-#define	rbp_color_set(a_type, a_field, a_node, a_red) do {		\
-    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t)		\
-      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2))			\
-      | ((ssize_t)a_red));						\
-} while (0)
-#define	rbp_red_set(a_type, a_field, a_node) do {			\
-    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t)		\
-      (a_node)->a_field.rbn_right_red) | ((size_t)1));			\
-} while (0)
-#define	rbp_black_set(a_type, a_field, a_node) do {			\
-    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)		\
-      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));		\
-} while (0)
-
-/* Node initializer. */
-#define	rbp_node_new(a_type, a_field, a_tree, a_node) do {		\
-    rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil);	\
-    rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil);	\
-    rbp_red_set(a_type, a_field, (a_node));				\
-} while (0)
-
-/* Tree initializer. */
-#define	rb_new(a_type, a_field, a_tree) do {				\
-    (a_tree)->rbt_root = &(a_tree)->rbt_nil;				\
-    rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil);		\
-    rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil);			\
-} while (0)
-
-/* Tree operations. */
-#define	rbp_black_height(a_type, a_field, a_tree, r_height) do {	\
-    a_type *rbp_bh_t;							\
-    for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0;			\
-      rbp_bh_t != &(a_tree)->rbt_nil;					\
-      rbp_bh_t = rbp_left_get(a_type, a_field, rbp_bh_t)) {		\
-	if (rbp_red_get(a_type, a_field, rbp_bh_t) == false) {		\
-	    (r_height)++;						\
-	}								\
-    }									\
-} while (0)
-
-#define	rbp_first(a_type, a_field, a_tree, a_root, r_node) do {		\
-    for ((r_node) = (a_root);						\
-      rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil;	\
-      (r_node) = rbp_left_get(a_type, a_field, (r_node))) {		\
-    }									\
-} while (0)
-
-#define	rbp_last(a_type, a_field, a_tree, a_root, r_node) do {		\
-    for ((r_node) = (a_root);						\
-      rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil;	\
-      (r_node) = rbp_right_get(a_type, a_field, (r_node))) {		\
-    }									\
-} while (0)
-
-#define	rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {	\
-    if (rbp_right_get(a_type, a_field, (a_node))			\
-      != &(a_tree)->rbt_nil) {						\
-	rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type,	\
-	  a_field, (a_node)), (r_node));				\
-    } else {								\
-	a_type *rbp_n_t = (a_tree)->rbt_root;				\
-	assert(rbp_n_t != &(a_tree)->rbt_nil);				\
-	(r_node) = &(a_tree)->rbt_nil;					\
-	while (true) {							\
-	    int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t);			\
-	    if (rbp_n_cmp < 0) {					\
-		(r_node) = rbp_n_t;					\
-		rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t);	\
-	    } else if (rbp_n_cmp > 0) {					\
-		rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t);	\
-	    } else {							\
-		break;							\
-	    }								\
-	    assert(rbp_n_t != &(a_tree)->rbt_nil);			\
-	}								\
-    }									\
-} while (0)
-
-#define	rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {	\
-    if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
-	rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type,		\
-	  a_field, (a_node)), (r_node));				\
-    } else {								\
-	a_type *rbp_p_t = (a_tree)->rbt_root;				\
-	assert(rbp_p_t != &(a_tree)->rbt_nil);				\
-	(r_node) = &(a_tree)->rbt_nil;					\
-	while (true) {							\
-	    int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t);			\
-	    if (rbp_p_cmp < 0) {					\
-		rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t);	\
-	    } else if (rbp_p_cmp > 0) {					\
-		(r_node) = rbp_p_t;					\
-		rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t);	\
-	    } else {							\
-		break;							\
-	    }								\
-	    assert(rbp_p_t != &(a_tree)->rbt_nil);			\
-	}								\
-    }									\
-} while (0)
-
-#define	rb_first(a_type, a_field, a_tree, r_node) do {			\
-    rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node));	\
-    if ((r_node) == &(a_tree)->rbt_nil) {				\
-	(r_node) = NULL;						\
-    }									\
-} while (0)
-
-#define	rb_last(a_type, a_field, a_tree, r_node) do {			\
-    rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, r_node);	\
-    if ((r_node) == &(a_tree)->rbt_nil) {				\
-	(r_node) = NULL;						\
-    }									\
-} while (0)
-
-#define	rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {	\
-    rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node));	\
-    if ((r_node) == &(a_tree)->rbt_nil) {				\
-	(r_node) = NULL;						\
-    }									\
-} while (0)
-
-#define	rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {	\
-    rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node));	\
-    if ((r_node) == &(a_tree)->rbt_nil) {				\
-	(r_node) = NULL;						\
-    }									\
-} while (0)
-
-#define	rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {	\
-    int rbp_se_cmp;							\
-    (r_node) = (a_tree)->rbt_root;					\
-    while ((r_node) != &(a_tree)->rbt_nil				\
-      && (rbp_se_cmp = (a_cmp)((a_key), (r_node))) != 0) {		\
-	if (rbp_se_cmp < 0) {						\
-	    (r_node) = rbp_left_get(a_type, a_field, (r_node));		\
-	} else {							\
-	    (r_node) = rbp_right_get(a_type, a_field, (r_node));	\
-	}								\
-    }									\
-    if ((r_node) == &(a_tree)->rbt_nil) {				\
-	(r_node) = NULL;						\
-    }									\
-} while (0)
-
-/*
- * Find a match if it exists.  Otherwise, find the next greater node, if one
- * exists.
- */
-#define	rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {	\
-    a_type *rbp_ns_t = (a_tree)->rbt_root;				\
-    (r_node) = NULL;							\
-    while (rbp_ns_t != &(a_tree)->rbt_nil) {				\
-	int rbp_ns_cmp = (a_cmp)((a_key), rbp_ns_t);			\
-	if (rbp_ns_cmp < 0) {						\
-	    (r_node) = rbp_ns_t;					\
-	    rbp_ns_t = rbp_left_get(a_type, a_field, rbp_ns_t);		\
-	} else if (rbp_ns_cmp > 0) {					\
-	    rbp_ns_t = rbp_right_get(a_type, a_field, rbp_ns_t);	\
-	} else {							\
-	    (r_node) = rbp_ns_t;					\
-	    break;							\
-	}								\
-    }									\
-} while (0)
-
-/*
- * Find a match if it exists.  Otherwise, find the previous lesser node, if one
- * exists.
- */
-#define	rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {	\
-    a_type *rbp_ps_t = (a_tree)->rbt_root;				\
-    (r_node) = NULL;							\
-    while (rbp_ps_t != &(a_tree)->rbt_nil) {				\
-	int rbp_ps_cmp = (a_cmp)((a_key), rbp_ps_t);			\
-	if (rbp_ps_cmp < 0) {						\
-	    rbp_ps_t = rbp_left_get(a_type, a_field, rbp_ps_t);		\
-	} else if (rbp_ps_cmp > 0) {					\
-	    (r_node) = rbp_ps_t;					\
-	    rbp_ps_t = rbp_right_get(a_type, a_field, rbp_ps_t);	\
-	} else {							\
-	    (r_node) = rbp_ps_t;					\
-	    break;							\
-	}								\
-    }									\
-} while (0)
-
-#define	rbp_rotate_left(a_type, a_field, a_node, r_node) do {		\
-    (r_node) = rbp_right_get(a_type, a_field, (a_node));		\
-    rbp_right_set(a_type, a_field, (a_node),				\
-      rbp_left_get(a_type, a_field, (r_node)));				\
-    rbp_left_set(a_type, a_field, (r_node), (a_node));			\
-} while (0)
-
-#define	rbp_rotate_right(a_type, a_field, a_node, r_node) do {		\
-    (r_node) = rbp_left_get(a_type, a_field, (a_node));			\
-    rbp_left_set(a_type, a_field, (a_node),				\
-      rbp_right_get(a_type, a_field, (r_node)));			\
-    rbp_right_set(a_type, a_field, (r_node), (a_node));			\
-} while (0)
-
-#define	rbp_lean_left(a_type, a_field, a_node, r_node) do {		\
-    bool rbp_ll_red;							\
-    rbp_rotate_left(a_type, a_field, (a_node), (r_node));		\
-    rbp_ll_red = rbp_red_get(a_type, a_field, (a_node));		\
-    rbp_color_set(a_type, a_field, (r_node), rbp_ll_red);		\
-    rbp_red_set(a_type, a_field, (a_node));				\
-} while (0)
-
-#define	rbp_lean_right(a_type, a_field, a_node, r_node) do {		\
-    bool rbp_lr_red;							\
-    rbp_rotate_right(a_type, a_field, (a_node), (r_node));		\
-    rbp_lr_red = rbp_red_get(a_type, a_field, (a_node));		\
-    rbp_color_set(a_type, a_field, (r_node), rbp_lr_red);		\
-    rbp_red_set(a_type, a_field, (a_node));				\
-} while (0)
-
-#define	rbp_move_red_left(a_type, a_field, a_node, r_node) do {		\
-    a_type *rbp_mrl_t, *rbp_mrl_u;					\
-    rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node));		\
-    rbp_red_set(a_type, a_field, rbp_mrl_t);				\
-    rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node));		\
-    rbp_mrl_u = rbp_left_get(a_type, a_field, rbp_mrl_t);		\
-    if (rbp_red_get(a_type, a_field, rbp_mrl_u)) {			\
-	rbp_rotate_right(a_type, a_field, rbp_mrl_t, rbp_mrl_u);	\
-	rbp_right_set(a_type, a_field, (a_node), rbp_mrl_u);		\
-	rbp_rotate_left(a_type, a_field, (a_node), (r_node));		\
-	rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node));		\
-	if (rbp_red_get(a_type, a_field, rbp_mrl_t)) {			\
-	    rbp_black_set(a_type, a_field, rbp_mrl_t);			\
-	    rbp_red_set(a_type, a_field, (a_node));			\
-	    rbp_rotate_left(a_type, a_field, (a_node), rbp_mrl_t);	\
-	    rbp_left_set(a_type, a_field, (r_node), rbp_mrl_t);		\
-	} else {							\
-	    rbp_black_set(a_type, a_field, (a_node));			\
-	}								\
-    } else {								\
-	rbp_red_set(a_type, a_field, (a_node));				\
-	rbp_rotate_left(a_type, a_field, (a_node), (r_node));		\
-    }									\
-} while (0)
-
-#define	rbp_move_red_right(a_type, a_field, a_node, r_node) do {	\
-    a_type *rbp_mrr_t;							\
-    rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node));		\
-    if (rbp_red_get(a_type, a_field, rbp_mrr_t)) {			\
-	a_type *rbp_mrr_u, *rbp_mrr_v;					\
-	rbp_mrr_u = rbp_right_get(a_type, a_field, rbp_mrr_t);		\
-	rbp_mrr_v = rbp_left_get(a_type, a_field, rbp_mrr_u);		\
-	if (rbp_red_get(a_type, a_field, rbp_mrr_v)) {			\
-	    rbp_color_set(a_type, a_field, rbp_mrr_u,			\
-	      rbp_red_get(a_type, a_field, (a_node)));			\
-	    rbp_black_set(a_type, a_field, rbp_mrr_v);			\
-	    rbp_rotate_left(a_type, a_field, rbp_mrr_t, rbp_mrr_u);	\
-	    rbp_left_set(a_type, a_field, (a_node), rbp_mrr_u);		\
-	    rbp_rotate_right(a_type, a_field, (a_node), (r_node));	\
-	    rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);	\
-	    rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);	\
-	} else {							\
-	    rbp_color_set(a_type, a_field, rbp_mrr_t,			\
-	      rbp_red_get(a_type, a_field, (a_node)));			\
-	    rbp_red_set(a_type, a_field, rbp_mrr_u);			\
-	    rbp_rotate_right(a_type, a_field, (a_node), (r_node));	\
-	    rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);	\
-	    rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);	\
-	}								\
-	rbp_red_set(a_type, a_field, (a_node));				\
-    } else {								\
-	rbp_red_set(a_type, a_field, rbp_mrr_t);			\
-	rbp_mrr_t = rbp_left_get(a_type, a_field, rbp_mrr_t);		\
-	if (rbp_red_get(a_type, a_field, rbp_mrr_t)) {			\
-	    rbp_black_set(a_type, a_field, rbp_mrr_t);			\
-	    rbp_rotate_right(a_type, a_field, (a_node), (r_node));	\
-	    rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);	\
-	    rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);	\
-	} else {							\
-	    rbp_rotate_left(a_type, a_field, (a_node), (r_node));	\
-	}								\
-    }									\
-} while (0)
-
-#define	rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do {		\
-    a_type rbp_i_s;							\
-    a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u;		\
-    int rbp_i_cmp = 0;							\
-    rbp_i_g = &(a_tree)->rbt_nil;					\
-    rbp_left_set(a_type, a_field, &rbp_i_s, (a_tree)->rbt_root);	\
-    rbp_right_set(a_type, a_field, &rbp_i_s, &(a_tree)->rbt_nil);	\
-    rbp_black_set(a_type, a_field, &rbp_i_s);				\
-    rbp_i_p = &rbp_i_s;							\
-    rbp_i_c = (a_tree)->rbt_root;					\
-    /* Iteratively search down the tree for the insertion point,      */\
-    /* splitting 4-nodes as they are encountered.  At the end of each */\
-    /* iteration, rbp_i_g->rbp_i_p->rbp_i_c is a 3-level path down    */\
-    /* the tree, assuming a sufficiently deep tree.                   */\
-    while (rbp_i_c != &(a_tree)->rbt_nil) {				\
-	rbp_i_t = rbp_left_get(a_type, a_field, rbp_i_c);		\
-	rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t);		\
-	if (rbp_red_get(a_type, a_field, rbp_i_t)			\
-	  && rbp_red_get(a_type, a_field, rbp_i_u)) {			\
-	    /* rbp_i_c is the top of a logical 4-node, so split it.   */\
-	    /* This iteration does not move down the tree, due to the */\
-	    /* disruptiveness of node splitting.                      */\
-	    /*                                                        */\
-	    /* Rotate right.                                          */\
-	    rbp_rotate_right(a_type, a_field, rbp_i_c, rbp_i_t);	\
-	    /* Pass red links up one level.                           */\
-	    rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t);		\
-	    rbp_black_set(a_type, a_field, rbp_i_u);			\
-	    if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) {	\
-		rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t);	\
-		rbp_i_c = rbp_i_t;					\
-	    } else {							\
-		/* rbp_i_c was the right child of rbp_i_p, so rotate  */\
-		/* left in order to maintain the left-leaning         */\
-		/* invariant.                                         */\
-		assert(rbp_right_get(a_type, a_field, rbp_i_p)		\
-		  == rbp_i_c);						\
-		rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t);	\
-		rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u);	\
-		if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
-		    rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u);	\
-		} else {						\
-		    assert(rbp_right_get(a_type, a_field, rbp_i_g)	\
-		      == rbp_i_p);					\
-		    rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u);	\
-		}							\
-		rbp_i_p = rbp_i_u;					\
-		rbp_i_cmp = (a_cmp)((a_node), rbp_i_p);			\
-		if (rbp_i_cmp < 0) {					\
-		    rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p);	\
-		} else {						\
-		    assert(rbp_i_cmp > 0);				\
-		    rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p);	\
-		}							\
-		continue;						\
-	    }								\
-	}								\
-	rbp_i_g = rbp_i_p;						\
-	rbp_i_p = rbp_i_c;						\
-	rbp_i_cmp = (a_cmp)((a_node), rbp_i_c);				\
-	if (rbp_i_cmp < 0) {						\
-	    rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c);		\
-	} else {							\
-	    assert(rbp_i_cmp > 0);					\
-	    rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c);		\
-	}								\
-    }									\
-    /* rbp_i_p now refers to the node under which to insert.          */\
-    rbp_node_new(a_type, a_field, a_tree, (a_node));			\
-    if (rbp_i_cmp > 0) {						\
-	rbp_right_set(a_type, a_field, rbp_i_p, (a_node));		\
-	rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t);		\
-	if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {	\
-	    rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_t);		\
-	} else if (rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
-	    rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_t);		\
-	}								\
-    } else {								\
-	rbp_left_set(a_type, a_field, rbp_i_p, (a_node));		\
-    }									\
-    /* Update the root and make sure that it is black.                */\
-    (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_i_s);	\
-    rbp_black_set(a_type, a_field, (a_tree)->rbt_root);			\
-} while (0)
-
-#define	rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do {		\
-    a_type rbp_r_s;							\
-    a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u;		\
-    int rbp_r_cmp;							\
-    rbp_left_set(a_type, a_field, &rbp_r_s, (a_tree)->rbt_root);	\
-    rbp_right_set(a_type, a_field, &rbp_r_s, &(a_tree)->rbt_nil);	\
-    rbp_black_set(a_type, a_field, &rbp_r_s);				\
-    rbp_r_p = &rbp_r_s;							\
-    rbp_r_c = (a_tree)->rbt_root;					\
-    rbp_r_xp = &(a_tree)->rbt_nil;					\
-    /* Iterate down the tree, but always transform 2-nodes to 3- or   */\
-    /* 4-nodes in order to maintain the invariant that the current    */\
-    /* node is not a 2-node.  This allows simple deletion once a leaf */\
-    /* is reached.  Handle the root specially though, since there may */\
-    /* be no way to convert it from a 2-node to a 3-node.             */\
-    rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);				\
-    if (rbp_r_cmp < 0) {						\
-	rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);		\
-	rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);		\
-	if (rbp_red_get(a_type, a_field, rbp_r_t) == false		\
-	  && rbp_red_get(a_type, a_field, rbp_r_u) == false) {		\
-	    /* Apply standard transform to prepare for left move.     */\
-	    rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t);	\
-	    rbp_black_set(a_type, a_field, rbp_r_t);			\
-	    rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);		\
-	    rbp_r_c = rbp_r_t;						\
-	} else {							\
-	    /* Move left.                                             */\
-	    rbp_r_p = rbp_r_c;						\
-	    rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);		\
-	}								\
-    } else {								\
-	if (rbp_r_cmp == 0) {						\
-	    assert((a_node) == rbp_r_c);				\
-	    if (rbp_right_get(a_type, a_field, rbp_r_c)			\
-	      == &(a_tree)->rbt_nil) {					\
-		/* Delete root node (which is also a leaf node).      */\
-		if (rbp_left_get(a_type, a_field, rbp_r_c)		\
-		  != &(a_tree)->rbt_nil) {				\
-		    rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t);	\
-		    rbp_right_set(a_type, a_field, rbp_r_t,		\
-		      &(a_tree)->rbt_nil);				\
-		} else {						\
-		    rbp_r_t = &(a_tree)->rbt_nil;			\
-		}							\
-		rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);	\
-	    } else {							\
-		/* This is the node we want to delete, but we will    */\
-		/* instead swap it with its successor and delete the  */\
-		/* successor.  Record enough information to do the    */\
-		/* swap later.  rbp_r_xp is the a_node's parent.      */\
-		rbp_r_xp = rbp_r_p;					\
-		rbp_r_cmp = 1; /* Note that deletion is incomplete.   */\
-	    }								\
-	}								\
-	if (rbp_r_cmp == 1) {						\
-	    if (rbp_red_get(a_type, a_field, rbp_left_get(a_type,	\
-	      a_field, rbp_right_get(a_type, a_field, rbp_r_c)))	\
-	      == false) {						\
-		rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);	\
-		if (rbp_red_get(a_type, a_field, rbp_r_t)) {		\
-		    /* Standard transform.                            */\
-		    rbp_move_red_right(a_type, a_field, rbp_r_c,	\
-		      rbp_r_t);						\
-		} else {						\
-		    /* Root-specific transform.                       */\
-		    rbp_red_set(a_type, a_field, rbp_r_c);		\
-		    rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);	\
-		    if (rbp_red_get(a_type, a_field, rbp_r_u)) {	\
-			rbp_black_set(a_type, a_field, rbp_r_u);	\
-			rbp_rotate_right(a_type, a_field, rbp_r_c,	\
-			  rbp_r_t);					\
-			rbp_rotate_left(a_type, a_field, rbp_r_c,	\
-			  rbp_r_u);					\
-			rbp_right_set(a_type, a_field, rbp_r_t,		\
-			  rbp_r_u);					\
-		    } else {						\
-			rbp_red_set(a_type, a_field, rbp_r_t);		\
-			rbp_rotate_left(a_type, a_field, rbp_r_c,	\
-			  rbp_r_t);					\
-		    }							\
-		}							\
-		rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);	\
-		rbp_r_c = rbp_r_t;					\
-	    } else {							\
-		/* Move right.                                        */\
-		rbp_r_p = rbp_r_c;					\
-		rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);	\
-	    }								\
-	}								\
-    }									\
-    if (rbp_r_cmp != 0) {						\
-	while (true) {							\
-	    assert(rbp_r_p != &(a_tree)->rbt_nil);			\
-	    rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);			\
-	    if (rbp_r_cmp < 0) {					\
-		rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);	\
-		if (rbp_r_t == &(a_tree)->rbt_nil) {			\
-		    /* rbp_r_c now refers to the successor node to    */\
-		    /* relocate, and rbp_r_xp/a_node refer to the     */\
-		    /* context for the relocation.                    */\
-		    if (rbp_left_get(a_type, a_field, rbp_r_xp)		\
-		      == (a_node)) {					\
-			rbp_left_set(a_type, a_field, rbp_r_xp,		\
-			  rbp_r_c);					\
-		    } else {						\
-			assert(rbp_right_get(a_type, a_field,		\
-			  rbp_r_xp) == (a_node));			\
-			rbp_right_set(a_type, a_field, rbp_r_xp,	\
-			  rbp_r_c);					\
-		    }							\
-		    rbp_left_set(a_type, a_field, rbp_r_c,		\
-		      rbp_left_get(a_type, a_field, (a_node)));		\
-		    rbp_right_set(a_type, a_field, rbp_r_c,		\
-		      rbp_right_get(a_type, a_field, (a_node)));	\
-		    rbp_color_set(a_type, a_field, rbp_r_c,		\
-		      rbp_red_get(a_type, a_field, (a_node)));		\
-		    if (rbp_left_get(a_type, a_field, rbp_r_p)		\
-		      == rbp_r_c) {					\
-			rbp_left_set(a_type, a_field, rbp_r_p,		\
-			  &(a_tree)->rbt_nil);				\
-		    } else {						\
-			assert(rbp_right_get(a_type, a_field, rbp_r_p)	\
-			  == rbp_r_c);					\
-			rbp_right_set(a_type, a_field, rbp_r_p,		\
-			  &(a_tree)->rbt_nil);				\
-		    }							\
-		    break;						\
-		}							\
-		rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);	\
-		if (rbp_red_get(a_type, a_field, rbp_r_t) == false	\
-		  && rbp_red_get(a_type, a_field, rbp_r_u) == false) {	\
-		    rbp_move_red_left(a_type, a_field, rbp_r_c,		\
-		      rbp_r_t);						\
-		    if (rbp_left_get(a_type, a_field, rbp_r_p)		\
-		      == rbp_r_c) {					\
-			rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
-		    } else {						\
-			rbp_right_set(a_type, a_field, rbp_r_p,		\
-			  rbp_r_t);					\
-		    }							\
-		    rbp_r_c = rbp_r_t;					\
-		} else {						\
-		    rbp_r_p = rbp_r_c;					\
-		    rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);	\
-		}							\
-	    } else {							\
-		/* Check whether to delete this node (it has to be    */\
-		/* the correct node and a leaf node).                 */\
-		if (rbp_r_cmp == 0) {					\
-		    assert((a_node) == rbp_r_c);			\
-		    if (rbp_right_get(a_type, a_field, rbp_r_c)		\
-		      == &(a_tree)->rbt_nil) {				\
-			/* Delete leaf node.                          */\
-			if (rbp_left_get(a_type, a_field, rbp_r_c)	\
-			  != &(a_tree)->rbt_nil) {			\
-			    rbp_lean_right(a_type, a_field, rbp_r_c,	\
-			      rbp_r_t);					\
-			    rbp_right_set(a_type, a_field, rbp_r_t,	\
-			      &(a_tree)->rbt_nil);			\
-			} else {					\
-			    rbp_r_t = &(a_tree)->rbt_nil;		\
-			}						\
-			if (rbp_left_get(a_type, a_field, rbp_r_p)	\
-			  == rbp_r_c) {					\
-			    rbp_left_set(a_type, a_field, rbp_r_p,	\
-			      rbp_r_t);					\
-			} else {					\
-			    rbp_right_set(a_type, a_field, rbp_r_p,	\
-			      rbp_r_t);					\
-			}						\
-			break;						\
-		    } else {						\
-			/* This is the node we want to delete, but we */\
-			/* will instead swap it with its successor    */\
-			/* and delete the successor.  Record enough   */\
-			/* information to do the swap later.          */\
-			/* rbp_r_xp is a_node's parent.               */\
-			rbp_r_xp = rbp_r_p;				\
-		    }							\
-		}							\
-		rbp_r_t = rbp_right_get(a_type, a_field, rbp_r_c);	\
-		rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);	\
-		if (rbp_red_get(a_type, a_field, rbp_r_u) == false) {	\
-		    rbp_move_red_right(a_type, a_field, rbp_r_c,	\
-		      rbp_r_t);						\
-		    if (rbp_left_get(a_type, a_field, rbp_r_p)		\
-		      == rbp_r_c) {					\
-			rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
-		    } else {						\
-			rbp_right_set(a_type, a_field, rbp_r_p,		\
-			  rbp_r_t);					\
-		    }							\
-		    rbp_r_c = rbp_r_t;					\
-		} else {						\
-		    rbp_r_p = rbp_r_c;					\
-		    rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);	\
-		}							\
-	    }								\
-	}								\
-    }									\
-    /* Update root.                                                   */\
-    (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s);	\
-} while (0)
-
-/*
- * The rb_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to rb_wrap().
- */
-
-#define	rb_proto(a_attr, a_prefix, a_tree_type, a_type)			\
-a_attr void								\
-a_prefix##new(a_tree_type *tree);					\
-a_attr a_type *								\
-a_prefix##first(a_tree_type *tree);					\
-a_attr a_type *								\
-a_prefix##last(a_tree_type *tree);					\
-a_attr a_type *								\
-a_prefix##next(a_tree_type *tree, a_type *node);			\
-a_attr a_type *								\
-a_prefix##prev(a_tree_type *tree, a_type *node);			\
-a_attr a_type *								\
-a_prefix##search(a_tree_type *tree, a_type *key);			\
-a_attr a_type *								\
-a_prefix##nsearch(a_tree_type *tree, a_type *key);			\
-a_attr a_type *								\
-a_prefix##psearch(a_tree_type *tree, a_type *key);			\
-a_attr void								\
-a_prefix##insert(a_tree_type *tree, a_type *node);			\
-a_attr void								\
-a_prefix##remove(a_tree_type *tree, a_type *node);
-
-/*
- * The rb_wrap() macro provides a convenient way to wrap functions around the
- * cpp macros.  The main benefits of wrapping are that 1) repeated macro
- * expansion can cause code bloat, especially for rb_{insert,remove}(), and
- * 2) type, linkage, comparison functions, etc. need not be specified at every
- * call point.
- */
-
-#define	rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp)	\
-a_attr void								\
-a_prefix##new(a_tree_type *tree) {					\
-    rb_new(a_type, a_field, tree);					\
-}									\
-a_attr a_type *								\
-a_prefix##first(a_tree_type *tree) {					\
-    a_type *ret;							\
-    rb_first(a_type, a_field, tree, ret);				\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##last(a_tree_type *tree) {					\
-    a_type *ret;							\
-    rb_last(a_type, a_field, tree, ret);				\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##next(a_tree_type *tree, a_type *node) {			\
-    a_type *ret;							\
-    rb_next(a_type, a_field, a_cmp, tree, node, ret);			\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##prev(a_tree_type *tree, a_type *node) {			\
-    a_type *ret;							\
-    rb_prev(a_type, a_field, a_cmp, tree, node, ret);			\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##search(a_tree_type *tree, a_type *key) {			\
-    a_type *ret;							\
-    rb_search(a_type, a_field, a_cmp, tree, key, ret);			\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##nsearch(a_tree_type *tree, a_type *key) {			\
-    a_type *ret;							\
-    rb_nsearch(a_type, a_field, a_cmp, tree, key, ret);			\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##psearch(a_tree_type *tree, a_type *key) {			\
-    a_type *ret;							\
-    rb_psearch(a_type, a_field, a_cmp, tree, key, ret);			\
-    return (ret);							\
-}									\
-a_attr void								\
-a_prefix##insert(a_tree_type *tree, a_type *node) {			\
-    rb_insert(a_type, a_field, a_cmp, tree, node);			\
-}									\
-a_attr void								\
-a_prefix##remove(a_tree_type *tree, a_type *node) {			\
-    rb_remove(a_type, a_field, a_cmp, tree, node);			\
-}
-
-/*
- * The iterators simulate recursion via an array of pointers that store the
- * current path.  This is critical to performance, since a series of calls to
- * rb_{next,prev}() would require time proportional to (n lg n), whereas this
- * implementation only requires time proportional to (n).
- *
- * Since the iterators cache a path down the tree, any tree modification may
- * cause the cached path to become invalid.  In order to continue iteration,
- * use something like the following sequence:
- *
- *   {
- *       a_type *node, *tnode;
- *
- *       rb_foreach_begin(a_type, a_field, a_tree, node) {
- *           ...
- *           rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
- *           rb_remove(a_type, a_field, a_cmp, a_tree, node);
- *           rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
- *           ...
- *       } rb_foreach_end(a_type, a_field, a_tree, node)
- *   }
- *
- * Note that this idiom is not advised if every iteration modifies the tree,
- * since in that case there is no algorithmic complexity improvement over a
- * series of rb_{next,prev}() calls, thus making the setup overhead wasted
- * effort.
- */
-
-#define	rb_foreach_begin(a_type, a_field, a_tree, a_var) {		\
-    /* Compute the maximum possible tree depth (3X the black height). */\
-    unsigned rbp_f_height;						\
-    rbp_black_height(a_type, a_field, a_tree, rbp_f_height);		\
-    rbp_f_height *= 3;							\
-    {									\
-	/* Initialize the path to contain the left spine.             */\
-	a_type *rbp_f_path[rbp_f_height];				\
-	a_type *rbp_f_node;						\
-	bool rbp_f_synced = false;					\
-	unsigned rbp_f_depth = 0;					\
-	if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {			\
-	    rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;		\
-	    rbp_f_depth++;						\
-	    while ((rbp_f_node = rbp_left_get(a_type, a_field,		\
-	      rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {	\
-		rbp_f_path[rbp_f_depth] = rbp_f_node;			\
-		rbp_f_depth++;						\
-	    }								\
-	}								\
-	/* While the path is non-empty, iterate.                      */\
-	while (rbp_f_depth > 0) {					\
-	    (a_var) = rbp_f_path[rbp_f_depth-1];
-
-/* Only use if modifying the tree during iteration. */
-#define	rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node)		\
-	    /* Re-initialize the path to contain the path to a_node.  */\
-	    rbp_f_depth = 0;						\
-	    if (a_node != NULL) {					\
-		if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {		\
-		    rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;	\
-		    rbp_f_depth++;					\
-		    rbp_f_node = rbp_f_path[0];				\
-		    while (true) {					\
-			int rbp_f_cmp = (a_cmp)((a_node),		\
-			  rbp_f_path[rbp_f_depth-1]);			\
-			if (rbp_f_cmp < 0) {				\
-			    rbp_f_node = rbp_left_get(a_type, a_field,	\
-			      rbp_f_path[rbp_f_depth-1]);		\
-			} else if (rbp_f_cmp > 0) {			\
-			    rbp_f_node = rbp_right_get(a_type, a_field,	\
-			      rbp_f_path[rbp_f_depth-1]);		\
-			} else {					\
-			    break;					\
-			}						\
-			assert(rbp_f_node != &(a_tree)->rbt_nil);	\
-			rbp_f_path[rbp_f_depth] = rbp_f_node;		\
-			rbp_f_depth++;					\
-		    }							\
-		}							\
-	    }								\
-	    rbp_f_synced = true;
-
-#define	rb_foreach_end(a_type, a_field, a_tree, a_var)			\
-	    if (rbp_f_synced) {						\
-		rbp_f_synced = false;					\
-		continue;						\
-	    }								\
-	    /* Find the successor.                                    */\
-	    if ((rbp_f_node = rbp_right_get(a_type, a_field,		\
-	      rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {	\
-	        /* The successor is the left-most node in the right   */\
-		/* subtree.                                           */\
-		rbp_f_path[rbp_f_depth] = rbp_f_node;			\
-		rbp_f_depth++;						\
-		while ((rbp_f_node = rbp_left_get(a_type, a_field,	\
-		  rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {	\
-		    rbp_f_path[rbp_f_depth] = rbp_f_node;		\
-		    rbp_f_depth++;					\
-		}							\
-	    } else {							\
-		/* The successor is above the current node.  Unwind   */\
-		/* until a left-leaning edge is removed from the      */\
-		/* path, or the path is empty.                        */\
-		for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) {	\
-		    if (rbp_left_get(a_type, a_field,			\
-		      rbp_f_path[rbp_f_depth-1])			\
-		      == rbp_f_path[rbp_f_depth]) {			\
-			break;						\
-		    }							\
-		}							\
-	    }								\
-	}								\
-    }									\
-}
-
-#define	rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) {	\
-    /* Compute the maximum possible tree depth (3X the black height). */\
-    unsigned rbp_fr_height;						\
-    rbp_black_height(a_type, a_field, a_tree, rbp_fr_height);		\
-    rbp_fr_height *= 3;							\
-    {									\
-	/* Initialize the path to contain the right spine.            */\
-	a_type *rbp_fr_path[rbp_fr_height];				\
-	a_type *rbp_fr_node;						\
-	bool rbp_fr_synced = false;					\
-	unsigned rbp_fr_depth = 0;					\
-	if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {			\
-	    rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root;		\
-	    rbp_fr_depth++;						\
-	    while ((rbp_fr_node = rbp_right_get(a_type, a_field,	\
-	      rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {	\
-		rbp_fr_path[rbp_fr_depth] = rbp_fr_node;		\
-		rbp_fr_depth++;						\
-	    }								\
-	}								\
-	/* While the path is non-empty, iterate.                      */\
-	while (rbp_fr_depth > 0) {					\
-	    (a_var) = rbp_fr_path[rbp_fr_depth-1];
-
-/* Only use if modifying the tree during iteration. */
-#define	rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node)	\
-	    /* Re-initialize the path to contain the path to a_node.  */\
-	    rbp_fr_depth = 0;						\
-	    if (a_node != NULL) {					\
-		if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {		\
-		    rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root;	\
-		    rbp_fr_depth++;					\
-		    rbp_fr_node = rbp_fr_path[0];			\
-		    while (true) {					\
-			int rbp_fr_cmp = (a_cmp)((a_node),		\
-			  rbp_fr_path[rbp_fr_depth-1]);			\
-			if (rbp_fr_cmp < 0) {				\
-			    rbp_fr_node = rbp_left_get(a_type, a_field,	\
-			      rbp_fr_path[rbp_fr_depth-1]);		\
-			} else if (rbp_fr_cmp > 0) {			\
-			    rbp_fr_node = rbp_right_get(a_type, a_field,\
-			      rbp_fr_path[rbp_fr_depth-1]);		\
-			} else {					\
-			    break;					\
-			}						\
-			assert(rbp_fr_node != &(a_tree)->rbt_nil);	\
-			rbp_fr_path[rbp_fr_depth] = rbp_fr_node;	\
-			rbp_fr_depth++;					\
-		    }							\
-		}							\
-	    }								\
-	    rbp_fr_synced = true;
-
-#define	rb_foreach_reverse_end(a_type, a_field, a_tree, a_var)		\
-	    if (rbp_fr_synced) {					\
-		rbp_fr_synced = false;					\
-		continue;						\
-	    }								\
-	    if (rbp_fr_depth == 0) {					\
-		/* rb_foreach_reverse_prev() was called with a NULL   */\
-		/* a_node.                                            */\
-		break;							\
-	    }								\
-	    /* Find the predecessor.                                  */\
-	    if ((rbp_fr_node = rbp_left_get(a_type, a_field,		\
-	      rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {	\
-	        /* The predecessor is the right-most node in the left */\
-		/* subtree.                                           */\
-		rbp_fr_path[rbp_fr_depth] = rbp_fr_node;		\
-		rbp_fr_depth++;						\
-		while ((rbp_fr_node = rbp_right_get(a_type, a_field,	\
-		  rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {\
-		    rbp_fr_path[rbp_fr_depth] = rbp_fr_node;		\
-		    rbp_fr_depth++;					\
-		}							\
-	    } else {							\
-		/* The predecessor is above the current node.  Unwind */\
-		/* until a right-leaning edge is removed from the     */\
-		/* path, or the path is empty.                        */\
-		for (rbp_fr_depth--; rbp_fr_depth > 0; rbp_fr_depth--) {\
-		    if (rbp_right_get(a_type, a_field,			\
-		      rbp_fr_path[rbp_fr_depth-1])			\
-		      == rbp_fr_path[rbp_fr_depth]) {			\
-			break;						\
-		    }							\
-		}							\
-	    }								\
-	}								\
-    }									\
-}
-
-#endif /* RB_H_ */
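For context while reviewing the removal of these macros, here is a minimal
sketch of how a consumer typically uses rb_wrap() and the rb_foreach_*()
iterators.  The node_t, tree_t, node_cmp, and tree_ names are hypothetical,
and rb_node()/rb_tree() are assumed to be the companion field/root declarator
macros from earlier in this header (not shown in this excerpt):

typedef struct node_s node_t;
struct node_s {
	int		key;
	rb_node(node_t)	link;	/* Intrusive red-black tree linkage. */
};
typedef rb_tree(node_t) tree_t;

static int
node_cmp(node_t *a, node_t *b)
{

	return ((a->key > b->key) - (a->key < b->key));
}

/*
 * Emit static tree_new(), tree_insert(), tree_search(), tree_remove(), etc.
 * jemalloc itself passes "static JEMALLOC_ATTR(unused)" as a_attr so that
 * unused wrappers do not trigger warnings.
 */
rb_wrap(static, tree_, tree_t, node_t, link, node_cmp)

static int
tree_key_sum(tree_t *tree, node_t *nodes, unsigned nnodes)
{
	node_t *node;
	unsigned i;
	int sum = 0;

	tree_new(tree);
	for (i = 0; i < nnodes; i++)
		tree_insert(tree, &nodes[i]);

	/* Read-only in-order traversal; O(n) total, per the comment above. */
	rb_foreach_begin(node_t, link, tree, node) {
		sum += node->key;
	} rb_foreach_end(node_t, link, tree, node)

	return (sum);
}
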
diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index dfff742..22401d1 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -90,7 +90,7 @@
  */
 
 #define	JEMALLOC_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/jemalloc.h.in b/jemalloc/src/jemalloc.h.in
deleted file mode 100644
index baa8459..0000000
--- a/jemalloc/src/jemalloc.h.in
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef JEMALLOC_H_
-#define	JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "jemalloc_defs@install_suffix@.h"
-#ifndef JEMALLOC_P
-#  define JEMALLOC_P(s) s
-#endif
-
-extern const char	*JEMALLOC_P(malloc_options);
-extern void		(*JEMALLOC_P(malloc_message))(void *, const char *p1,
-    const char *p2, const char *p3, const char *p4);
-
-void	*JEMALLOC_P(malloc)(size_t size) JEMALLOC_ATTR(malloc);
-void	*JEMALLOC_P(calloc)(size_t num, size_t size) JEMALLOC_ATTR(malloc);
-int	JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
-    JEMALLOC_ATTR(nonnull(1));
-void	*JEMALLOC_P(realloc)(void *ptr, size_t size);
-void	JEMALLOC_P(free)(void *ptr);
-
-size_t	JEMALLOC_P(malloc_usable_size)(const void *ptr);
-void	JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *,
-    const char *, const char *, const char *), void *w4opaque,
-    const char *opts);
-int	JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
-int	JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp,
-    size_t *miblenp);
-int	JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen);
-
-#ifdef __cplusplus
-};
-#endif
-#endif /* JEMALLOC_H_ */
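The public header deleted here is presumably re-created under the new
include/jemalloc/ layout elsewhere in this patch.  As a reminder of how the
declared interface is consumed by an application, a hedged sketch: the
<jemalloc/jemalloc.h> install path and the "stats.allocated" mallctl name are
assumptions, and the statistic is only meaningful when built with
JEMALLOC_STATS:

#include <stdio.h>
#include <jemalloc/jemalloc.h>	/* Assumed install path after the restructure. */

int
main(void)
{
	void *p = JEMALLOC_P(malloc)(4096);
	size_t allocated, sz = sizeof(size_t);

	/* Read a statistic through the mallctl namespace. */
	if (JEMALLOC_P(mallctl)("stats.allocated", &allocated, &sz, NULL, 0)
	    == 0)
		printf("stats.allocated: %zu\n", allocated);

	/* NULL callback, opaque pointer, and options select the defaults. */
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);

	JEMALLOC_P(free)(p);
	return (0);
}
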
diff --git a/jemalloc/src/jemalloc_defs.h.in b/jemalloc/src/jemalloc_defs.h.in
deleted file mode 100644
index 4b4ea7d..0000000
--- a/jemalloc/src/jemalloc_defs.h.in
+++ /dev/null
@@ -1,106 +0,0 @@
-#ifndef JEMALLOC_DEFS_H_
-#define	JEMALLOC_DEFS_H_
-
-/*
- * jemalloc version string.
- */
-#undef JEMALLOC_VERSION
-
-/*
- * If JEMALLOC_PREFIX is defined, it will cause all public APIs to be prefixed.
- * This makes it possible, with some care, to use multiple allocators
- * simultaneously.
- *
- * In many cases it is more convenient to manually prefix allocator function
- * calls than to let macros do it automatically, particularly when using
- * multiple allocators simultaneously.  Define JEMALLOC_MANGLE before
- * #include'ing jemalloc.h in order to cause name mangling that corresponds to
- * the API prefixing.
- */
-#undef JEMALLOC_PREFIX
-#if (defined(JEMALLOC_PREFIX) && defined(JEMALLOC_MANGLE))
-#undef JEMALLOC_P
-#endif
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
-#undef CPU_SPINWAIT
-
-/* Defined if __attribute__((...)) syntax is supported. */
-#undef JEMALLOC_HAVE_ATTR
-#ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ATTR(s) __attribute__((s))
-#else
-#  define JEMALLOC_ATTR(s)
-#endif
-
-/*
- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-#undef JEMALLOC_DEBUG
-
-/* JEMALLOC_STATS enables statistics calculation. */
-#undef JEMALLOC_STATS
-
-/* JEMALLOC_PROF enables allocation profiling. */
-#undef JEMALLOC_PROF
-
-/* Use libunwind for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBUNWIND
-
-/* Use libgcc for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBGCC
-
-/*
- * JEMALLOC_TINY enables support for tiny objects, which are smaller than one
- * quantum.
- */
-#undef JEMALLOC_TINY
-
-/*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small and medium
- * objects.  This makes it possible to allocate/deallocate objects without any
- * locking when the cache is in the steady state.
- */
-#undef JEMALLOC_TCACHE
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
- * segment (DSS).
- */
-#undef JEMALLOC_DSS
-
-/* JEMALLOC_SWAP enables mmap()ed swap file support. */
-#undef JEMALLOC_SWAP
-
-/* Support memory filling (junk/zero). */
-#undef JEMALLOC_FILL
-
-/* Support optional abort() on OOM. */
-#undef JEMALLOC_XMALLOC
-
-/* Support SYSV semantics. */
-#undef JEMALLOC_SYSV
-
-/* Support lazy locking (avoid locking unless a second thread is launched). */
-#undef JEMALLOC_LAZY_LOCK
-
-/* Determine page size at run time if defined. */
-#undef DYNAMIC_PAGE_SHIFT
-
-/* One page is 2^STATIC_PAGE_SHIFT bytes. */
-#undef STATIC_PAGE_SHIFT
-
-/* TLS is used to map arenas and magazine caches to threads. */
-#undef NO_TLS
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#undef LG_SIZEOF_PTR
-
-/* sizeof(int) == 2^LG_SIZEOF_INT. */
-#undef LG_SIZEOF_INT
-
-#endif /* JEMALLOC_DEFS_H_ */
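The JEMALLOC_PREFIX/JEMALLOC_MANGLE machinery described in the comments above
can be illustrated as follows; the --with-jemalloc-prefix=je_ configure
invocation and the resulting je_ symbol names are assumptions made for the
sake of the sketch:

/*
 * Assume the library was configured with --with-jemalloc-prefix=je_, so the
 * exported entry points are je_malloc(), je_free(), etc.  Defining
 * JEMALLOC_MANGLE before the include makes JEMALLOC_P() expand to those
 * prefixed names; without it, JEMALLOC_P(s) falls back to the plain name s.
 */
#define	JEMALLOC_MANGLE
#include <stddef.h>
#include <jemalloc/jemalloc.h>

void *
wrapped_alloc(size_t size)
{

	/* Expands to je_malloc(size) when the je_ prefix is configured. */
	return (JEMALLOC_P(malloc)(size));
}

void
wrapped_free(void *ptr)
{

	JEMALLOC_P(free)(ptr);
}
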
diff --git a/jemalloc/src/mb.c b/jemalloc/src/mb.c
index 01665d1..30a1a2e 100644
--- a/jemalloc/src/mb.c
+++ b/jemalloc/src/mb.c
@@ -1,2 +1,2 @@
 #define	MB_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/jemalloc/src/jemalloc_mutex.c b/jemalloc/src/mutex.c
similarity index 96%
rename from jemalloc/src/jemalloc_mutex.c
rename to jemalloc/src/mutex.c
index 5e3cab3..7425027 100644
--- a/jemalloc/src/jemalloc_mutex.c
+++ b/jemalloc/src/mutex.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_MUTEX_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 /* Data. */
diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index 7e1d967..edaa7fb 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_PROF_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 #ifdef JEMALLOC_PROF
 /******************************************************************************/
 
diff --git a/jemalloc/src/jemalloc_stats.c b/jemalloc/src/stats.c
similarity index 99%
rename from jemalloc/src/jemalloc_stats.c
rename to jemalloc/src/stats.c
index 7a7f111..9798458 100644
--- a/jemalloc/src/jemalloc_stats.c
+++ b/jemalloc/src/stats.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_STATS_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 
 #define	CTL_GET(n, v, t) do {						\
 	size_t sz = sizeof(t);						\
diff --git a/jemalloc/src/jemalloc_tcache.c b/jemalloc/src/tcache.c
similarity index 99%
rename from jemalloc/src/jemalloc_tcache.c
rename to jemalloc/src/tcache.c
index c54d54e..d64ebac 100644
--- a/jemalloc/src/jemalloc_tcache.c
+++ b/jemalloc/src/tcache.c
@@ -1,5 +1,5 @@
 #define	JEMALLOC_TCACHE_C_
-#include "internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_internal.h"
 #ifdef JEMALLOC_TCACHE
 /******************************************************************************/
 /* Data. */