Rename index_t to szind_t to avoid a type name conflict on Solaris.

This resolves #256.
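
For context, a minimal sketch of the collision this rename sidesteps
(assuming Solaris exposes an index_t typedef via a system header such as
<sys/types.h>; the exact header and underlying type are assumptions, not
verified here, and this snippet is illustrative, not jemalloc code):

    /* collision_sketch.c: illustrates the typedef clash. */
    #include <sys/types.h>  /* On Solaris this may already define index_t. */

    /*
     * jemalloc's old definition; if the system typedef uses a different
     * underlying type, redeclaring it is a compile error:
     *
     *   typedef unsigned index_t;
     */

    /* The renamed type cannot collide with a system-provided index_t. */
    typedef unsigned szind_t;

    int
    main(void)
    {
    	szind_t binind = 0;  /* size class index, as used in jemalloc */
    	return ((int)binind);
    }
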
diff --git a/ChangeLog b/ChangeLog
index c98179c..284d6d2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -9,6 +9,7 @@
   Bug fixes:
   - Don't bitshift by negative amounts when encoding/decoding run sizes in chunk
     header maps.  This affected systems with page sizes greater than 8 KiB.
+  - Rename index_t to szind_t to avoid a type name conflict on Solaris.
 
 * 4.0.0 (August 17, 2015)
 
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 2347213..62a9a85 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -39,7 +39,7 @@
 #ifdef JEMALLOC_ARENA_STRUCTS_A
 struct arena_run_s {
 	/* Index of bin this run is associated with. */
-	index_t		binind;
+	szind_t		binind;
 
 	/* Number of free regions in run. */
 	unsigned	nfree;
@@ -448,7 +448,7 @@
 void	arena_maybe_purge(arena_t *arena);
 void	arena_purge_all(arena_t *arena);
 void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    index_t binind, uint64_t prof_accumbytes);
+    szind_t binind, uint64_t prof_accumbytes);
 void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
@@ -524,7 +524,7 @@
     size_t pageind);
 size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
 size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-index_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
 size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
 size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
 size_t	arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
@@ -541,17 +541,17 @@
 void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-    index_t binind);
+    szind_t binind);
 void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
-    size_t runind, index_t binind, size_t flags);
+    size_t runind, szind_t binind, size_t flags);
 void	arena_metadata_allocated_add(arena_t *arena, size_t size);
 void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t	arena_metadata_allocated_get(arena_t *arena);
 bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
 bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-index_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-index_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
+szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
 prof_tctx_t	*arena_prof_tctx_get(const void *ptr);
@@ -701,11 +701,11 @@
 	return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
-	index_t binind;
+	szind_t binind;
 
 	mapbits = arena_mapbits_get(chunk, pageind);
 	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -840,7 +840,7 @@
 
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-    index_t binind)
+    szind_t binind)
 {
 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
@@ -854,7 +854,7 @@
 
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
-    index_t binind, size_t flags)
+    szind_t binind, size_t flags)
 {
 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
@@ -931,10 +931,10 @@
 	}
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 {
-	index_t binind;
+	szind_t binind;
 
 	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
 
@@ -946,7 +946,7 @@
 		size_t rpages_ind;
 		arena_run_t *run;
 		arena_bin_t *bin;
-		index_t run_binind, actual_binind;
+		szind_t run_binind, actual_binind;
 		arena_bin_info_t *bin_info;
 		arena_chunk_map_misc_t *miscelm;
 		void *rpages;
@@ -980,10 +980,10 @@
 #  endif /* JEMALLOC_ARENA_INLINE_A */
 
 #  ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
-	index_t binind = bin - arena->bins;
+	szind_t binind = bin - arena->bins;
 	assert(binind < NBINS);
 	return (binind);
 }
@@ -1161,7 +1161,7 @@
 	size_t ret;
 	arena_chunk_t *chunk;
 	size_t pageind;
-	index_t binind;
+	szind_t binind;
 
 	assert(ptr != NULL);
 
@@ -1220,7 +1220,7 @@
 		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
 			/* Small allocation. */
 			if (likely(tcache != NULL)) {
-				index_t binind = arena_ptr_small_binind_get(ptr,
+				szind_t binind = arena_ptr_small_binind_get(ptr,
 				    mapbits);
 				tcache_dalloc_small(tsd, tcache, ptr, binind);
 			} else {
@@ -1272,7 +1272,7 @@
 		if (likely(size <= SMALL_MAXCLASS)) {
 			/* Small allocation. */
 			if (likely(tcache != NULL)) {
-				index_t binind = size2index(size);
+				szind_t binind = size2index(size);
 				tcache_dalloc_small(tsd, tcache, ptr, binind);
 			} else {
 				size_t pageind = ((uintptr_t)ptr -
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 7a137b6..f6e464e 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -184,7 +184,7 @@
 #include "jemalloc/internal/jemalloc_internal_macros.h"
 
 /* Size class index type. */
-typedef unsigned index_t;
+typedef unsigned szind_t;
 
 /*
  * Flags bits:
@@ -511,12 +511,12 @@
 #include "jemalloc/internal/huge.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
-index_t	size2index_compute(size_t size);
-index_t	size2index_lookup(size_t size);
-index_t	size2index(size_t size);
-size_t	index2size_compute(index_t index);
-size_t	index2size_lookup(index_t index);
-size_t	index2size(index_t index);
+szind_t	size2index_compute(size_t size);
+szind_t	size2index_lookup(size_t size);
+szind_t	size2index(size_t size);
+size_t	index2size_compute(szind_t index);
+size_t	index2size_lookup(szind_t index);
+size_t	index2size(szind_t index);
 size_t	s2u_compute(size_t size);
 size_t	s2u_lookup(size_t size);
 size_t	s2u(size_t size);
@@ -527,7 +527,7 @@
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
 size2index_compute(size_t size)
 {
 
@@ -558,7 +558,7 @@
 	}
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 size2index_lookup(size_t size)
 {
 
@@ -571,7 +571,7 @@
 	}
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 size2index(size_t size)
 {
 
@@ -582,7 +582,7 @@
 }
 
 JEMALLOC_INLINE size_t
-index2size_compute(index_t index)
+index2size_compute(szind_t index)
 {
 
 #if (NTBINS > 0)
@@ -609,7 +609,7 @@
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(index_t index)
+index2size_lookup(szind_t index)
 {
 	size_t ret = (size_t)index2size_tab[index];
 	assert(ret == index2size_compute(index));
@@ -617,7 +617,7 @@
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-index2size(index_t index)
+index2size(szind_t index)
 {
 
 	assert(index < NSIZES);
@@ -976,7 +976,7 @@
 	size_t ret;
 
 	if (usize <= SMALL_MAXCLASS) {
-		index_t binind = size2index(usize);
+		szind_t binind = size2index(usize);
 		ret = arena_bin_info[binind].redzone_size;
 	} else
 		ret = 0;
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 493f457..5079cd2 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -77,7 +77,7 @@
 	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
 	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
 	unsigned	ev_cnt;		/* Event count since incremental GC. */
-	index_t		next_gc_bin;	/* Next bin to GC. */
+	szind_t		next_gc_bin;	/* Next bin to GC. */
 	tcache_bin_t	tbins[1];	/* Dynamically sized. */
 	/*
 	 * The pointer stacks associated with tbins follow as a contiguous
@@ -126,10 +126,10 @@
 size_t	tcache_salloc(const void *ptr);
 void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
 void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, index_t binind);
+    tcache_bin_t *tbin, szind_t binind);
 void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    index_t binind, unsigned rem);
-void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+    szind_t binind, unsigned rem);
+void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache);
 void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
 void	tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
@@ -161,7 +161,7 @@
 void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, bool zero);
 void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    index_t binind);
+    szind_t binind);
 void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
     size_t size);
 tcache_t	*tcaches_get(tsd_t *tsd, unsigned ind);
@@ -267,7 +267,7 @@
     bool zero)
 {
 	void *ret;
-	index_t binind;
+	szind_t binind;
 	size_t usize;
 	tcache_bin_t *tbin;
 
@@ -312,7 +312,7 @@
     bool zero)
 {
 	void *ret;
-	index_t binind;
+	szind_t binind;
 	size_t usize;
 	tcache_bin_t *tbin;
 
@@ -360,7 +360,7 @@
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
 {
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
@@ -386,7 +386,7 @@
 JEMALLOC_ALWAYS_INLINE void
 tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
 {
-	index_t binind;
+	szind_t binind;
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
 
diff --git a/src/arena.c b/src/arena.c
index bd76e96..b1bb9db 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -314,7 +314,7 @@
 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	size_t mapbits = arena_mapbits_get(chunk, pageind);
-	index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
 	unsigned regind = arena_run_regind(run, bin_info, ptr);
 
@@ -507,7 +507,7 @@
 
 static bool
 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
-    index_t binind)
+    szind_t binind)
 {
 	arena_chunk_t *chunk;
 	arena_chunk_map_misc_t *miscelm;
@@ -779,7 +779,7 @@
 static void
 arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -792,7 +792,7 @@
 static void
 arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -805,7 +805,7 @@
 static void
 arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -818,7 +818,7 @@
 static void
 arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -1124,7 +1124,7 @@
 }
 
 static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
+arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
 {
 	arena_run_t *run = arena_run_first_best_fit(arena, size);
 	if (run != NULL) {
@@ -1135,7 +1135,7 @@
 }
 
 static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
 {
 	arena_chunk_t *chunk;
 	arena_run_t *run;
@@ -1888,7 +1888,7 @@
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
 	arena_run_t *run;
-	index_t binind;
+	szind_t binind;
 	arena_bin_info_t *bin_info;
 
 	/* Look for a usable run. */
@@ -1939,7 +1939,7 @@
 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 {
 	void *ret;
-	index_t binind;
+	szind_t binind;
 	arena_bin_info_t *bin_info;
 	arena_run_t *run;
 
@@ -1985,7 +1985,7 @@
 }
 
 void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
     uint64_t prof_accumbytes)
 {
 	unsigned i, nfill;
@@ -2130,7 +2130,7 @@
 void
 arena_quarantine_junk_small(void *ptr, size_t usize)
 {
-	index_t binind;
+	szind_t binind;
 	arena_bin_info_t *bin_info;
 	cassert(config_fill);
 	assert(opt_junk_free);
@@ -2148,7 +2148,7 @@
 	void *ret;
 	arena_bin_t *bin;
 	arena_run_t *run;
-	index_t binind;
+	szind_t binind;
 
 	binind = size2index(size);
 	assert(binind < NBINS);
@@ -2232,7 +2232,7 @@
 	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
 	    random_offset);
 	if (config_stats) {
-		index_t index = size2index(usize) - NBINS;
+		szind_t index = size2index(usize) - NBINS;
 
 		arena->stats.nmalloc_large++;
 		arena->stats.nrequests_large++;
@@ -2325,7 +2325,7 @@
 	ret = arena_miscelm_to_rpages(miscelm);
 
 	if (config_stats) {
-		index_t index = size2index(usize) - NBINS;
+		szind_t index = size2index(usize) - NBINS;
 
 		arena->stats.nmalloc_large++;
 		arena->stats.nrequests_large++;
@@ -2384,7 +2384,7 @@
 {
 	arena_chunk_t *chunk;
 	size_t pageind;
-	index_t binind;
+	szind_t binind;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
@@ -2412,7 +2412,7 @@
 	if (run == bin->runcur)
 		bin->runcur = NULL;
 	else {
-		index_t binind = arena_bin_index(extent_node_arena_get(
+		szind_t binind = arena_bin_index(extent_node_arena_get(
 		    &chunk->node), bin);
 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
 
@@ -2476,7 +2476,7 @@
 	arena_run_t *run;
 	arena_bin_t *bin;
 	arena_bin_info_t *bin_info;
-	index_t binind;
+	szind_t binind;
 
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
@@ -2573,7 +2573,7 @@
 		if (!junked)
 			arena_dalloc_junk_large(ptr, usize);
 		if (config_stats) {
-			index_t index = size2index(usize) - NBINS;
+			szind_t index = size2index(usize) - NBINS;
 
 			arena->stats.ndalloc_large++;
 			arena->stats.allocated_large -= usize;
@@ -2620,8 +2620,8 @@
 	arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
 	    large_pad, true);
 	if (config_stats) {
-		index_t oldindex = size2index(oldsize) - NBINS;
-		index_t index = size2index(size) - NBINS;
+		szind_t oldindex = size2index(oldsize) - NBINS;
+		szind_t index = size2index(size) - NBINS;
 
 		arena->stats.ndalloc_large++;
 		arena->stats.allocated_large -= oldsize;
@@ -2699,8 +2699,8 @@
 		    pageind+npages-1)));
 
 		if (config_stats) {
-			index_t oldindex = size2index(oldsize) - NBINS;
-			index_t index = size2index(size) - NBINS;
+			szind_t oldindex = size2index(oldsize) - NBINS;
+			szind_t index = size2index(size) - NBINS;
 
 			arena->stats.ndalloc_large++;
 			arena->stats.allocated_large -= oldsize;
diff --git a/src/tcache.c b/src/tcache.c
index 3814365..f1a30d5 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -32,7 +32,7 @@
 void
 tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 {
-	index_t binind = tcache->next_gc_bin;
+	szind_t binind = tcache->next_gc_bin;
 	tcache_bin_t *tbin = &tcache->tbins[binind];
 	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
 
@@ -72,7 +72,7 @@
 
 void *
 tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, index_t binind)
+    tcache_bin_t *tbin, szind_t binind)
 {
 	void *ret;
 
@@ -87,7 +87,7 @@
 
 void
 tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    index_t binind, unsigned rem)
+    szind_t binind, unsigned rem)
 {
 	arena_t *arena;
 	void *ptr;
@@ -166,7 +166,7 @@
 }
 
 void
-tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache)
 {
 	arena_t *arena;
diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c
index d791834..d3aaebd 100644
--- a/test/unit/size_classes.c
+++ b/test/unit/size_classes.c
@@ -26,7 +26,7 @@
 TEST_BEGIN(test_size_classes)
 {
 	size_t size_class, max_size_class;
-	index_t index, max_index;
+	szind_t index, max_index;
 
 	max_size_class = get_max_size_class();
 	max_index = size2index(max_size_class);