Fix potential chunk leaks.

Move chunk_dalloc_arena()'s implementation into chunk_dalloc_wrapper(),
so that if the dalloc hook fails, proper decommit/purge/retain cascading
occurs.  This fixes three potential chunk leaks on OOM paths: one during
dss-based chunk allocation, one during chunk header commit (currently
relevant only on Windows), and one during rtree write (e.g. if rtree
node allocation fails).

Merge chunk_purge_arena() into chunk_purge_default() (refactor, no
change to functionality).
diff --git a/src/arena.c b/src/arena.c
index 8291ab2..4596478 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -602,8 +602,8 @@
 		/* Commit header. */
 		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
 		    LG_PAGE, arena->ind)) {
-			chunk_dalloc_wrapper(arena, chunk_hooks,
-			    (void *)chunk, chunksize, *commit);
+			chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+			    chunksize, *zero, *commit);
 			chunk = NULL;
 		}
 	}
@@ -614,7 +614,7 @@
 			    LG_PAGE, arena->ind);
 		}
 		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
-		    chunksize, *commit);
+		    chunksize, *zero, *commit);
 		chunk = NULL;
 	}
 
@@ -1010,7 +1010,7 @@
 		malloc_mutex_unlock(&arena->lock);
 	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
-		chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+		chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero,
 		    true);
 		err = true;
 	}
@@ -1036,8 +1036,8 @@
 	}
 	arena_nactive_add(arena, udiff >> LG_PAGE);
 
-	err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, true) == NULL);
+	err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize,
+	    zero, true) == NULL);
 	malloc_mutex_unlock(&arena->lock);
 	if (err) {
 		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
@@ -1045,7 +1045,7 @@
 		    cdiff);
 	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
-		chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+		chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero,
 		    true);
 		err = true;
 	}
@@ -1699,7 +1699,7 @@
 			extent_node_dirty_remove(chunkselm);
 			arena_node_dalloc(arena, chunkselm);
 			chunkselm = chunkselm_next;
-			chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+			chunk_dalloc_wrapper(arena, chunk_hooks, addr, size,
 			    zeroed, committed);
 		} else {
 			arena_chunk_t *chunk =