Implement support for non-coalescing maps on MinGW.

- Do not reallocate huge objects in place if the number of backing
  chunks would change.
- Do not cache multi-chunk mappings.

This resolves #213.
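
As a rough sketch of the two rules above (illustrative only, not part of
the patch; maps_coalesce, chunksize, and CHUNK_CEILING() mirror identifiers
used in the diff, while the two helper functions are hypothetical names
invented for this example):

  #include <stdbool.h>
  #include <stddef.h>

  static const bool maps_coalesce = false;         /* the MinGW/MSVC case */
  static const size_t chunksize = (size_t)1 << 21; /* example: 2 MiB chunks */

  /* Round a request up to a multiple of the chunk size. */
  #define CHUNK_CEILING(s) (((s) + chunksize - 1) & ~(chunksize - 1))

  /* Rule 1: an in-place huge resize is only safe when it keeps the number
   * of backing chunks constant, since non-coalescing mappings cannot be
   * split or merged after the fact. */
  static bool
  huge_resize_in_place_ok(size_t oldsize, size_t usize)
  {
      if (maps_coalesce)
          return (true);
      return (CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
  }

  /* Rule 2: only single-chunk regions may be kept in the chunk cache;
   * larger regions must be unmapped immediately for the same reason. */
  static bool
  chunk_cacheable(size_t size)
  {
      return (maps_coalesce || size == chunksize);
  }
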
diff --git a/INSTALL b/INSTALL
index 8d39687..5413ae8 100644
--- a/INSTALL
+++ b/INSTALL
@@ -150,7 +150,10 @@
     the virtual memory for later use.  munmap() is disabled by default (i.e.
     --disable-munmap is implied) on Linux, which has a quirk in its virtual
     memory allocation algorithm that causes semi-permanent VM map holes under
-    normal jemalloc operation.
+    normal jemalloc operation.  Conversely, munmap() (actually VirtualFree())
+    is forcibly enabled on MinGW because virtual memory mappings do not
+    automatically coalesce (nor fragment on demand), and extra bookkeeping
+    would be required to track mapping boundaries.
 
 --disable-fill
     Disable support for junk/zero filling of memory, quarantine, and redzones.
diff --git a/configure.ac b/configure.ac
index 0497eaf..502dd39 100644
--- a/configure.ac
+++ b/configure.ac
@@ -258,6 +258,7 @@
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
 default_munmap="1"
+maps_coalesce="1"
 case "${host}" in
   *-*-darwin* | *-*-ios*)
 	CFLAGS="$CFLAGS"
@@ -341,6 +342,7 @@
 	abi="pecoff"
 	force_tls="0"
 	force_lazy_lock="1"
+	maps_coalesce="0"
 	RPATH=""
 	so="dll"
 	if test "x$je_cv_msvc" = "xyes" ; then
@@ -862,6 +864,12 @@
 fi
 AC_SUBST([enable_tcache])
 
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+  AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
 dnl Enable VM deallocation via munmap() by default.
 AC_ARG_ENABLE([munmap],
   [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
@@ -873,6 +881,10 @@
 ],
 [enable_munmap="${default_munmap}"]
 )
+if test "x$enable_munmap" = "x0" -a "x${maps_coalesce}" = "x0" ; then
+  AC_MSG_RESULT([Forcing munmap to avoid non-coalescing map issues])
+  enable_munmap="1"
+fi
 if test "x$enable_munmap" = "x1" ; then
   AC_DEFINE([JEMALLOC_MUNMAP], [ ])
 fi
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 29aa802..496997d 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -70,6 +70,13 @@
     false
 #endif
     ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+    true
+#else
+    false
+#endif
+    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index ed8347a..b0f8caa 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -170,6 +170,15 @@
 #undef LG_PAGE
 
 /*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to
+ * subranges.  This is the normal order of things for mmap()/munmap(), but on
+ * Windows VirtualAlloc()/VirtualFree() operations must be precisely matched,
+ * i.e. mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
+/*
  * If defined, use munmap() to unmap freed chunks, rather than storing them for
  * later reuse.  This is disabled by default on Linux because common sequences
  * of mmap()/munmap() calls will cause virtual memory map holes.
diff --git a/src/chunk.c b/src/chunk.c
index 5945482..7a4ede8 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -337,6 +337,7 @@
 	extent_node_t *node, *prev;
 	extent_node_t key;
 
+	assert(maps_coalesce || size == chunksize);
 	assert(!cache || !zeroed);
 	unzeroed = cache || !zeroed;
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -421,6 +422,11 @@
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
+	if (!maps_coalesce && size != chunksize) {
+		chunk_dalloc_arena(arena, chunk, size, false);
+		return;
+	}
+
 	chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
 	    true, chunk, size, false);
 	arena_maybe_purge(arena);
diff --git a/src/huge.c b/src/huge.c
index a7993f8..7cd0d7d 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -304,6 +304,9 @@
 		return (false);
 	}
 
+	if (!maps_coalesce)
+		return (true);
+
 	/* Shrink the allocation in-place. */
 	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
 		huge_ralloc_no_move_shrink(ptr, oldsize, usize);
diff --git a/test/integration/chunk.c b/test/integration/chunk.c
index de45bc5..c94b2d4 100644
--- a/test/integration/chunk.c
+++ b/test/integration/chunk.c
@@ -63,9 +63,9 @@
 	    "Unexpected arenas.hchunk.2.size failure");
 	if (huge0 * 2 > huge2) {
 		/*
-		 * There are at least four size classes per doubling, so
-		 * xallocx() from size=huge2 to size=huge1 is guaranteed to
-		 * leave trailing purgeable memory.
+		 * There are at least four size classes per doubling, so a
+		 * successful xallocx() from size=huge2 to size=huge1 is
+		 * guaranteed to leave trailing purgeable memory.
 		 */
 		p = mallocx(huge2, 0);
 		assert_ptr_not_null(p, "Unexpected mallocx() error");