Densely allocate mspaces.

New heaps are started on the page following the break of the previously
allocated heap.  In addition, we shrink the reported size of the previous
object bitmap so that it covers only the committed range of the heap.  We
now track the size of the bitmap's virtual memory reservation separately
so that we can unmap the whole reservation at shutdown time.
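
As a minimal sketch of the bookkeeping this implies, using the field
names visible in the diff below; the struct layout is abbreviated and the
shrink helper is hypothetical, standing in for whatever this change does
elsewhere to narrow the reported size:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        unsigned long *bits;  /* mmap()ed bitmap words */
        size_t bitsLen;       /* reported size; may shrink with the heap */
        size_t allocLen;      /* mmap() reservation size; fixed for life */
        uintptr_t base;
        uintptr_t max;
    } HeapBitmap;

    /* Hypothetical shrink helper: narrow the reported length so the
     * bitmap covers only the committed range of the heap, while
     * allocLen keeps the original reservation size for the eventual
     * munmap() at shutdown. */
    static void heapBitmapShrink(HeapBitmap *hb, size_t newBitsLen)
    {
        assert(newBitsLen <= hb->allocLen);
        hb->bitsLen = newBitsLen;
    }
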
diff --git a/vm/alloc/HeapBitmap.c b/vm/alloc/HeapBitmap.c
index 778fd87..e2cf117 100644
--- a/vm/alloc/HeapBitmap.c
+++ b/vm/alloc/HeapBitmap.c
@@ -70,7 +70,7 @@
 
     memset(hb, 0, sizeof(*hb));
     hb->bits = bits;
-    hb->bitsLen = bitsLen;
+    hb->bitsLen = hb->allocLen = bitsLen;
     hb->base = (uintptr_t)base;
     hb->max = hb->base - 1;
 
@@ -126,9 +126,7 @@
     assert(hb != NULL);
 
     if (hb->bits != NULL) {
-        // Re-calculate the size we passed to mmap().
-        size_t allocLen = ALIGN_UP_TO_PAGE_SIZE(hb->bitsLen);
-        munmap((char *)hb->bits, allocLen);
+        munmap((char *)hb->bits, hb->allocLen);
     }
     memset(hb, 0, sizeof(*hb));
 }
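
Note the design choice in the delete path: once bitsLen can be shrunk
below the original reservation, recomputing the mapping size as
ALIGN_UP_TO_PAGE_SIZE(hb->bitsLen) would under-size the munmap() and
leak the tail of the reservation until process exit, which is why
allocLen is recorded at creation time instead.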