Integrate whole chunks into unused dirty page purging machinery.

Extend per arena unused dirty page purging to manage unused dirty chunks
in addition to unused dirty runs.  Rather than immediately unmapping
deallocated chunks (or purging them in the --disable-munmap case), store
them in a separate set of trees, chunks_[sz]ad_dirty.  Preferentially
allocate dirty chunks.  When excessive unused dirty pages accumulate,
purge runs and chunks in integrated LRU order (and unmap chunks in the
--enable-munmap case).
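
To make the chunk bookkeeping concrete, here is a minimal, self-contained
sketch (not jemalloc's actual implementation) of how a size/address-ordered
dirty set supports best-fit reuse.  A plain linked list stands in for the
real red-black trees, and dirty_chunk_t/dirty_chunks_fit are hypothetical
names used only for illustration:

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical stand-in for the dirty-chunk bookkeeping. */
	typedef struct dirty_chunk_s {
		void			*addr;
		size_t			 size;
		struct dirty_chunk_s	*next;	/* Stand-in for tree links. */
	} dirty_chunk_t;

	/*
	 * Best-fit lookup over the dirty set: smallest chunk that satisfies
	 * the request, ties broken by lowest address.  The real code derives
	 * the same ordering from extent_szad_comp() plus a tree search; a
	 * linear scan is used here only to keep the sketch self-contained.
	 */
	static dirty_chunk_t *
	dirty_chunks_fit(dirty_chunk_t *head, size_t size)
	{
		dirty_chunk_t *best = NULL;

		for (dirty_chunk_t *c = head; c != NULL; c = c->next) {
			if (c->size < size)
				continue;
			if (best == NULL || c->size < best->size ||
			    (c->size == best->size &&
			    (uintptr_t)c->addr < (uintptr_t)best->addr))
				best = c;
		}
		return (best);
	}

Allocation consults the dirty set first and falls back to mapping fresh
chunks only on a miss, which is what "preferentially allocate dirty chunks"
amounts to.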

Refactor extent_node_t to provide accessor functions.
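
The diff below switches extent_szad_comp() and extent_ad_comp() over to the
new accessors.  As a hedged illustration (the exact declarations live in the
extent header and may differ), the getters presumably reduce to trivial
inline wrappers around the addr/size fields that the pre-refactor code
touched directly:

	#include <stddef.h>

	typedef struct extent_node_s extent_node_t;
	struct extent_node_s {
		void	*addr;	/* Extent base address. */
		size_t	 size;	/* Total extent size in bytes. */
		/* ... tree link fields, etc., elided ... */
	};

	static inline void *
	extent_node_addr_get(const extent_node_t *node)
	{

		return (node->addr);
	}

	static inline size_t
	extent_node_size_get(const extent_node_t *node)
	{

		return (node->size);
	}
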
diff --git a/src/extent.c b/src/extent.c
index 60e2468..f98e77e 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -7,13 +7,13 @@
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
 	int ret;
-	size_t a_size = a->size;
-	size_t b_size = b->size;
+	size_t a_size = extent_node_size_get(a);
+	size_t b_size = extent_node_size_get(b);
 
 	ret = (a_size > b_size) - (a_size < b_size);
 	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)a->addr;
-		uintptr_t b_addr = (uintptr_t)b->addr;
+		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
 
 		ret = (a_addr > b_addr) - (a_addr < b_addr);
 	}
@@ -28,8 +28,8 @@
 JEMALLOC_INLINE_C int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
 {
-	uintptr_t a_addr = (uintptr_t)a->addr;
-	uintptr_t b_addr = (uintptr_t)b->addr;
+	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
 
 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }