Rename "dirty chunks" to "cached chunks".

Rename "dirty chunks" to "cached chunks", in order to avoid overloading
the term "dirty".

Fix the regression caused by 339c2b23b2d61993ac768afcc72af135662c6771
(Fix chunk_unmap() to propagate dirty state.), and actually address
what that change attempted: purge each chunk at most once, and
propagate to chunk_record() whether the purge left the pages zeroed.
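
For reference, the cc_link/runs_dirty fields below form intrusive
circular doubly-linked rings manipulated via the qr_*() macros. A
minimal standalone sketch of the ring semantics those macros provide
(the ring_t type and ring_*() names here are hypothetical illustrations,
not jemalloc's actual qr.h API):

	typedef struct ring_s ring_t;
	struct ring_s {
		ring_t	*prev;
		ring_t	*next;
	};

	/* Initialize a node as a singleton ring (cf. qr_new()). */
	static void
	ring_new(ring_t *r)
	{

		r->prev = r;
		r->next = r;
	}

	/* Splice ring b into ring a, forming one ring (cf. qr_meld()). */
	static void
	ring_meld(ring_t *a, ring_t *b)
	{
		ring_t *a_prev = a->prev;

		a->prev = b->prev;
		b->prev->next = a;
		b->prev = a_prev;
		a_prev->next = b;
	}

	/* Extract r, leaving it a singleton ring (cf. qr_remove()). */
	static void
	ring_remove(ring_t *r)
	{

		r->prev->next = r->next;
		r->next->prev = r->prev;
		ring_new(r);
	}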
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 2f99deb..81ff40b 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -33,9 +33,9 @@
 	/* Profile counters, used for huge objects. */
 	prof_tctx_t		*en_prof_tctx;
 
-	/* Linkage for arena's runs_dirty and chunks_dirty rings. */
-	qr(extent_node_t)	cd_link;
+	/* Linkage for arena's runs_dirty and chunks_cache rings. */
 	arena_chunk_map_misc_t	runs_dirty;
+	qr(extent_node_t)	cc_link;
 
 	union {
 		/* Linkage for the size/address-ordered tree. */
@@ -78,6 +78,9 @@
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
     size_t size, bool zeroed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
+void	extent_node_dirty_insert(extent_node_t *node,
+    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty);
+void	extent_node_dirty_remove(extent_node_t *node);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@@ -183,9 +186,27 @@
 extent_node_dirty_linkage_init(extent_node_t *node)
 {
 
-	qr_new(node, cd_link);
 	qr_new(&node->runs_dirty, rd_link);
+	qr_new(node, cc_link);
 }
+
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+
+	qr_meld(runs_dirty, &node->runs_dirty, rd_link);
+	qr_meld(chunks_dirty, node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+
+	qr_remove(&node->runs_dirty, rd_link);
+	qr_remove(node, cc_link);
+}
+
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
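
As a usage sketch, an arena-side caller would pair the new helpers with
the arena's runs_dirty and chunks_cache ring heads (the caller name and
the locking assumption are hypothetical; only the extent_node_dirty_*()
signatures come from the diff above):

	/*
	 * Hypothetical caller: link node onto the arena's cached-chunks
	 * ring and its dirty-runs ring. Assumes arena->lock is held.
	 */
	static void
	arena_chunk_cache_insert(arena_t *arena, extent_node_t *node)
	{

		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
	}

Removal is symmetric: extent_node_dirty_remove(node) unlinks the node
from both rings in a single call.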