Rename "dirty chunks" to "cached chunks".

Rename "dirty chunks" to "cached chunks", in order to avoid overloading
the term "dirty".

Fix the regression caused by 339c2b23b2d61993ac768afcc72af135662c6771
(Fix chunk_unmap() to propagate dirty state.), and actually accomplish what
that change attempted: purge chunks only once, and propagate into
chunk_record() whether the purge left the pages zeroed.
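
As an illustrative sketch only (not part of this diff), the call pattern the
new signatures are meant to support looks roughly like the following; the
purge helper and local variable names are hypothetical placeholders for the
arena-internal code that actually performs the purge:

    /*
     * Hypothetical caller sketch: the caller purges the chunk at most once
     * and hands the resulting zeroed state down, so chunk_record() can keep
     * accurate zeroed-page state without purging again.  chunk_purge_once()
     * is an assumed placeholder, not a function introduced by this change.
     */
    bool zeroed = chunk_purge_once(chunk, size);    /* purge exactly once */
    chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
        true, chunk, size, zeroed);                 /* cache=true, zeroed propagated */

Similarly, chunk_unmap() now takes the zeroed state in place of the old dirty
flag, so callers that have already purged can forward that information rather
than triggering another purge.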
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 0383f0c..3d79c62 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -318,14 +318,14 @@
 
 	/*
 	 * Unused dirty memory this arena manages.  Dirty memory is conceptually
-	 * tracked as an arbitrarily interleaved LRU of runs and chunks, but the
-	 * list linkage is actually semi-duplicated in order to avoid extra
-	 * arena_chunk_map_misc_t space overhead.
+	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
+	 * chunks, but the list linkage is actually semi-duplicated in order to
+	 * avoid extra arena_chunk_map_misc_t space overhead.
 	 *
 	 *   LRU-----------------------------------------------------------MRU
 	 *
 	 *         ______________           ___                      ___
-	 *   ...-->|chunks_dirty|<--------->|c|<-------------------->|c|<--...
+	 *   ...-->|chunks_cache|<--------->|c|<-------------------->|c|<--...
 	 *         --------------           |h|                      |h|
 	 *         ____________    _____    |u|    _____    _____    |u|
 	 *   ...-->|runs_dirty|<-->|run|<-->|n|<-->|run|<-->|run|<-->|n|<--...
@@ -333,7 +333,7 @@
 	 *                                  ---                      ---
 	 */
 	arena_chunk_map_misc_t	runs_dirty;
-	extent_node_t		chunks_dirty;
+	extent_node_t		chunks_cache;
 
 	/* Extant huge allocations. */
 	ql_head(extent_node_t)	huge;
@@ -347,8 +347,8 @@
 	 * orderings are needed, which is why there are two trees with the same
 	 * contents.
 	 */
-	extent_tree_t		chunks_szad_dirty;
-	extent_tree_t		chunks_ad_dirty;
+	extent_tree_t		chunks_szad_cache;
+	extent_tree_t		chunks_ad_cache;
 	extent_tree_t		chunks_szad_mmap;
 	extent_tree_t		chunks_ad_mmap;
 	extent_tree_t		chunks_szad_dss;
@@ -384,10 +384,10 @@
 extern unsigned		nlclasses; /* Number of large size classes. */
 extern unsigned		nhclasses; /* Number of huge size classes. */
 
-void	arena_chunk_dirty_maybe_insert(arena_t *arena, extent_node_t *node,
-    bool dirty);
-void	arena_chunk_dirty_maybe_remove(arena_t *arena, extent_node_t *node,
-    bool dirty);
+void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+    bool cache);
+void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+    bool cache);
 extent_node_t	*arena_node_alloc(arena_t *arena);
 void	arena_node_dalloc(arena_t *arena, extent_node_t *node);
 void	*arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 8722dd0..bf6acbd 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -45,9 +45,10 @@
 void	*chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
     bool *zero, unsigned arena_ind);
 void	chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size);
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
+    bool zeroed);
 bool	chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void	chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size);
+void	chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed);
 bool	chunk_boot(void);
 void	chunk_prefork(void);
 void	chunk_postfork_parent(void);
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 2f99deb..81ff40b 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -33,9 +33,9 @@
 	/* Profile counters, used for huge objects. */
 	prof_tctx_t		*en_prof_tctx;
 
-	/* Linkage for arena's runs_dirty and chunks_dirty rings. */
-	qr(extent_node_t)	cd_link;
+	/* Linkage for arena's runs_dirty and chunks_cache rings. */
 	arena_chunk_map_misc_t	runs_dirty;
+	qr(extent_node_t)	cc_link;
 
 	union {
 		/* Linkage for the size/address-ordered tree. */
@@ -78,6 +78,9 @@
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
     size_t size, bool zeroed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
+void	extent_node_dirty_insert(extent_node_t *node,
+    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty);
+void	extent_node_dirty_remove(extent_node_t *node);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@@ -183,9 +186,27 @@
 extent_node_dirty_linkage_init(extent_node_t *node)
 {
 
-	qr_new(node, cd_link);
 	qr_new(&node->runs_dirty, rd_link);
+	qr_new(node, cc_link);
 }
+
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+
+	qr_meld(runs_dirty, &node->runs_dirty, rd_link);
+	qr_meld(chunks_dirty, node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+
+	qr_remove(&node->runs_dirty, rd_link);
+	qr_remove(node, cc_link);
+}
+
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 0a8654b..dfe62ce 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -12,9 +12,9 @@
 arena_choose
 arena_choose_hard
 arena_chunk_alloc_huge
+arena_chunk_cache_maybe_insert
+arena_chunk_cache_maybe_remove
 arena_chunk_dalloc_huge
-arena_chunk_dirty_maybe_insert
-arena_chunk_dirty_maybe_remove
 arena_chunk_ralloc_huge_expand
 arena_chunk_ralloc_huge_shrink
 arena_chunk_ralloc_huge_similar
@@ -182,7 +182,9 @@
 extent_node_addr_set
 extent_node_arena_get
 extent_node_arena_set
+extent_node_dirty_insert
 extent_node_dirty_linkage_init
+extent_node_dirty_remove
 extent_node_init
 extent_node_prof_tctx_get
 extent_node_prof_tctx_set