Implement decay-based unused dirty page purging.

This is an alternative to the existing ratio-based unused dirty page
purging, and is intended to eventually become the sole purging
mechanism.

Add mallctls:
- opt.purge
- opt.decay_time
- arena.<i>.decay
- arena.<i>.decay_time
- arenas.decay_time
- stats.arenas.<i>.decay_time

This resolves #325.
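
For reference, the new controls are driven through the standard mallctl
interface.  The following is a minimal illustrative sketch, assuming a
build without a jemalloc symbol prefix; the arena index and the 5-second
value are arbitrary:

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    /* Opt into decay-based purging before jemalloc initializes. */
    const char *malloc_conf = "purge:decay,decay_time:10";

    int
    main(void)
    {
        ssize_t decay_time = 5;
        unsigned narenas;
        size_t sz;
        char name[64];

        /* Shorten arena 0's decay time to 5 seconds. */
        if (mallctl("arena.0.decay_time", NULL, NULL, &decay_time,
            sizeof(decay_time)) != 0)
            fprintf(stderr, "arena.0.decay_time failed\n");

        /* Trigger decay-based purging of all arenas at once. */
        sz = sizeof(narenas);
        if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
            snprintf(name, sizeof(name), "arena.%u.decay", narenas);
            mallctl(name, NULL, NULL, NULL, 0);
        }

        return (0);
    }
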
diff --git a/Makefile.in b/Makefile.in
index 9530aa8..e568192 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -121,6 +121,7 @@
 TESTS_UNIT := $(srcroot)test/unit/atomic.c \
 	$(srcroot)test/unit/bitmap.c \
 	$(srcroot)test/unit/ckh.c \
+	$(srcroot)test/unit/decay.c \
 	$(srcroot)test/unit/hash.c \
 	$(srcroot)test/unit/junk.c \
 	$(srcroot)test/unit/junk_alloc.c \
@@ -354,18 +355,22 @@
 check_dir: check_unit_dir check_integration_dir
 
 check_unit: tests_unit check_unit_dir
-	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+	$(MALLOC_CONF)="purge:ratio" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+	$(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
 check_integration_prof: tests_integration check_integration_dir
 ifeq ($(enable_prof), 1)
 	$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
 	$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
 endif
+check_integration_decay: tests_integration check_integration_dir
+	$(MALLOC_CONF)="purge:decay,decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+	$(MALLOC_CONF)="purge:decay,decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+	$(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
 check_integration: tests_integration check_integration_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
 stress: tests_stress stress_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
-check: tests check_dir check_integration_prof
-	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+check: check_unit check_integration check_integration_decay check_integration_prof
 
 ifeq ($(enable_code_coverage), 1)
 coverage_unit: check_unit
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 48765b0..0ced0aa 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -949,6 +949,20 @@
         number of CPUs, or one if there is a single CPU.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="opt.purge">
+        <term>
+          <mallctl>opt.purge</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Purge mode is &ldquo;ratio&rdquo; (default) or
+        &ldquo;decay&rdquo;.  See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for details of the ratio mode.  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        details of the decay mode.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="opt.lg_dirty_mult">
         <term>
           <mallctl>opt.lg_dirty_mult</mallctl>
@@ -971,6 +985,26 @@
         for related dynamic control options.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="opt.decay_time">
+        <term>
+          <mallctl>opt.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Approximate time in seconds from the creation of a set
+        of unused dirty pages until an equivalent set of unused dirty pages is
+        purged and/or reused.  The pages are incrementally purged according to a
+        sigmoidal decay curve that starts and ends with zero purge rate.  A
+        decay time of 0 causes all unused dirty pages to be purged immediately
+        upon creation.  A decay time of -1 disables purging.  The default decay
+        time is 10 seconds.  See <link
+        linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
+        and <link
+        linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
+        for related dynamic control options.
+        </para></listitem>
+      </varlistentry>
+
       <varlistentry id="opt.stats_print">
         <term>
           <mallctl>opt.stats_print</mallctl>
@@ -1501,12 +1535,27 @@
           (<type>void</type>)
           <literal>--</literal>
         </term>
-        <listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for
+        <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
         all arenas if &lt;i&gt; equals <link
         linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
         </para></listitem>
       </varlistentry>
 
+      <varlistentry id="arena.i.decay">
+        <term>
+          <mallctl>arena.&lt;i&gt;.decay</mallctl>
+          (<type>void</type>)
+          <literal>--</literal>
+        </term>
+        <listitem><para>Trigger decay-based purging of unused dirty pages for
+        arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals <link
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
+        The proportion of unused dirty pages to be purged depends on the current
+        time; see <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        details.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="arena.i.dss">
         <term>
           <mallctl>arena.&lt;i&gt;.dss</mallctl>
@@ -1535,6 +1584,22 @@
         for additional information.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="arena.i.decay_time">
+        <term>
+          <mallctl>arena.&lt;i&gt;.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Current per-arena approximate time in seconds from the
+        creation of a set of unused dirty pages until an equivalent set of
+        unused dirty pages is purged and/or reused.  Each time this interface is
+        set, all currently unused dirty pages are considered to have fully
+        decayed, which causes immediate purging of all unused dirty pages unless
+        the decay time is set to -1 (i.e. purging disabled).  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        additional information.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="arena.i.chunk_hooks">
         <term>
           <mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl>
@@ -1769,6 +1834,21 @@
         for additional information.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="arenas.decay_time">
+        <term>
+          <mallctl>arenas.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Current default per-arena approximate time in seconds
+        from the creation of a set of unused dirty pages until an equivalent set
+        of unused dirty pages is purged and/or reused, used to initialize <link
+        linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
+        during arena creation.  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        additional information.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="arenas.quantum">
         <term>
           <mallctl>arenas.quantum</mallctl>
@@ -2113,6 +2193,19 @@
         for details.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.decay_time">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Approximate time in seconds from the creation of a set
+        of unused dirty pages until an equivalent set of unused dirty pages is
+        purged and/or reused.  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
+        for details.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.arenas.i.nthreads">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
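
The read/write designations documented above can be exercised directly; a
brief sketch (again assuming an unprefixed build) that queries the
read-only purge mode and the read-write default decay time:

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        const char *purge;
        ssize_t decay_time;
        size_t sz;

        /* opt.purge (r-): reports "ratio" or "decay". */
        sz = sizeof(purge);
        if (mallctl("opt.purge", &purge, &sz, NULL, 0) == 0)
            printf("purge mode: %s\n", purge);

        /* arenas.decay_time (rw): the default for newly created arenas. */
        sz = sizeof(decay_time);
        if (mallctl("arenas.decay_time", &decay_time, &sz, NULL, 0) == 0)
            printf("default decay time: %zd\n", decay_time);

        return (0);
    }
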
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 2750c00..76d3be1 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -23,6 +23,18 @@
  */
 #define	LG_DIRTY_MULT_DEFAULT	3
 
+typedef enum {
+	purge_mode_ratio = 0,
+	purge_mode_decay = 1,
+
+	purge_mode_limit = 2
+} purge_mode_t;
+#define	PURGE_DEFAULT		purge_mode_ratio
+/* Default decay time in seconds. */
+#define	DECAY_TIME_DEFAULT	10
+/* Number of event ticks between time checks. */
+#define	DECAY_NTICKS_PER_UPDATE	1000
+
 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
@@ -325,7 +337,7 @@
 	/* Minimum ratio (log base 2) of nactive:ndirty. */
 	ssize_t			lg_dirty_mult;
 
-	/* True if a thread is currently executing arena_purge(). */
+	/* True if a thread is currently executing arena_purge_to_limit(). */
 	bool			purging;
 
 	/* Number of pages in active runs and huge regions. */
@@ -376,6 +388,53 @@
 	arena_runs_dirty_link_t	runs_dirty;
 	extent_node_t		chunks_cache;
 
+	/*
+	 * Approximate time in seconds from the creation of a set of unused
+	 * dirty pages until an equivalent set of unused dirty pages is purged
+	 * and/or reused.
+	 */
+	ssize_t			decay_time;
+	/* decay_time / SMOOTHSTEP_NSTEPS. */
+	struct timespec		decay_interval;
+	/*
+	 * Time at which the current decay interval logically started.  We do
+	 * not actually advance to a new epoch until sometime after it starts
+	 * because of scheduling and computation delays, and it is even possible
+	 * to completely skip epochs.  In all cases, during epoch advancement we
+	 * merge all relevant activity into the most recently recorded epoch.
+	 */
+	struct timespec		decay_epoch;
+	/* decay_deadline randomness generator. */
+	uint64_t		decay_jitter_state;
+	/*
+	 * Deadline for current epoch.  This is the sum of decay_interval and
+	 * per epoch jitter which is a uniform random variable in
+	 * [0..decay_interval).  Epochs always advance by precise multiples of
+	 * decay_interval, but we randomize the deadline to reduce the
+	 * likelihood of arenas purging in lockstep.
+	 */
+	struct timespec		decay_deadline;
+	/*
+	 * Number of dirty pages at beginning of current epoch.  During epoch
+	 * advancement we use the delta between decay_ndirty and ndirty to
+	 * determine how many dirty pages, if any, were generated, and record
+	 * the result in decay_backlog.
+	 */
+	size_t			decay_ndirty;
+	/*
+	 * Memoized result of arena_decay_backlog_npages_limit() corresponding
+	 * to the current contents of decay_backlog, i.e. the upper limit on how
+	 * many dirty pages the decay epochs on record allow to remain unpurged.
+	 */
+	size_t			decay_backlog_npages_limit;
+	/*
+	 * Trailing log of how many unused dirty pages were generated during
+	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+	 * element is the most recent epoch.  Corresponding epoch times are
+	 * relative to decay_epoch.
+	 */
+	size_t			decay_backlog[SMOOTHSTEP_NSTEPS];
+
 	/* Extant huge allocations. */
 	ql_head(extent_node_t)	huge;
 	/* Synchronizes all huge allocation/update/deallocation. */
@@ -408,6 +467,7 @@
 /* Used in conjunction with tsd for fast arena-related context lookup. */
 struct arena_tdata_s {
 	arena_t			*arena;
+	ticker_t		decay_ticker;
 };
 #endif /* JEMALLOC_ARENA_STRUCTS_B */
 
@@ -423,7 +483,10 @@
 #endif
     ;
 
+extern purge_mode_t	opt_purge;
+extern const char	*purge_mode_names[];
 extern ssize_t		opt_lg_dirty_mult;
+extern ssize_t		opt_decay_time;
 
 extern arena_bin_info_t	arena_bin_info[NBINS];
 
@@ -451,9 +514,11 @@
     size_t oldsize, size_t usize, bool *zero);
 ssize_t	arena_lg_dirty_mult_get(arena_t *arena);
 bool	arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
+ssize_t	arena_decay_time_get(arena_t *arena);
+bool	arena_decay_time_set(arena_t *arena, ssize_t decay_time);
 void	arena_maybe_purge(arena_t *arena);
-void	arena_purge_all(arena_t *arena);
-void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
+void	arena_purge(arena_t *arena, bool all);
+void	arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
     szind_t binind, uint64_t prof_accumbytes);
 void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
@@ -467,7 +532,7 @@
 void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 #endif
 void	arena_quarantine_junk_small(void *ptr, size_t usize);
-void	*arena_malloc_large(arena_t *arena, size_t size,
+void	*arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size,
     szind_t ind, bool zero);
 void	*arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache);
@@ -478,8 +543,8 @@
     void *ptr, arena_chunk_map_bits_t *bitselm);
 void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind, arena_chunk_map_bits_t *bitselm);
-void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind);
+void	arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind);
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
@@ -488,12 +553,13 @@
 #endif
 void	arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
-void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
+void	arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+    void *ptr);
 #ifdef JEMALLOC_JET
 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 #endif
-bool	arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+bool	arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache);
@@ -501,9 +567,11 @@
 bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
 ssize_t	arena_lg_dirty_mult_default_get(void);
 bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
+ssize_t	arena_decay_time_default_get(void);
+bool	arena_decay_time_default_set(ssize_t decay_time);
 void	arena_stats_merge(arena_t *arena, const char **dss,
-    ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
+    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
 arena_t	*arena_new(unsigned ind);
 bool	arena_boot(void);
@@ -566,6 +634,8 @@
 void	arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
 void	arena_prof_tctx_reset(const void *ptr, size_t usize,
     const void *old_ptr, prof_tctx_t *old_tctx);
+void	arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
+void	arena_decay_tick(tsd_t *tsd, arena_t *arena);
 void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache, bool slow_path);
 arena_t	*arena_aalloc(const void *ptr);
@@ -1165,6 +1235,27 @@
 	}
 }
 
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
+{
+	ticker_t *decay_ticker;
+
+	if (unlikely(tsd == NULL))
+		return;
+	decay_ticker = decay_ticker_get(tsd, arena->ind);
+	if (unlikely(decay_ticker == NULL))
+		return;
+	if (unlikely(ticker_ticks(decay_ticker, nticks)))
+		arena_purge(arena, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsd_t *tsd, arena_t *arena)
+{
+
+	arena_decay_ticks(tsd, arena, 1);
+}
+
 JEMALLOC_ALWAYS_INLINE void *
 arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
     tcache_t *tcache, bool slow_path)
@@ -1271,7 +1362,7 @@
 				tcache_dalloc_small(tsd, tcache, ptr, binind,
 				    slow_path);
 			} else {
-				arena_dalloc_small(extent_node_arena_get(
+				arena_dalloc_small(tsd, extent_node_arena_get(
 				    &chunk->node), chunk, ptr, pageind);
 			}
 		} else {
@@ -1286,7 +1377,7 @@
 				tcache_dalloc_large(tsd, tcache, ptr, size -
 				    large_pad, slow_path);
 			} else {
-				arena_dalloc_large(extent_node_arena_get(
+				arena_dalloc_large(tsd, extent_node_arena_get(
 				    &chunk->node), chunk, ptr);
 			}
 		}
@@ -1326,7 +1417,7 @@
 			} else {
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
-				arena_dalloc_small(extent_node_arena_get(
+				arena_dalloc_small(tsd, extent_node_arena_get(
 				    &chunk->node), chunk, ptr, pageind);
 			}
 		} else {
@@ -1337,7 +1428,7 @@
 				tcache_dalloc_large(tsd, tcache, ptr, size,
 				    true);
 			} else {
-				arena_dalloc_large(extent_node_arena_get(
+				arena_dalloc_large(tsd, extent_node_arena_get(
 				    &chunk->node), chunk, ptr);
 			}
 		}
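
The fast-path cost of decay is bounded by the ticker: arena_decay_ticks()
above only falls through to arena_purge(arena, false) once per
DECAY_NTICKS_PER_UPDATE (1000) allocation events.  The stand-in below,
mini_ticker_t, is a hypothetical sketch of the countdown semantics relied
on here; the real ticker_t lives in ticker.h:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        int32_t tick;   /* Remaining events before firing. */
        int32_t nticks; /* Countdown length to rearm with. */
    } mini_ticker_t;

    static void
    mini_ticker_init(mini_ticker_t *t, int32_t nticks)
    {
        t->tick = nticks;
        t->nticks = nticks;
    }

    /* Count n events; return true once per nticks-event batch. */
    static bool
    mini_ticker_ticks(mini_ticker_t *t, int32_t n)
    {
        t->tick -= n;
        if (t->tick <= 0) {
            t->tick = t->nticks; /* Rearm for the next batch. */
            return (true);       /* Caller checks the decay deadline. */
        }
        return (false);
    }
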
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index 751c14b..9add3ed 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -35,6 +35,7 @@
 	unsigned		nthreads;
 	const char		*dss;
 	ssize_t			lg_dirty_mult;
+	ssize_t			decay_time;
 	size_t			pactive;
 	size_t			pdirty;
 	arena_stats_t		astats;
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index ece7af9..68d3789 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -13,8 +13,8 @@
     tcache_t *tcache);
 void	*huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
     bool zero, tcache_t *tcache);
-bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
-    size_t usize_max, bool zero);
+bool	huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
+    size_t usize_min, size_t usize_max, bool zero);
 void	*huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index e84c435..3b2f75d 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -545,6 +545,7 @@
     bool refresh_if_missing);
 arena_t	*arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
     bool refresh_if_missing);
+ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -833,6 +834,17 @@
 
 	return (tdata->arena);
 }
+
+JEMALLOC_INLINE ticker_t *
+decay_ticker_get(tsd_t *tsd, unsigned ind)
+{
+	arena_tdata_t *tdata;
+
+	tdata = arena_tdata_get(tsd, ind, true);
+	if (unlikely(tdata == NULL))
+		return (NULL);
+	return (&tdata->decay_ticker);
+}
 #endif
 
 #include "jemalloc/internal/bitmap.h"
@@ -883,8 +895,8 @@
     size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
 void	*iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
     size_t alignment, bool zero);
-bool	ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero);
+bool	ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -1150,8 +1162,8 @@
 }
 
 JEMALLOC_ALWAYS_INLINE bool
-ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
-    bool zero)
+ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero)
 {
 
 	assert(ptr != NULL);
@@ -1163,7 +1175,7 @@
 		return (true);
 	}
 
-	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
 }
 #endif
 
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index a0e6d8a..95ddf0c 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -25,6 +25,12 @@
 arena_dalloc_large
 arena_dalloc_large_junked_locked
 arena_dalloc_small
+arena_decay_time_default_get
+arena_decay_time_default_set
+arena_decay_time_get
+arena_decay_time_set
+arena_decay_tick
+arena_decay_ticks
 arena_dss_prec_get
 arena_dss_prec_set
 arena_get
@@ -83,7 +89,7 @@
 arena_prof_tctx_reset
 arena_prof_tctx_set
 arena_ptr_small_binind_get
-arena_purge_all
+arena_purge
 arena_quarantine_junk_small
 arena_ralloc
 arena_ralloc_junk_large
@@ -185,6 +191,7 @@
 ctl_postfork_child
 ctl_postfork_parent
 ctl_prefork
+decay_ticker_get
 dss_prec_names
 extent_node_achunk_get
 extent_node_achunk_set
@@ -318,6 +325,7 @@
 ncpus
 nhbins
 opt_abort
+opt_decay_time
 opt_dss
 opt_junk
 opt_junk_alloc
@@ -336,6 +344,7 @@
 opt_prof_leak
 opt_prof_prefix
 opt_prof_thread_active_init
+opt_purge
 opt_quarantine
 opt_redzone
 opt_stats_print
@@ -397,6 +406,7 @@
 prof_thread_active_set
 prof_thread_name_get
 prof_thread_name_set
+purge_mode_names
 quarantine
 quarantine_alloc_hook
 quarantine_alloc_hook_work
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index c64f5d3..09935c3 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -361,7 +361,7 @@
 
 		usize = index2size(binind);
 		assert(usize <= tcache_maxclass);
-		ret = arena_malloc_large(arena, usize, binind, zero);
+		ret = arena_malloc_large(tsd, arena, usize, binind, zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
diff --git a/include/jemalloc/internal/time.h b/include/jemalloc/internal/time.h
index a290f38..dd1dd5b 100644
--- a/include/jemalloc/internal/time.h
+++ b/include/jemalloc/internal/time.h
@@ -26,7 +26,12 @@
 void	time_idivide(struct timespec *time, uint64_t divisor);
 uint64_t	time_divide(const struct timespec *time,
     const struct timespec *divisor);
+#ifdef JEMALLOC_JET
+typedef bool (time_update_t)(struct timespec *);
+extern time_update_t *time_update;
+#else
 bool	time_update(struct timespec *time);
+#endif
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
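
The JEMALLOC_JET indirection makes time_update a writable function pointer
so that tests can substitute a deterministic clock; test/unit/decay.c below
does exactly that.  A minimal sketch of such a mock (fake_now and the
function names are hypothetical):

    #ifdef JEMALLOC_JET
    #include <stdbool.h>
    #include <time.h>

    static struct timespec fake_now;

    /* Report fake_now instead of reading the real clock.  Returning
     * false mirrors time_update()'s "time did not go backwards" case. */
    static bool
    time_update_fake(struct timespec *time)
    {
        *time = fake_now;
        return (false);
    }

    static void
    install_fake_clock(void)
    {
        time_update = time_update_fake; /* Swap in the mock. */
    }
    #endif
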
diff --git a/src/arena.c b/src/arena.c
index 47b136b..b1078ae 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -4,8 +4,17 @@
 /******************************************************************************/
 /* Data. */
 
+purge_mode_t	opt_purge = PURGE_DEFAULT;
+const char	*purge_mode_names[] = {
+	"ratio",
+	"decay",
+	"N/A"
+};
 ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
 static ssize_t	lg_dirty_mult_default;
+ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
+static ssize_t	decay_time_default;
+
 arena_bin_info_t	arena_bin_info[NBINS];
 
 size_t		map_bias;
@@ -1206,9 +1215,192 @@
 }
 
 static void
+arena_decay_deadline_init(arena_t *arena)
+{
+
+	assert(opt_purge == purge_mode_decay);
+
+	/*
+	 * Generate a new deadline that is uniformly random within the next
+	 * epoch after the current one.
+	 */
+	time_copy(&arena->decay_deadline, &arena->decay_epoch);
+	time_add(&arena->decay_deadline, &arena->decay_interval);
+	if (arena->decay_time > 0) {
+		uint64_t decay_interval_ns, r;
+		struct timespec jitter;
+
+		decay_interval_ns = time_sec(&arena->decay_interval) *
+		    1000000000 + time_nsec(&arena->decay_interval);
+		r = prng_range(&arena->decay_jitter_state, decay_interval_ns);
+		time_init(&jitter, r / 1000000000, r % 1000000000);
+		time_add(&arena->decay_deadline, &jitter);
+	}
+}
+
+static bool
+arena_decay_deadline_reached(const arena_t *arena, const struct timespec *time)
+{
+
+	assert(opt_purge == purge_mode_decay);
+
+	return (time_compare(&arena->decay_deadline, time) <= 0);
+}
+
+static size_t
+arena_decay_backlog_npages_limit(const arena_t *arena)
+{
+	static const uint64_t h_steps[] = {
+#define	STEP(step, h, x, y) \
+		h,
+		SMOOTHSTEP
+#undef STEP
+	};
+	uint64_t sum;
+	size_t npages_limit_backlog;
+	unsigned i;
+
+	assert(opt_purge == purge_mode_decay);
+
+	/*
+	 * For each element of decay_backlog, multiply by the corresponding
+	 * fixed-point smoothstep decay factor.  Sum the products, then divide
+	 * to round down to the nearest whole number of pages.
+	 */
+	sum = 0;
+	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+		sum += arena->decay_backlog[i] * h_steps[i];
+	npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);
+
+	return (npages_limit_backlog);
+}
+
+static void
+arena_decay_epoch_advance(arena_t *arena, const struct timespec *time)
+{
+	uint64_t nadvance;
+	struct timespec delta;
+	size_t ndirty_delta;
+
+	assert(opt_purge == purge_mode_decay);
+	assert(arena_decay_deadline_reached(arena, time));
+
+	time_copy(&delta, time);
+	time_subtract(&delta, &arena->decay_epoch);
+	nadvance = time_divide(&delta, &arena->decay_interval);
+	assert(nadvance > 0);
+
+	/* Add nadvance decay intervals to epoch. */
+	time_copy(&delta, &arena->decay_interval);
+	time_imultiply(&delta, nadvance);
+	time_add(&arena->decay_epoch, &delta);
+
+	/* Set a new deadline. */
+	arena_decay_deadline_init(arena);
+
+	/* Update the backlog. */
+	if (nadvance >= SMOOTHSTEP_NSTEPS) {
+		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+		    sizeof(size_t));
+	} else {
+		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
+		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
+		if (nadvance > 1) {
+			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
+			    nadvance], 0, (nadvance-1) * sizeof(size_t));
+		}
+	}
+	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
+	    arena->decay_ndirty : 0;
+	arena->decay_ndirty = arena->ndirty;
+	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+	arena->decay_backlog_npages_limit =
+	    arena_decay_backlog_npages_limit(arena);
+}
+
+static size_t
+arena_decay_npages_limit(arena_t *arena)
+{
+	size_t npages_limit;
+
+	assert(opt_purge == purge_mode_decay);
+
+	npages_limit = arena->decay_backlog_npages_limit;
+
+	/* Add in any dirty pages created during the current epoch. */
+	if (arena->ndirty > arena->decay_ndirty)
+		npages_limit += arena->ndirty - arena->decay_ndirty;
+
+	return (npages_limit);
+}
+
+static void
+arena_decay_init(arena_t *arena, ssize_t decay_time)
+{
+
+	arena->decay_time = decay_time;
+	if (decay_time > 0) {
+		time_init(&arena->decay_interval, decay_time, 0);
+		time_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
+	}
+
+	time_init(&arena->decay_epoch, 0, 0);
+	time_update(&arena->decay_epoch);
+	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
+	arena_decay_deadline_init(arena);
+	arena->decay_ndirty = arena->ndirty;
+	arena->decay_backlog_npages_limit = 0;
+	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+static bool
+arena_decay_time_valid(ssize_t decay_time)
+{
+
+	return (decay_time >= -1 && decay_time <= TIME_SEC_MAX);
+}
+
+ssize_t
+arena_decay_time_get(arena_t *arena)
+{
+	ssize_t decay_time;
+
+	malloc_mutex_lock(&arena->lock);
+	decay_time = arena->decay_time;
+	malloc_mutex_unlock(&arena->lock);
+
+	return (decay_time);
+}
+
+bool
+arena_decay_time_set(arena_t *arena, ssize_t decay_time)
+{
+
+	if (!arena_decay_time_valid(decay_time))
+		return (true);
+
+	malloc_mutex_lock(&arena->lock);
+	/*
+	 * Restart decay backlog from scratch, which may cause many dirty pages
+	 * to be immediately purged.  It would conceptually be possible to map
+	 * the old backlog onto the new backlog, but there is no justification
+	 * for such complexity since decay_time changes are intended to be
+	 * infrequent, either between the {-1, 0, >0} states, or a one-time
+	 * arbitrary change during initial arena configuration.
+	 */
+	arena_decay_init(arena, decay_time);
+	arena_maybe_purge(arena);
+	malloc_mutex_unlock(&arena->lock);
+
+	return (false);
+}
+
+static void
 arena_maybe_purge_ratio(arena_t *arena)
 {
 
+	assert(opt_purge == purge_mode_ratio);
+
 	/* Don't purge if the option is disabled. */
 	if (arena->lg_dirty_mult < 0)
 		return;
@@ -1231,6 +1423,41 @@
 	}
 }
 
+static void
+arena_maybe_purge_decay(arena_t *arena)
+{
+	struct timespec time;
+	size_t ndirty_limit;
+
+	assert(opt_purge == purge_mode_decay);
+
+	/* decay_time == 0: purge everything; decay_time == -1: do nothing. */
+	if (arena->decay_time <= 0) {
+		if (arena->decay_time == 0)
+			arena_purge_to_limit(arena, 0);
+		return;
+	}
+
+	time_copy(&time, &arena->decay_epoch);
+	if (unlikely(time_update(&time))) {
+		/* Time went backwards.  Force an epoch advance. */
+		time_copy(&time, &arena->decay_deadline);
+	}
+
+	if (arena_decay_deadline_reached(arena, &time))
+		arena_decay_epoch_advance(arena, &time);
+
+	ndirty_limit = arena_decay_npages_limit(arena);
+
+	/*
+	 * Don't try to purge unless the number of purgeable pages exceeds the
+	 * current limit.
+	 */
+	if (arena->ndirty <= ndirty_limit)
+		return;
+	arena_purge_to_limit(arena, ndirty_limit);
+}
+
 void
 arena_maybe_purge(arena_t *arena)
 {
@@ -1239,7 +1466,10 @@
 	if (arena->purging)
 		return;
 
-	arena_maybe_purge_ratio(arena);
+	if (opt_purge == purge_mode_ratio)
+		arena_maybe_purge_ratio(arena);
+	else
+		arena_maybe_purge_decay(arena);
 }
 
 static size_t
@@ -1298,6 +1528,9 @@
 			UNUSED void *chunk;
 
 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+			if (opt_purge == purge_mode_decay && arena->ndirty -
+			    (nstashed + npages) < ndirty_limit)
+				break;
 
 			chunkselm_next = qr_next(chunkselm, cc_link);
 			/*
@@ -1327,6 +1560,9 @@
 			    arena_mapbits_unallocated_size_get(chunk, pageind);
 
 			npages = run_size >> LG_PAGE;
+			if (opt_purge == purge_mode_decay && arena->ndirty -
+			    (nstashed + npages) < ndirty_limit)
+				break;
 
 			assert(pageind + npages <= chunk_npages);
 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
@@ -1352,7 +1588,8 @@
 		}
 
 		nstashed += npages;
-		if (arena->ndirty - nstashed <= ndirty_limit)
+		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
+		    ndirty_limit)
 			break;
 	}
 
@@ -1492,6 +1729,15 @@
 	}
 }
 
+/*
+ * NB: ndirty_limit is interpreted differently depending on opt_purge:
+ *   - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
+ *                       desired state:
+ *                       (arena->ndirty <= ndirty_limit)
+ *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
+ *                       violating the invariant:
+ *                       (arena->ndirty >= ndirty_limit)
+ */
 static void
 arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
 {
@@ -1510,8 +1756,8 @@
 		size_t ndirty = arena_dirty_count(arena);
 		assert(ndirty == arena->ndirty);
 	}
-	assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty ||
-	    ndirty_limit == 0);
+	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
+	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
 
 	qr_new(&purge_runs_sentinel, rd_link);
 	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
@@ -1534,11 +1780,14 @@
 }
 
 void
-arena_purge_all(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
 {
 
 	malloc_mutex_lock(&arena->lock);
-	arena_purge_to_limit(arena, 0);
+	if (all)
+		arena_purge_to_limit(arena, 0);
+	else
+		arena_maybe_purge(arena);
 	malloc_mutex_unlock(&arena->lock);
 }
 
@@ -1960,8 +2209,8 @@
 }
 
 void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
-    uint64_t prof_accumbytes)
+arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
+    szind_t binind, uint64_t prof_accumbytes)
 {
 	unsigned i, nfill;
 	arena_bin_t *bin;
@@ -2008,6 +2257,7 @@
 	}
 	malloc_mutex_unlock(&bin->lock);
 	tbin->ncached = i;
+	arena_decay_tick(tsd, arena);
 }
 
 void
@@ -2118,7 +2368,8 @@
 }
 
 static void *
-arena_malloc_small(arena_t *arena, size_t size, szind_t binind, bool zero)
+arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
+    bool zero)
 {
 	void *ret;
 	arena_bin_t *bin;
@@ -2166,11 +2417,13 @@
 		memset(ret, 0, size);
 	}
 
+	arena_decay_tick(tsd, arena);
 	return (ret);
 }
 
 void *
-arena_malloc_large(arena_t *arena, size_t size, szind_t binind, bool zero)
+arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
+    bool zero)
 {
 	void *ret;
 	size_t usize;
@@ -2227,6 +2480,7 @@
 		}
 	}
 
+	arena_decay_tick(tsd, arena);
 	return (ret);
 }
 
@@ -2240,9 +2494,9 @@
 		return (NULL);
 
 	if (likely(size <= SMALL_MAXCLASS))
-		return (arena_malloc_small(arena, size, ind, zero));
+		return (arena_malloc_small(tsd, arena, size, ind, zero));
 	if (likely(size <= large_maxclass))
-		return (arena_malloc_large(arena, size, ind, zero));
+		return (arena_malloc_large(tsd, arena, size, ind, zero));
 	return (huge_malloc(tsd, arena, size, zero, tcache));
 }
 
@@ -2329,6 +2583,7 @@
 		else if (unlikely(opt_zero))
 			memset(ret, 0, usize);
 	}
+	arena_decay_tick(tsd, arena);
 	return (ret);
 }
 
@@ -2515,7 +2770,7 @@
 }
 
 void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind)
 {
 	arena_chunk_map_bits_t *bitselm;
@@ -2527,6 +2782,7 @@
 	}
 	bitselm = arena_bitselm_get(chunk, pageind);
 	arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
+	arena_decay_tick(tsd, arena);
 }
 
 #ifdef JEMALLOC_JET
@@ -2583,12 +2839,13 @@
 }
 
 void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
 
 	malloc_mutex_lock(&arena->lock);
 	arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
 	malloc_mutex_unlock(&arena->lock);
+	arena_decay_tick(tsd, arena);
 }
 
 static void
@@ -2789,14 +3046,16 @@
 }
 
 bool
-arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero)
 {
 	size_t usize_min, usize_max;
 
 	usize_min = s2u(size);
 	usize_max = s2u(size + extra);
 	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+		arena_chunk_t *chunk;
+
 		/*
 		 * Avoid moving the allocation if the size class can be left the
 		 * same.
@@ -2816,10 +3075,12 @@
 				return (true);
 		}
 
+		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+		arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
 		return (false);
 	} else {
-		return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
-		    zero));
+		return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
+		    usize_max, zero));
 	}
 }
 
@@ -2852,7 +3113,7 @@
 		size_t copysize;
 
 		/* Try to avoid moving the allocation. */
-		if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
+		if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
 			return (ptr);
 
 		/*
@@ -2915,15 +3176,36 @@
 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
 {
 
+	if (opt_purge != purge_mode_ratio)
+		return (true);
 	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
 		return (true);
 	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
 	return (false);
 }
 
+ssize_t
+arena_decay_time_default_get(void)
+{
+
+	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
+}
+
+bool
+arena_decay_time_default_set(ssize_t decay_time)
+{
+
+	if (opt_purge != purge_mode_decay)
+		return (true);
+	if (!arena_decay_time_valid(decay_time))
+		return (true);
+	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
+	return (false);
+}
+
 void
 arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
-    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+    ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats,
     malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
     malloc_huge_stats_t *hstats)
 {
@@ -2932,6 +3214,7 @@
 	malloc_mutex_lock(&arena->lock);
 	*dss = dss_prec_names[arena->dss_prec];
 	*lg_dirty_mult = arena->lg_dirty_mult;
+	*decay_time = arena->decay_time;
 	*nactive += arena->nactive;
 	*ndirty += arena->ndirty;
 
@@ -3050,6 +3333,9 @@
 	qr_new(&arena->runs_dirty, rd_link);
 	qr_new(&arena->chunks_cache, cc_link);
 
+	if (opt_purge == purge_mode_decay)
+		arena_decay_init(arena, arena_decay_time_default_get());
+
 	ql_new(&arena->huge);
 	if (malloc_mutex_init(&arena->huge_mtx))
 		return (NULL);
@@ -3227,6 +3513,7 @@
 	unsigned i;
 
 	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+	arena_decay_time_default_set(opt_decay_time);
 
 	/*
 	 * Compute the header size such that it is large enough to contain the
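
The arithmetic in arena_decay_backlog_npages_limit() deserves a worked
example: each backlog slot is weighted by a fixed-point smoothstep factor
(scaled by 2^SMOOTHSTEP_BFP), so recently dirtied pages are mostly retained
while old ones become purgeable.  A standalone sketch with a toy 4-entry
table (the real h_steps table has SMOOTHSTEP_NSTEPS entries generated by
the SMOOTHSTEP macro; the toy factors below are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_BFP    24 /* Binary fixed point, a la SMOOTHSTEP_BFP. */
    #define TOY_NSTEPS 4

    int
    main(void)
    {
        /* Toy decay factors, oldest epoch first: the fraction of each
         * epoch's dirty pages still allowed to remain unpurged. */
        static const uint64_t h_steps[TOY_NSTEPS] = {
            ((uint64_t)1 << TOY_BFP) / 16,  /* oldest: 1/16 remains */
            ((uint64_t)5 << TOY_BFP) / 16,
            ((uint64_t)11 << TOY_BFP) / 16,
            ((uint64_t)15 << TOY_BFP) / 16  /* newest: 15/16 remains */
        };
        /* Dirty pages generated during each of the last four epochs. */
        uint64_t backlog[TOY_NSTEPS] = {160, 0, 32, 64};
        uint64_t sum = 0;
        unsigned i;

        for (i = 0; i < TOY_NSTEPS; i++)
            sum += backlog[i] * h_steps[i];
        /* 160*1/16 + 0 + 32*11/16 + 64*15/16 = 10 + 22 + 60 = 92. */
        printf("npages_limit = %llu\n",
            (unsigned long long)(sum >> TOY_BFP));

        return (0);
    }

On each deadline hit, arena_decay_epoch_advance() shifts the backlog toward
the old end by nadvance slots and appends the newest epoch's dirty-page
delta, so the limit decays smoothly toward zero when no new dirty pages are
generated.
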
diff --git a/src/ctl.c b/src/ctl.c
index 9618d63..f003b41 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -92,7 +92,9 @@
 CTL_PROTO(opt_dss)
 CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_purge)
 CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_decay_time)
 CTL_PROTO(opt_stats_print)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
@@ -115,10 +117,12 @@
 CTL_PROTO(tcache_create)
 CTL_PROTO(tcache_flush)
 CTL_PROTO(tcache_destroy)
+static void	arena_i_purge(unsigned arena_ind, bool all);
 CTL_PROTO(arena_i_purge)
-static void	arena_i_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_decay)
 CTL_PROTO(arena_i_dss)
 CTL_PROTO(arena_i_lg_dirty_mult)
+CTL_PROTO(arena_i_decay_time)
 CTL_PROTO(arena_i_chunk_hooks)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
@@ -132,6 +136,7 @@
 CTL_PROTO(arenas_narenas)
 CTL_PROTO(arenas_initialized)
 CTL_PROTO(arenas_lg_dirty_mult)
+CTL_PROTO(arenas_decay_time)
 CTL_PROTO(arenas_quantum)
 CTL_PROTO(arenas_page)
 CTL_PROTO(arenas_tcache_max)
@@ -182,6 +187,7 @@
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_dss)
 CTL_PROTO(stats_arenas_i_lg_dirty_mult)
+CTL_PROTO(stats_arenas_i_decay_time)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
 CTL_PROTO(stats_arenas_i_mapped)
@@ -260,7 +266,9 @@
 	{NAME("dss"),		CTL(opt_dss)},
 	{NAME("lg_chunk"),	CTL(opt_lg_chunk)},
 	{NAME("narenas"),	CTL(opt_narenas)},
+	{NAME("purge"),		CTL(opt_purge)},
 	{NAME("lg_dirty_mult"),	CTL(opt_lg_dirty_mult)},
+	{NAME("decay_time"),	CTL(opt_decay_time)},
 	{NAME("stats_print"),	CTL(opt_stats_print)},
 	{NAME("junk"),		CTL(opt_junk)},
 	{NAME("zero"),		CTL(opt_zero)},
@@ -290,8 +298,10 @@
 
 static const ctl_named_node_t arena_i_node[] = {
 	{NAME("purge"),		CTL(arena_i_purge)},
+	{NAME("decay"),		CTL(arena_i_decay)},
 	{NAME("dss"),		CTL(arena_i_dss)},
 	{NAME("lg_dirty_mult"),	CTL(arena_i_lg_dirty_mult)},
+	{NAME("decay_time"),	CTL(arena_i_decay_time)},
 	{NAME("chunk_hooks"),	CTL(arena_i_chunk_hooks)}
 };
 static const ctl_named_node_t super_arena_i_node[] = {
@@ -341,6 +351,7 @@
 	{NAME("narenas"),	CTL(arenas_narenas)},
 	{NAME("initialized"),	CTL(arenas_initialized)},
 	{NAME("lg_dirty_mult"),	CTL(arenas_lg_dirty_mult)},
+	{NAME("decay_time"),	CTL(arenas_decay_time)},
 	{NAME("quantum"),	CTL(arenas_quantum)},
 	{NAME("page"),		CTL(arenas_page)},
 	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
@@ -441,6 +452,7 @@
 	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
 	{NAME("dss"),		CTL(stats_arenas_i_dss)},
 	{NAME("lg_dirty_mult"),	CTL(stats_arenas_i_lg_dirty_mult)},
+	{NAME("decay_time"),	CTL(stats_arenas_i_decay_time)},
 	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
 	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
 	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
@@ -523,6 +535,7 @@
 
 	astats->dss = dss_prec_names[dss_prec_limit];
 	astats->lg_dirty_mult = -1;
+	astats->decay_time = -1;
 	astats->pactive = 0;
 	astats->pdirty = 0;
 	if (config_stats) {
@@ -545,8 +558,8 @@
 	unsigned i;
 
 	arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
-	    &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
-	    cstats->lstats, cstats->hstats);
+	    &cstats->decay_time, &cstats->pactive, &cstats->pdirty,
+	    &cstats->astats, cstats->bstats, cstats->lstats, cstats->hstats);
 
 	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].curregs *
@@ -1265,7 +1278,9 @@
 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
 CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
 CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
 CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
@@ -1539,34 +1554,52 @@
 
 /******************************************************************************/
 
-/* ctl_mutex must be held during execution of this function. */
 static void
-arena_i_purge(unsigned arena_ind)
+arena_i_purge(unsigned arena_ind, bool all)
 {
-	tsd_t *tsd;
-	unsigned i;
-	bool refreshed;
-	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
 
-	tsd = tsd_fetch();
-	for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
-		tarenas[i] = arena_get(tsd, i, false, false);
-		if (tarenas[i] == NULL && !refreshed) {
-			tarenas[i] = arena_get(tsd, i, false, true);
-			refreshed = true;
-		}
-	}
+	malloc_mutex_lock(&ctl_mtx);
+	{
+		tsd_t *tsd = tsd_fetch();
+		unsigned narenas = ctl_stats.narenas;
 
-	if (arena_ind == ctl_stats.narenas) {
-		unsigned i;
-		for (i = 0; i < ctl_stats.narenas; i++) {
-			if (tarenas[i] != NULL)
-				arena_purge_all(tarenas[i]);
+		if (arena_ind == narenas) {
+			unsigned i;
+			bool refreshed;
+			VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+
+			for (i = 0, refreshed = false; i < narenas; i++) {
+				tarenas[i] = arena_get(tsd, i, false, false);
+				if (tarenas[i] == NULL && !refreshed) {
+					tarenas[i] = arena_get(tsd, i, false,
+					    true);
+					refreshed = true;
+				}
+			}
+
+			/*
+			 * No further need to hold ctl_mtx, since narenas and
+			 * tarenas contain everything needed below.
+			 */
+			malloc_mutex_unlock(&ctl_mtx);
+
+			for (i = 0; i < narenas; i++) {
+				if (tarenas[i] != NULL)
+					arena_purge(tarenas[i], all);
+			}
+		} else {
+			arena_t *tarena;
+
+			assert(arena_ind < narenas);
+
+			tarena = arena_get(tsd, arena_ind, false, true);
+
+			/* No further need to hold ctl_mtx. */
+			malloc_mutex_unlock(&ctl_mtx);
+
+			if (tarena != NULL)
+				arena_purge(tarena, all);
 		}
-	} else {
-		assert(arena_ind < ctl_stats.narenas);
-		if (tarenas[arena_ind] != NULL)
-			arena_purge_all(tarenas[arena_ind]);
 	}
 }
 
@@ -1578,9 +1611,22 @@
 
 	READONLY();
 	WRITEONLY();
-	malloc_mutex_lock(&ctl_mtx);
-	arena_i_purge(mib[1]);
-	malloc_mutex_unlock(&ctl_mtx);
+	arena_i_purge(mib[1], true);
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+static int
+arena_i_decay_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+
+	READONLY();
+	WRITEONLY();
+	arena_i_purge(mib[1], false);
 
 	ret = 0;
 label_return:
@@ -1678,6 +1724,40 @@
 }
 
 static int
+arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+	unsigned arena_ind = mib[1];
+	arena_t *arena;
+
+	arena = arena_get(tsd_fetch(), arena_ind, false, true);
+	if (arena == NULL) {
+		ret = EFAULT;
+		goto label_return;
+	}
+
+	if (oldp != NULL && oldlenp != NULL) {
+		ssize_t oldval = arena_decay_time_get(arena);
+		READ(oldval, ssize_t);
+	}
+	if (newp != NULL) {
+		if (newlen != sizeof(ssize_t)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		if (arena_decay_time_set(arena, *(ssize_t *)newp)) {
+			ret = EFAULT;
+			goto label_return;
+		}
+	}
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+static int
 arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen)
 {
@@ -1801,6 +1881,32 @@
 	return (ret);
 }
 
+static int
+arenas_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+
+	if (oldp != NULL && oldlenp != NULL) {
+		ssize_t oldval = arena_decay_time_default_get();
+		READ(oldval, ssize_t);
+	}
+	if (newp != NULL) {
+		if (newlen != sizeof(ssize_t)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		if (arena_decay_time_default_set(*(ssize_t *)newp)) {
+			ret = EFAULT;
+			goto label_return;
+		}
+	}
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
 CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -2002,6 +2108,8 @@
 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
 CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
     ssize_t)
+CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
+    ssize_t)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
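
Because arena.<i>.decay_time is per-arena and read-write, callers that tune
many arenas can skip repeated name parsing via the MIB interface.  A sketch
(unprefixed build assumed; the helper name is hypothetical):

    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    /* Set one arena's decay time via a precomputed MIB. */
    static int
    set_arena_decay_time(unsigned arena_ind, ssize_t decay_time)
    {
        size_t mib[3];
        size_t miblen = sizeof(mib) / sizeof(size_t);

        if (mallctlnametomib("arena.0.decay_time", mib, &miblen) != 0)
            return (-1);
        mib[1] = arena_ind; /* Retarget the MIB at the desired arena. */
        return (mallctlbymib(mib, miblen, NULL, NULL, &decay_time,
            sizeof(decay_time)));
    }
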
diff --git a/src/huge.c b/src/huge.c
index c1fa379..9f88048 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -99,6 +99,7 @@
 	} else if (config_fill && unlikely(opt_junk_alloc))
 		memset(ret, 0xa5, size);
 
+	arena_decay_tick(tsd, arena);
 	return (ret);
 }
 
@@ -280,7 +281,7 @@
 }
 
 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
     size_t usize_max, bool zero)
 {
 
@@ -292,13 +293,18 @@
 
 	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
 		/* Attempt to expand the allocation in-place. */
-		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
+		    zero)) {
+			arena_decay_tick(tsd, huge_aalloc(ptr));
 			return (false);
+		}
 		/* Try again, this time with usize_min. */
 		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
 		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
-		    oldsize, usize_min, zero))
+		    oldsize, usize_min, zero)) {
+			arena_decay_tick(tsd, huge_aalloc(ptr));
 			return (false);
+		}
 	}
 
 	/*
@@ -309,12 +315,17 @@
 	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
 		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
 		    zero);
+		arena_decay_tick(tsd, huge_aalloc(ptr));
 		return (false);
 	}
 
 	/* Attempt to shrink the allocation in-place. */
-	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
-		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
+			arena_decay_tick(tsd, huge_aalloc(ptr));
+			return (false);
+		}
+	}
 	return (true);
 }
 
@@ -336,7 +347,7 @@
 	size_t copysize;
 
 	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
+	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
 		return (ptr);
 
 	/*
@@ -373,6 +384,8 @@
 	arena_chunk_dalloc_huge(extent_node_arena_get(node),
 	    extent_node_addr_get(node), extent_node_size_get(node));
 	idalloctm(tsd, node, tcache, true, true);
+
+	arena_decay_tick(tsd, arena);
 }
 
 arena_t *
diff --git a/src/jemalloc.c b/src/jemalloc.c
index d2b2afc..f69d951 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -577,6 +577,17 @@
 		    * (narenas_tdata - narenas_actual));
 	}
 
+	/* Copy/initialize tickers. */
+	for (i = 0; i < narenas_actual; i++) {
+		if (i < narenas_tdata_old) {
+			ticker_copy(&arenas_tdata[i].decay_ticker,
+			    &arenas_tdata_old[i].decay_ticker);
+		} else {
+			ticker_init(&arenas_tdata[i].decay_ticker,
+			    DECAY_NTICKS_PER_UPDATE);
+		}
+	}
+
 	/* Read the refreshed tdata array. */
 	tdata = &arenas_tdata[ind];
 label_return:
@@ -1120,8 +1131,27 @@
 			}
 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
 			    SIZE_T_MAX, false)
+			if (strncmp("purge", k, klen) == 0) {
+				int i;
+				bool match = false;
+				for (i = 0; i < purge_mode_limit; i++) {
+					if (strncmp(purge_mode_names[i], v,
+					    vlen) == 0) {
+						opt_purge = (purge_mode_t)i;
+						match = true;
+						break;
+					}
+				}
+				if (!match) {
+					malloc_conf_error("Invalid conf value",
+					    k, klen, v, vlen);
+				}
+				continue;
+			}
 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
 			    -1, (sizeof(size_t) << 3) - 1)
+			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
+			    TIME_SEC_MAX)
 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
 			if (config_fill) {
 				if (CONF_MATCH("junk")) {
@@ -2344,12 +2374,12 @@
 }
 
 JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, bool zero)
+ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero)
 {
 	size_t usize;
 
-	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
+	if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
 		return (old_usize);
 	usize = isalloc(ptr, config_prof);
 
@@ -2357,14 +2387,15 @@
 }
 
 static size_t
-ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, bool zero, prof_tctx_t *tctx)
+ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
 {
 	size_t usize;
 
 	if (tctx == NULL)
 		return (old_usize);
-	usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
+	usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
+	    zero);
 
 	return (usize);
 }
@@ -2390,11 +2421,11 @@
 	assert(usize_max != 0);
 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+		usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
 		    alignment, zero, tctx);
 	} else {
-		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-		    zero);
+		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
+		    alignment, zero);
 	}
 	if (usize == old_usize) {
 		prof_alloc_rollback(tsd, tctx, false);
@@ -2441,8 +2472,8 @@
 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
 		    alignment, zero);
 	} else {
-		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-		    zero);
+		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
+		    alignment, zero);
 	}
 	if (unlikely(usize == old_usize))
 		goto label_not_resized;
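
The tdata loop above keeps per-arena ticker state across resizes: surviving
slots are ticker_copy()'d so in-flight countdowns are not lost, and new
slots start a fresh DECAY_NTICKS_PER_UPDATE countdown.  A sketch of that
invariant, reusing the hypothetical mini_ticker_t from the arena.h notes
above:

    #include <stdlib.h>

    /* Grow a per-arena ticker array from nold to nnew entries. */
    static mini_ticker_t *
    grow_decay_tickers(mini_ticker_t *old, unsigned nold, unsigned nnew)
    {
        mini_ticker_t *tickers = malloc(nnew * sizeof(mini_ticker_t));
        unsigned i;

        if (tickers == NULL)
            return (NULL);
        for (i = 0; i < nnew; i++) {
            if (i < nold)
                tickers[i] = old[i]; /* cf. ticker_copy(). */
            else
                mini_ticker_init(&tickers[i], 1000); /* cf. ticker_init(). */
        }
        free(old);
        return (tickers);
    }
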
diff --git a/src/stats.c b/src/stats.c
index 7d09c23..8d5ed71 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -258,7 +258,7 @@
 {
 	unsigned nthreads;
 	const char *dss;
-	ssize_t lg_dirty_mult;
+	ssize_t lg_dirty_mult, decay_time;
 	size_t page, pactive, pdirty, mapped;
 	size_t metadata_mapped, metadata_allocated;
 	uint64_t npurge, nmadvise, purged;
@@ -278,13 +278,23 @@
 	malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
 	    dss);
 	CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
-	if (lg_dirty_mult >= 0) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "min active:dirty page ratio: %u:1\n",
-		    (1U << lg_dirty_mult));
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "min active:dirty page ratio: N/A\n");
+	if (opt_purge == purge_mode_ratio) {
+		if (lg_dirty_mult >= 0) {
+			malloc_cprintf(write_cb, cbopaque,
+			    "min active:dirty page ratio: %u:1\n",
+			    (1U << lg_dirty_mult));
+		} else {
+			malloc_cprintf(write_cb, cbopaque,
+			    "min active:dirty page ratio: N/A\n");
+		}
+	}
+	CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
+	if (opt_purge == purge_mode_decay) {
+		if (decay_time >= 0) {
+			malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
+			    decay_time);
+		} else
+			malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
 	}
 	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
 	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
@@ -292,9 +302,8 @@
 	CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
 	CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
 	malloc_cprintf(write_cb, cbopaque,
-	    "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
-	    " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
-	    1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
+	    "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64", "
+	    "purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
 
 	malloc_cprintf(write_cb, cbopaque,
 	    "                            allocated      nmalloc      ndalloc"
@@ -486,7 +495,13 @@
 		OPT_WRITE_SIZE_T(lg_chunk)
 		OPT_WRITE_CHAR_P(dss)
 		OPT_WRITE_SIZE_T(narenas)
-		OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
+		OPT_WRITE_CHAR_P(purge)
+		if (opt_purge == purge_mode_ratio) {
+			OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
+			    arenas.lg_dirty_mult)
+		}
+		if (opt_purge == purge_mode_decay)
+			OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time)
 		OPT_WRITE_BOOL(stats_print)
 		OPT_WRITE_CHAR_P(junk)
 		OPT_WRITE_SIZE_T(quarantine)
@@ -531,13 +546,22 @@
 		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
 
 		CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
-		if (ssv >= 0) {
+		if (opt_purge == purge_mode_ratio) {
+			if (ssv >= 0) {
+				malloc_cprintf(write_cb, cbopaque,
+				    "Min active:dirty page ratio per arena: "
+				    "%u:1\n", (1U << ssv));
+			} else {
+				malloc_cprintf(write_cb, cbopaque,
+				    "Min active:dirty page ratio per arena: "
+				    "N/A\n");
+			}
+		}
+		CTL_GET("arenas.decay_time", &ssv, ssize_t);
+		if (opt_purge == purge_mode_decay) {
 			malloc_cprintf(write_cb, cbopaque,
-			    "Min active:dirty page ratio per arena: %u:1\n",
-			    (1U << ssv));
-		} else {
-			malloc_cprintf(write_cb, cbopaque,
-			    "Min active:dirty page ratio per arena: N/A\n");
+			    "Unused dirty page decay time: %zd%s\n",
+			    ssv, (ssv < 0) ? " (no decay)" : "");
 		}
 		if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
 			malloc_cprintf(write_cb, cbopaque,
diff --git a/src/tcache.c b/src/tcache.c
index e8c3152..426bb1f 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -75,7 +75,7 @@
 {
 	void *ret;
 
-	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+	arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
 	    tcache->prof_accumbytes : 0);
 	if (config_prof)
 		tcache->prof_accumbytes = 0;
@@ -143,6 +143,7 @@
 			}
 		}
 		malloc_mutex_unlock(&bin->lock);
+		arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
 		/*
@@ -226,6 +227,7 @@
 		malloc_mutex_unlock(&locked_arena->lock);
 		if (config_prof && idump)
 			prof_idump();
+		arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
 		/*
diff --git a/src/time.c b/src/time.c
index 3f93038..2fe93e1 100644
--- a/src/time.c
+++ b/src/time.c
@@ -147,6 +147,10 @@
 	return (t / d);
 }
 
+#ifdef JEMALLOC_JET
+#undef time_update
+#define	time_update JEMALLOC_N(time_update_impl)
+#endif
 bool
 time_update(struct timespec *time)
 {
@@ -184,3 +188,8 @@
 	assert(time_valid(time));
 	return (false);
 }
+#ifdef JEMALLOC_JET
+#undef time_update
+#define	time_update JEMALLOC_N(time_update)
+time_update_t *time_update = JEMALLOC_N(time_update_impl);
+#endif
diff --git a/test/unit/decay.c b/test/unit/decay.c
new file mode 100644
index 0000000..324019d
--- /dev/null
+++ b/test/unit/decay.c
@@ -0,0 +1,370 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "purge:decay,decay_time:1";
+
+static time_update_t *time_update_orig;
+
+static unsigned nupdates_mock;
+static struct timespec time_mock;
+static bool nonmonotonic_mock;
+
+static bool
+time_update_mock(struct timespec *time)
+{
+
+	nupdates_mock++;
+	if (!nonmonotonic_mock)
+		time_copy(time, &time_mock);
+	return (nonmonotonic_mock);
+}
+
+TEST_BEGIN(test_decay_ticks)
+{
+	ticker_t *decay_ticker;
+	unsigned tick0, tick1;
+	size_t sz, huge0, large0;
+	void *p;
+	unsigned tcache_ind;
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	decay_ticker = decay_ticker_get(tsd_fetch(), 0);
+	assert_ptr_not_null(decay_ticker,
+	    "Unexpected failure getting decay ticker");
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+
+	/* malloc(). */
+	tick0 = ticker_read(decay_ticker);
+	p = malloc(huge0);
+	assert_ptr_not_null(p, "Unexpected malloc() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
+	/* free(). */
+	tick0 = ticker_read(decay_ticker);
+	free(p);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
+
+	/* calloc(). */
+	tick0 = ticker_read(decay_ticker);
+	p = calloc(1, huge0);
+	assert_ptr_not_null(p, "Unexpected calloc() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
+	free(p);
+
+	/* posix_memalign(). */
+	tick0 = ticker_read(decay_ticker);
+	assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
+	    "Unexpected posix_memalign() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during posix_memalign()");
+	free(p);
+
+	/* aligned_alloc(). */
+	tick0 = ticker_read(decay_ticker);
+	p = aligned_alloc(sizeof(size_t), huge0);
+	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during aligned_alloc()");
+	free(p);
+
+	/* realloc(). */
+	/* Allocate. */
+	tick0 = ticker_read(decay_ticker);
+	p = realloc(NULL, huge0);
+	assert_ptr_not_null(p, "Unexpected realloc() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+	/* Reallocate. */
+	tick0 = ticker_read(decay_ticker);
+	p = realloc(p, huge0);
+	assert_ptr_not_null(p, "Unexpected realloc() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+	/* Deallocate. */
+	tick0 = ticker_read(decay_ticker);
+	realloc(p, 0);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+
+	/* Huge mallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	p = mallocx(huge0, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during huge mallocx()");
+	/* Huge rallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	p = rallocx(p, huge0, 0);
+	assert_ptr_not_null(p, "Unexpected rallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during huge rallocx()");
+	/* Huge xallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	xallocx(p, huge0, 0, 0);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during huge xallocx()");
+	/* Huge dallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	dallocx(p, 0);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during huge dallocx()");
+	/* Huge sdallocx(). */
+	p = mallocx(huge0, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick0 = ticker_read(decay_ticker);
+	sdallocx(p, huge0, 0);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during huge sdallocx()");
+
+	/* Large mallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	p = mallocx(large0, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during large mallocx()");
+	/* Large rallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	p = rallocx(p, large0, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected rallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during large rallocx()");
+	/* Large xallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	xallocx(p, large0, 0, MALLOCX_TCACHE_NONE);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during large xallocx()");
+	/* Large dallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	dallocx(p, MALLOCX_TCACHE_NONE);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during large dallocx()");
+	/* Large sdallocx(). */
+	p = mallocx(large0, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick0 = ticker_read(decay_ticker);
+	sdallocx(p, large0, MALLOCX_TCACHE_NONE);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during large sdallocx()");
+
+	/* Small mallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	p = mallocx(1, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during small mallocx()");
+	/* Small rallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	p = rallocx(p, 1, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected rallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during small rallocx()");
+	/* Small xallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	xallocx(p, 1, 0, MALLOCX_TCACHE_NONE);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during small xallocx()");
+	/* Small dallocx(). */
+	tick0 = ticker_read(decay_ticker);
+	dallocx(p, MALLOCX_TCACHE_NONE);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during small dallocx()");
+	/* Small sdallocx(). */
+	p = mallocx(1, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick0 = ticker_read(decay_ticker);
+	sdallocx(p, 1, MALLOCX_TCACHE_NONE);
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during small sdallocx()");
+
+	/* tcache fill. */
+	sz = sizeof(unsigned);
+	assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+	tick0 = ticker_read(decay_ticker);
+	p = mallocx(1, MALLOCX_TCACHE(tcache_ind));
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during tcache fill");
+	/* tcache flush. */
+	dallocx(p, MALLOCX_TCACHE(tcache_ind));
+	tick0 = ticker_read(decay_ticker);
+	assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tcache_ind,
+	    sizeof(unsigned)), 0, "Unexpected mallctl failure");
+	tick1 = ticker_read(decay_ticker);
+	assert_u32_ne(tick1, tick0,
+	    "Expected ticker to tick during tcache flush");
+}
+TEST_END
+
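+/*
+ * Check that a burst of deallocated pages is eventually purged: once the
+ * clock is unfrozen, allocation-path ticks must drive decay-based purging
+ * before the decay deadline passes.
+ */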
+TEST_BEGIN(test_decay_ticker)
+{
+#define	NPS 1024
+	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+	void *ps[NPS];
+	uint64_t epoch, npurge0, npurge1;
+	size_t sz, tcache_max, large;
+	unsigned i, nupdates0;
+	struct timespec time, decay_time, deadline;
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	/*
+	 * Allocate a bunch of large objects, pause the clock, deallocate the
+	 * objects, restore the clock, then [md]allocx() in a tight loop to
+	 * verify the ticker triggers purging.
+	 */
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+	large = nallocx(tcache_max + 1, flags);
+
+	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+	    "Unexpected mallctl failure");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+
+	for (i = 0; i < NPS; i++) {
+		ps[i] = mallocx(large, flags);
+		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+	}
+
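+	/*
+	 * Capture the current time in time_mock, then interpose the mock so
+	 * the clock appears frozen while the objects are deallocated.
+	 */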
+	nupdates_mock = 0;
+	time_init(&time_mock, 0, 0);
+	time_update(&time_mock);
+	nonmonotonic_mock = false;
+
+	time_update_orig = time_update;
+	time_update = time_update_mock;
+
+	for (i = 0; i < NPS; i++) {
+		dallocx(ps[i], flags);
+		nupdates0 = nupdates_mock;
+		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+		    "Unexpected arena.0.decay failure");
+		assert_u_gt(nupdates_mock, nupdates0,
+		    "Expected time_update() to be called");
+	}
+
+	time_update = time_update_orig;
+
+	time_init(&time, 0, 0);
+	time_update(&time);
+	time_init(&decay_time, opt_decay_time, 0);
+	time_copy(&deadline, &time);
+	time_add(&deadline, &decay_time);
+	do {
+		for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
+			void *p = mallocx(1, flags);
+			assert_ptr_not_null(p, "Unexpected mallocx() failure");
+			dallocx(p, flags);
+		}
+		assert_d_eq(mallctl("epoch", NULL, NULL, &epoch,
+		    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
+		sz = sizeof(uint64_t);
+		assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz,
+		    NULL, 0), 0, "Unexpected mallctl failure");
+
+		time_update(&time);
+	} while (time_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
+
+	assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
+#undef NPS
+}
+TEST_END
+
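+/*
+ * Check that dirty pages are purged even if the clock fails to advance, i.e.
+ * that decay-based purging tolerates a nonmonotonic clock.
+ */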
+TEST_BEGIN(test_decay_nonmonotonic)
+{
+#define	NPS (SMOOTHSTEP_NSTEPS + 1)
+	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+	void *ps[NPS];
+	uint64_t epoch, npurge0, npurge1;
+	size_t sz, large0;
+	unsigned i, nupdates0;
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+
+	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+	    "Unexpected mallctl failure");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+
+	nupdates_mock = 0;
+	time_init(&time_mock, 0, 0);
+	time_update(&time_mock);
+	nonmonotonic_mock = true;
+
+	time_update_orig = time_update;
+	time_update = time_update_mock;
+
+	for (i = 0; i < NPS; i++) {
+		ps[i] = mallocx(large0, flags);
+		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+	}
+
+	for (i = 0; i < NPS; i++) {
+		dallocx(ps[i], flags);
+		nupdates0 = nupdates_mock;
+		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+		    "Unexpected arena.0.decay failure");
+		assert_u_gt(nupdates_mock, nupdates0,
+		    "Expected time_update() to be called");
+	}
+
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+	    "Unexpected mallctl failure");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+
+	assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
+
+	time_update = time_update_orig;
+#undef NPS
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_decay_ticks,
+	    test_decay_ticker,
+	    test_decay_nonmonotonic));
+}
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index fde223f..b312fc6 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -164,7 +164,9 @@
 	TEST_MALLCTL_OPT(size_t, lg_chunk, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
 	TEST_MALLCTL_OPT(size_t, narenas, always);
+	TEST_MALLCTL_OPT(const char *, purge, always);
 	TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
+	TEST_MALLCTL_OPT(ssize_t, decay_time, always);
 	TEST_MALLCTL_OPT(bool, stats_print, always);
 	TEST_MALLCTL_OPT(const char *, junk, fill);
 	TEST_MALLCTL_OPT(size_t, quarantine, fill);
@@ -355,6 +357,8 @@
 	ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
 	size_t sz = sizeof(ssize_t);
 
+	test_skip_if(opt_purge != purge_mode_ratio);
+
 	assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
 	    NULL, 0), 0, "Unexpected mallctl() failure");
 
@@ -382,6 +386,39 @@
 }
 TEST_END
 
+TEST_BEGIN(test_arena_i_decay_time)
+{
+	ssize_t decay_time, orig_decay_time, prev_decay_time;
+	size_t sz = sizeof(ssize_t);
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");
+
+	decay_time = -2;
+	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), EFAULT,
+	    "Unexpected mallctl() success");
+
+	decay_time = TIME_SEC_MAX;
+	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), 0,
+	    "Unexpected mallctl() failure");
+
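+	/*
+	 * Write decay times in [-1..19] and verify that each write reads back
+	 * the previously written value.
+	 */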
+	for (prev_decay_time = decay_time, decay_time = -1;
+	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+		ssize_t old_decay_time;
+
+		assert_d_eq(mallctl("arena.0.decay_time", &old_decay_time,
+		    &sz, &decay_time, sizeof(ssize_t)), 0,
+		    "Unexpected mallctl() failure");
+		assert_zd_eq(old_decay_time, prev_decay_time,
+		    "Unexpected old arena.0.decay_time");
+	}
+}
+TEST_END
+
 TEST_BEGIN(test_arena_i_purge)
 {
 	unsigned narenas;
@@ -402,6 +439,26 @@
 }
 TEST_END
 
+TEST_BEGIN(test_arena_i_decay)
+{
+	unsigned narenas;
+	size_t sz = sizeof(unsigned);
+	size_t mib[3];
+	size_t miblen = 3;
+
+	assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+
+	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib() failure");
+	mib[1] = narenas;
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
 TEST_BEGIN(test_arena_i_dss)
 {
 	const char *dss_prec_old, *dss_prec_new;
@@ -466,6 +523,8 @@
 	ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
 	size_t sz = sizeof(ssize_t);
 
+	test_skip_if(opt_purge != purge_mode_ratio);
+
 	assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
 	    NULL, 0), 0, "Unexpected mallctl() failure");
 
@@ -493,6 +552,39 @@
 }
 TEST_END
 
+TEST_BEGIN(test_arenas_decay_time)
+{
+	ssize_t decay_time, orig_decay_time, prev_decay_time;
+	size_t sz = sizeof(ssize_t);
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");
+
+	decay_time = -2;
+	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), EFAULT,
+	    "Unexpected mallctl() success");
+
+	decay_time = TIME_SEC_MAX;
+	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), 0,
+	    "Expected mallctl() failure");
+
+	for (prev_decay_time = decay_time, decay_time = -1;
+	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+		ssize_t old_decay_time;
+
+		assert_d_eq(mallctl("arenas.decay_time", &old_decay_time,
+		    &sz, &decay_time, sizeof(ssize_t)), 0,
+		    "Unexpected mallctl() failure");
+		assert_zd_eq(old_decay_time, prev_decay_time,
+		    "Unexpected old arenas.decay_time");
+	}
+}
+TEST_END
+
 TEST_BEGIN(test_arenas_constants)
 {
 
@@ -621,10 +713,13 @@
 	    test_tcache,
 	    test_thread_arena,
 	    test_arena_i_lg_dirty_mult,
+	    test_arena_i_decay_time,
 	    test_arena_i_purge,
+	    test_arena_i_decay,
 	    test_arena_i_dss,
 	    test_arenas_initialized,
 	    test_arenas_lg_dirty_mult,
+	    test_arenas_decay_time,
 	    test_arenas_constants,
 	    test_arenas_bin_constants,
 	    test_arenas_lrun_constants,