Fix "prof.reset" mallctl-related corruption.

Fix heap profiling to distinguish among otherwise identical sample sites
with interposed resets (triggered via the "prof.reset" mallctl).  This
bug could cause data structure corruption that would most likely result
in a segfault.
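
The root cause: after "prof.reset", the thread's replacement tdata keeps
the same thr_uid but restarts its tctx_uid counter, so a surviving
pre-reset tctx and a post-reset tctx for the same sample site could
compare equal in the per-gctx tree, corrupting it on insertion.  The fix
adds thr_discrim (bumped on each reset) as a middle comparison level.
The following standalone sketch is illustrative only (tctx_key_t and
main() are not jemalloc code); it shows the repaired three-level
ordering:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the prof_tctx_t fields that form the comparison key. */
    typedef struct {
            uint64_t thr_uid;     /* Owning thread's unique ID. */
            uint64_t thr_discrim; /* Incremented on each "prof.reset". */
            uint64_t tctx_uid;    /* Per-tdata counter; restarts on reset. */
    } tctx_key_t;

    /* Branchless, overflow-safe comparison, as in the patched comparator. */
    static int
    cmp_u64(uint64_t a, uint64_t b)
    {

            return ((a > b) - (a < b));
    }

    static int
    tctx_key_comp(const tctx_key_t *a, const tctx_key_t *b)
    {
            int ret = cmp_u64(a->thr_uid, b->thr_uid);

            if (ret == 0) {
                    /* The level this change adds. */
                    ret = cmp_u64(a->thr_discrim, b->thr_discrim);
                    if (ret == 0)
                            ret = cmp_u64(a->tctx_uid, b->tctx_uid);
            }
            return (ret);
    }

    int
    main(void)
    {
            tctx_key_t before = {1, 0, 0}; /* Sampled before a reset. */
            tctx_key_t after = {1, 1, 0};  /* Same site, after a reset. */

            /* Prints -1; the old two-level key compared these equal. */
            printf("%d\n", tctx_key_comp(&before, &after));
            return (0);
    }
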
diff --git a/ChangeLog b/ChangeLog
index dba05eb..63c9d56 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -22,6 +22,10 @@
     visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
   - Fix TLS configuration such that it is enabled by default for platforms on
     which it works correctly.
+  - Fix heap profiling to distinguish among otherwise identical sample sites
+    with interposed resets (triggered via the "prof.reset" mallctl).  This bug
+    could cause data structure corruption that would most likely result in a
+    segfault.
 
 * 4.0.0 (August 17, 2015)
 
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index fe89828..eca8aa8 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -90,10 +90,11 @@
 	prof_tdata_t		*tdata;
 
 	/*
-	 * Copy of tdata->thr_uid, necessary because tdata may be defunct during
-	 * teardown.
+	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+	 * defunct during teardown.
 	 */
 	uint64_t		thr_uid;
+	uint64_t		thr_discrim;
 
 	/* Profiling counters, protected by tdata->lock. */
 	prof_cnt_t		cnts;
diff --git a/src/prof.c b/src/prof.c
index b79eba6..7427bf5 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -139,9 +139,16 @@
 	uint64_t b_thr_uid = b->thr_uid;
 	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
 	if (ret == 0) {
-		uint64_t a_tctx_uid = a->tctx_uid;
-		uint64_t b_tctx_uid = b->tctx_uid;
-		ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid);
+		uint64_t a_thr_discrim = a->thr_discrim;
+		uint64_t b_thr_discrim = b->thr_discrim;
+		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+		    b_thr_discrim);
+		if (ret == 0) {
+			uint64_t a_tctx_uid = a->tctx_uid;
+			uint64_t b_tctx_uid = b->tctx_uid;
+			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+			    b_tctx_uid);
+		}
 	}
 	return (ret);
 }
@@ -791,6 +798,7 @@
 		}
 		ret.p->tdata = tdata;
 		ret.p->thr_uid = tdata->thr_uid;
+		ret.p->thr_discrim = tdata->thr_discrim;
 		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
 		ret.p->gctx = gctx;
 		ret.p->tctx_uid = tdata->tctx_uid_next++;
diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c
index da34d70..69983e5 100644
--- a/test/unit/prof_reset.c
+++ b/test/unit/prof_reset.c
@@ -16,6 +16,14 @@
 	return (fd);
 }
 
+static void
+set_prof_active(bool active)
+{
+
+	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+	    0, "Unexpected mallctl failure");
+}
+
 static size_t
 get_lg_prof_sample(void)
 {
@@ -97,15 +105,12 @@
 
 TEST_BEGIN(test_prof_reset_cleanup)
 {
-	bool active;
 	void *p;
 	prof_dump_header_t *prof_dump_header_orig;
 
 	test_skip_if(!config_prof);
 
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	set_prof_active(true);
 
 	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
 	p = mallocx(1, 0);
@@ -133,9 +138,7 @@
 	dallocx(p, 0);
 	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
 
-	active = false;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while deactivating profiling");
+	set_prof_active(false);
 }
 TEST_END
 
@@ -192,7 +195,6 @@
 TEST_BEGIN(test_prof_reset)
 {
 	size_t lg_prof_sample_orig;
-	bool active;
 	thd_t thds[NTHREADS];
 	unsigned thd_args[NTHREADS];
 	unsigned i;
@@ -208,9 +210,7 @@
 	lg_prof_sample_orig = get_lg_prof_sample();
 	do_prof_reset(5);
 
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	set_prof_active(true);
 
 	for (i = 0; i < NTHREADS; i++) {
 		thd_args[i] = i;
@@ -224,9 +224,7 @@
 	assert_zu_eq(prof_tdata_count(), tdata_count,
 	    "Unexpected remaining tdata structures");
 
-	active = false;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while deactivating profiling");
+	set_prof_active(false);
 
 	do_prof_reset(lg_prof_sample_orig);
 }
@@ -237,6 +235,58 @@
 #undef RESET_INTERVAL
 #undef DUMP_INTERVAL
 
+/* Test sampling at the same allocation site across resets. */
+#define	NITER 10
+TEST_BEGIN(test_xallocx)
+{
+	size_t lg_prof_sample_orig;
+	unsigned i;
+	void *ptrs[NITER];
+
+	test_skip_if(!config_prof);
+
+	lg_prof_sample_orig = get_lg_prof_sample();
+	set_prof_active(true);
+
+	/* Reset profiling. */
+	do_prof_reset(0);
+
+	for (i = 0; i < NITER; i++) {
+		void *p;
+		size_t sz, nsz;
+
+		/* Reset profiling. */
+		do_prof_reset(0);
+
+		/* Allocate small object (which will be promoted). */
+		p = ptrs[i] = mallocx(1, 0);
+		assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+		/* Reset profiling. */
+		do_prof_reset(0);
+
+		/* Perform successful xallocx(). */
+		sz = sallocx(p, 0);
+		assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+		    "Unexpected xallocx() failure");
+
+		/* Perform unsuccessful xallocx(). */
+		nsz = nallocx(sz+1, 0);
+		assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+		    "Unexpected xallocx() success");
+	}
+
+	for (i = 0; i < NITER; i++) {
+		/* Free the objects allocated above. */
+		dallocx(ptrs[i], 0);
+	}
+
+	set_prof_active(false);
+	do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
 int
 main(void)
 {
@@ -247,5 +297,6 @@
 	return (test(
 	    test_prof_reset_basic,
 	    test_prof_reset_cleanup,
-	    test_prof_reset));
+	    test_prof_reset,
+	    test_xallocx));
 }