#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/malloc_io.h"

/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
/*
 * We have a circular dependency -- jemalloc_internal.h tells us if we should
 * use libgcc's unwinding functionality, but after we've included that, we've
 * already hooked _Unwind_Backtrace.  We'll temporarily disable hooking.
 */
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */
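
/*
 * A minimal sketch of the striping scheme above: a gctx does not own its
 * mutex; it borrows one of the PROF_NCTX_LOCKS leaf mutexes, chosen from a
 * monotonically increasing creation counter, effectively
 *
 *     malloc_mutex_t *lock = &gctx_locks[n % PROF_NCTX_LOCKS];
 *
 * where n counts gctx creations (see prof_gctx_mutex_choose() below).
 * Collisions merely cause unrelated gctx's to contend on a shared leaf lock;
 * no mutex is ever created or destroyed for an ephemeral gctx.
 */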

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for
 * more than one tdata at the same time, even though a gctx lock may be
 * acquired while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
/* Non static to enable profiling. */
malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

static int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
    uint64_t a_thr_uid = a->thr_uid;
    uint64_t b_thr_uid = b->thr_uid;
    int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
    if (ret == 0) {
        uint64_t a_thr_discrim = a->thr_discrim;
        uint64_t b_thr_discrim = b->thr_discrim;
        ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
            b_thr_discrim);
        if (ret == 0) {
            uint64_t a_tctx_uid = a->tctx_uid;
            uint64_t b_tctx_uid = b->tctx_uid;
            ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
                b_tctx_uid);
        }
    }
    return ret;
}
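
/*
 * Note on the comparators in this section: they rely on the branchless idiom
 * (a > b) - (a < b), which evaluates to -1, 0, or 1 and sidesteps the
 * overflow that a naive (int)(a - b) would risk for uint64_t keys.  Ties fall
 * through to successively finer keys (thr_uid, then thr_discrim, then
 * tctx_uid), so each red-black tree gets a total order.
 */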

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
    unsigned a_len = a->bt.len;
    unsigned b_len = b->bt.len;
    unsigned comp_len = (a_len < b_len) ? a_len : b_len;
    int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
    if (ret == 0) {
        ret = (a_len > b_len) - (a_len < b_len);
    }
    return ret;
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
    int ret;
    uint64_t a_uid = a->thr_uid;
    uint64_t b_uid = b->thr_uid;

    ret = ((a_uid > b_uid) - (a_uid < b_uid));
    if (ret == 0) {
        uint64_t a_discrim = a->thr_discrim;
        uint64_t b_discrim = b->thr_discrim;

        ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
    }
    return ret;
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (updated) {
        /*
         * Compute a new sample threshold.  This isn't very important in
         * practice, because this function is rarely executed, so the
         * potential for sample bias is minimal except in contrived
         * programs.
         */
        tdata = prof_tdata_get(tsd, true);
        if (tdata != NULL) {
            prof_sample_threshold_update(tdata);
        }
    }

    if ((uintptr_t)tctx > (uintptr_t)1U) {
        malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
        tctx->prepared = false;
        if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
            prof_tctx_destroy(tsd, tctx);
        } else {
            malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
        }
    }
}

void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
    prof_tctx_set(tsdn, ptr, usize, NULL, tctx);

    malloc_mutex_lock(tsdn, tctx->tdata->lock);
    tctx->cnts.curobjs++;
    tctx->cnts.curbytes += usize;
    if (opt_prof_accum) {
        tctx->cnts.accumobjs++;
        tctx->cnts.accumbytes += usize;
    }
    tctx->prepared = false;
    malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
    malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
    assert(tctx->cnts.curobjs > 0);
    assert(tctx->cnts.curbytes >= usize);
    tctx->cnts.curobjs--;
    tctx->cnts.curbytes -= usize;

    if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
        prof_tctx_destroy(tsd, tctx);
    } else {
        malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
    }
}

void
bt_init(prof_bt_t *bt, void **vec) {
    cassert(config_prof);

    bt->vec = vec;
    bt->len = 0;
}

static void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    if (tdata != NULL) {
        assert(!tdata->enq);
        tdata->enq = true;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

static void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    if (tdata != NULL) {
        bool idump, gdump;

        assert(tdata->enq);
        tdata->enq = false;
        idump = tdata->enq_idump;
        tdata->enq_idump = false;
        gdump = tdata->enq_gdump;
        tdata->enq_gdump = false;

        if (idump) {
            prof_idump(tsd_tsdn(tsd));
        }
        if (gdump) {
            prof_gdump(tsd_tsdn(tsd));
        }
    }
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt) {
    int nframes;

    cassert(config_prof);
    assert(bt->len == 0);
    assert(bt->vec != NULL);

    nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
    if (nframes <= 0) {
        return;
    }
    bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
    cassert(config_prof);

    return _URC_NO_REASON;
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
    prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
    void *ip;

    cassert(config_prof);

    ip = (void *)_Unwind_GetIP(context);
    if (ip == NULL) {
        return _URC_END_OF_STACK;
    }
    data->bt->vec[data->bt->len] = ip;
    data->bt->len++;
    if (data->bt->len == data->max) {
        return _URC_END_OF_STACK;
    }

    return _URC_NO_REASON;
}

void
prof_backtrace(prof_bt_t *bt) {
    prof_unwind_data_t data = {bt, PROF_BT_MAX};

    cassert(config_prof);

    _Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt) {
#define BT_FRAME(i)                                                     \
    if ((i) < PROF_BT_MAX) {                                            \
        void *p;                                                        \
        if (__builtin_frame_address(i) == 0) {                          \
            return;                                                     \
        }                                                               \
        p = __builtin_return_address(i);                                \
        if (p == NULL) {                                                \
            return;                                                     \
        }                                                               \
        bt->vec[(i)] = p;                                               \
        bt->len = (i) + 1;                                              \
    } else {                                                            \
        return;                                                         \
    }

    cassert(config_prof);

    BT_FRAME(0)
    BT_FRAME(1)
    BT_FRAME(2)
    BT_FRAME(3)
    BT_FRAME(4)
    BT_FRAME(5)
    BT_FRAME(6)
    BT_FRAME(7)
    BT_FRAME(8)
    BT_FRAME(9)

    BT_FRAME(10)
    BT_FRAME(11)
    BT_FRAME(12)
    BT_FRAME(13)
    BT_FRAME(14)
    BT_FRAME(15)
    BT_FRAME(16)
    BT_FRAME(17)
    BT_FRAME(18)
    BT_FRAME(19)

    BT_FRAME(20)
    BT_FRAME(21)
    BT_FRAME(22)
    BT_FRAME(23)
    BT_FRAME(24)
    BT_FRAME(25)
    BT_FRAME(26)
    BT_FRAME(27)
    BT_FRAME(28)
    BT_FRAME(29)

    BT_FRAME(30)
    BT_FRAME(31)
    BT_FRAME(32)
    BT_FRAME(33)
    BT_FRAME(34)
    BT_FRAME(35)
    BT_FRAME(36)
    BT_FRAME(37)
    BT_FRAME(38)
    BT_FRAME(39)

    BT_FRAME(40)
    BT_FRAME(41)
    BT_FRAME(42)
    BT_FRAME(43)
    BT_FRAME(44)
    BT_FRAME(45)
    BT_FRAME(46)
    BT_FRAME(47)
    BT_FRAME(48)
    BT_FRAME(49)

    BT_FRAME(50)
    BT_FRAME(51)
    BT_FRAME(52)
    BT_FRAME(53)
    BT_FRAME(54)
    BT_FRAME(55)
    BT_FRAME(56)
    BT_FRAME(57)
    BT_FRAME(58)
    BT_FRAME(59)

    BT_FRAME(60)
    BT_FRAME(61)
    BT_FRAME(62)
    BT_FRAME(63)
    BT_FRAME(64)
    BT_FRAME(65)
    BT_FRAME(66)
    BT_FRAME(67)
    BT_FRAME(68)
    BT_FRAME(69)

    BT_FRAME(70)
    BT_FRAME(71)
    BT_FRAME(72)
    BT_FRAME(73)
    BT_FRAME(74)
    BT_FRAME(75)
    BT_FRAME(76)
    BT_FRAME(77)
    BT_FRAME(78)
    BT_FRAME(79)

    BT_FRAME(80)
    BT_FRAME(81)
    BT_FRAME(82)
    BT_FRAME(83)
    BT_FRAME(84)
    BT_FRAME(85)
    BT_FRAME(86)
    BT_FRAME(87)
    BT_FRAME(88)
    BT_FRAME(89)

    BT_FRAME(90)
    BT_FRAME(91)
    BT_FRAME(92)
    BT_FRAME(93)
    BT_FRAME(94)
    BT_FRAME(95)
    BT_FRAME(96)
    BT_FRAME(97)
    BT_FRAME(98)
    BT_FRAME(99)

    BT_FRAME(100)
    BT_FRAME(101)
    BT_FRAME(102)
    BT_FRAME(103)
    BT_FRAME(104)
    BT_FRAME(105)
    BT_FRAME(106)
    BT_FRAME(107)
    BT_FRAME(108)
    BT_FRAME(109)

    BT_FRAME(110)
    BT_FRAME(111)
    BT_FRAME(112)
    BT_FRAME(113)
    BT_FRAME(114)
    BT_FRAME(115)
    BT_FRAME(116)
    BT_FRAME(117)
    BT_FRAME(118)
    BT_FRAME(119)

    BT_FRAME(120)
    BT_FRAME(121)
    BT_FRAME(122)
    BT_FRAME(123)
    BT_FRAME(124)
    BT_FRAME(125)
    BT_FRAME(126)
    BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt) {
    cassert(config_prof);
    not_reached();
}
#endif

static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
    unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

    return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
    return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
    /*
     * Create a single allocation that has space for vec of length bt->len.
     */
    size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
    prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
        size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
        true);
    if (gctx == NULL) {
        return NULL;
    }
    gctx->lock = prof_gctx_mutex_choose();
    /*
     * Set nlimbo to 1, in order to avoid a race condition with
     * prof_tctx_destroy()/prof_gctx_try_destroy().
     */
    gctx->nlimbo = 1;
    tctx_tree_new(&gctx->tctxs);
    /* Duplicate bt. */
    memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
    gctx->bt.vec = gctx->vec;
    gctx->bt.len = bt->len;
    return gctx;
}
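
/*
 * Note on the sizing in prof_gctx_create() above: the prof_gctx_t header and
 * its trailing backtrace vector share one allocation, the usual C idiom for a
 * variable-length tail array:
 *
 *     size = offsetof(prof_gctx_t, vec) + bt->len * sizeof(void *);
 *
 * Using offsetof() rather than sizeof() counts only the bytes actually needed
 * for bt->len frame addresses, and the combined block can later be released
 * with a single idalloctm() call.
 */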

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata) {
    cassert(config_prof);

    /*
     * Check that gctx is still unused by any thread cache before destroying
     * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
     * condition with this function, as does prof_tctx_destroy() in order to
     * avoid a race between the main body of prof_tctx_destroy() and entry
     * into this function.
     */
    prof_enter(tsd, tdata_self);
    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    assert(gctx->nlimbo != 0);
    if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
        /* Remove gctx from bt2gctx. */
        if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
            not_reached();
        }
        prof_leave(tsd, tdata_self);
        /* Destroy gctx. */
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
    } else {
        /*
         * Compensate for increment in prof_tctx_destroy() or
         * prof_lookup().
         */
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        prof_leave(tsd, tdata_self);
    }
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
    malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

    if (opt_prof_accum) {
        return false;
    }
    if (tctx->cnts.curobjs != 0) {
        return false;
    }
    if (tctx->prepared) {
        return false;
    }
    return true;
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
    if (opt_prof_accum) {
        return false;
    }
    if (!tctx_tree_empty(&gctx->tctxs)) {
        return false;
    }
    if (gctx->nlimbo != 0) {
        return false;
    }
    return true;
}

static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
    prof_tdata_t *tdata = tctx->tdata;
    prof_gctx_t *gctx = tctx->gctx;
    bool destroy_tdata, destroy_tctx, destroy_gctx;

    malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    assert(tctx->cnts.curobjs == 0);
    assert(tctx->cnts.curbytes == 0);
    assert(!opt_prof_accum);
    assert(tctx->cnts.accumobjs == 0);
    assert(tctx->cnts.accumbytes == 0);

    ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
    destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    switch (tctx->state) {
    case prof_tctx_state_nominal:
        tctx_tree_remove(&gctx->tctxs, tctx);
        destroy_tctx = true;
        if (prof_gctx_should_destroy(gctx)) {
            /*
             * Increment gctx->nlimbo in order to keep another
             * thread from winning the race to destroy gctx while
             * this one has gctx->lock dropped.  Without this, it
             * would be possible for another thread to:
             *
             * 1) Sample an allocation associated with gctx.
             * 2) Deallocate the sampled object.
             * 3) Successfully prof_gctx_try_destroy(gctx).
             *
             * The result would be that gctx no longer exists by the
             * time this thread accesses it in
             * prof_gctx_try_destroy().
             */
            gctx->nlimbo++;
            destroy_gctx = true;
        } else {
            destroy_gctx = false;
        }
        break;
    case prof_tctx_state_dumping:
        /*
         * A dumping thread needs tctx to remain valid until dumping
         * has finished.  Change state such that the dumping thread will
         * complete destruction during a late dump iteration phase.
         */
        tctx->state = prof_tctx_state_purgatory;
        destroy_tctx = false;
        destroy_gctx = false;
        break;
    default:
        not_reached();
        destroy_tctx = false;
        destroy_gctx = false;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    if (destroy_gctx) {
        prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
            tdata);
    }

    malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    if (destroy_tdata) {
        prof_tdata_destroy(tsd, tdata, false);
    }

    if (destroy_tctx) {
        idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
    }
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
    union {
        prof_gctx_t *p;
        void *v;
    } gctx, tgctx;
    union {
        prof_bt_t *p;
        void *v;
    } btkey;
    bool new_gctx;

    prof_enter(tsd, tdata);
    if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
        /* bt has never been seen before.  Insert it. */
        prof_leave(tsd, tdata);
        tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
        if (tgctx.v == NULL) {
            return true;
        }
        prof_enter(tsd, tdata);
        if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
            gctx.p = tgctx.p;
            btkey.p = &gctx.p->bt;
            if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
                /* OOM. */
                prof_leave(tsd, tdata);
                idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
                    true, true);
                return true;
            }
            new_gctx = true;
        } else {
            new_gctx = false;
        }
    } else {
        tgctx.v = NULL;
        new_gctx = false;
    }

    if (!new_gctx) {
        /*
         * Increment nlimbo, in order to avoid a race condition with
         * prof_tctx_destroy()/prof_gctx_try_destroy().
         */
        malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
        gctx.p->nlimbo++;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
        new_gctx = false;

        if (tgctx.v != NULL) {
            /* Lost race to insert. */
            idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
                true);
        }
    }
    prof_leave(tsd, tdata);

    *p_btkey = btkey.v;
    *p_gctx = gctx.p;
    *p_new_gctx = new_gctx;
    return false;
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
    union {
        prof_tctx_t *p;
        void *v;
    } ret;
    prof_tdata_t *tdata;
    bool not_found;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL) {
        return NULL;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
    not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
    if (!not_found) { /* Note double negative! */
        ret.p->prepared = true;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
    if (not_found) {
        void *btkey;
        prof_gctx_t *gctx;
        bool new_gctx, error;

        /*
         * This thread's cache lacks bt.  Look for it in the global
         * cache.
         */
        if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
            &new_gctx)) {
            return NULL;
        }

        /* Link a prof_tctx_t into gctx for this thread. */
        ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
            size2index(sizeof(prof_tctx_t)), false, NULL, true,
            arena_ichoose(tsd, NULL), true);
        if (ret.p == NULL) {
            if (new_gctx) {
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            }
            return NULL;
        }
        ret.p->tdata = tdata;
        ret.p->thr_uid = tdata->thr_uid;
        ret.p->thr_discrim = tdata->thr_discrim;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        ret.p->gctx = gctx;
        ret.p->tctx_uid = tdata->tctx_uid_next++;
        ret.p->prepared = true;
        ret.p->state = prof_tctx_state_initializing;
        malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
        error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
        malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
        if (error) {
            if (new_gctx) {
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            }
            idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
            return NULL;
        }
        malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
        ret.p->state = prof_tctx_state_nominal;
        tctx_tree_insert(&gctx->tctxs, ret.p);
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    }

    return ret.p;
}

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless
 * heap profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled.  Avoiding floating point code is
 * important on memory-constrained systems, but it also enables a workaround
 * for versions of glibc that don't properly save/restore floating point
 * registers during dynamic lazy symbol loading (which internally calls into
 * whatever malloc implementation happens to be integrated into the
 * application).  Note that some compilers (e.g. gcc 4.8) may use floating
 * point registers for fast memory moves, so jemalloc must be compiled with
 * such optimizations disabled (e.g. -mno-sse) in order for the workaround to
 * be complete.
 */
void
prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    if (!config_prof) {
        return;
    }

    if (lg_prof_sample == 0) {
        tdata->bytes_until_sample = 0;
        return;
    }

    /*
     * Compute sample interval as a geometrically distributed random
     * variable with mean (2^lg_prof_sample).
     *
     *                              __        __
     *                             |  log(u)  |                     1
     * tdata->bytes_until_sample = | -------- |, where p = ---------------
     *                             | log(1-p) |             lg_prof_sample
     *                                                     2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    r = prng_lg_range_u64(&tdata->prng_state, 53);
    u = (double)r * (1.0/9007199254740992.0L);
    tdata->bytes_until_sample = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
        + (uint64_t)1U;
#endif
}
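
/*
 * Worked example for the formula above, assuming lg_prof_sample = 19 (the
 * documented default): p = 2^-19, so the expected interval is 1/p = 524288
 * bytes (512 KiB).  If the 53-bit uniform draw yields u = 0.5, then
 *
 *     bytes_until_sample = ceil(log(0.5) / log(1 - 2^-19))
 *                        = ceil(-0.693147 / -0.0000019073504)
 *                        ~= 363409 bytes,
 *
 * i.e. roughly ln(2) times the mean, as expected for the median of a
 * geometric distribution.
 */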

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
    size_t *tdata_count = (size_t *)arg;

    (*tdata_count)++;

    return NULL;
}

size_t
prof_tdata_count(void) {
    size_t tdata_count = 0;
    tsdn_t *tsdn;

    tsdn = tsdn_fetch();
    malloc_mutex_lock(tsdn, &tdatas_mtx);
    tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
        (void *)&tdata_count);
    malloc_mutex_unlock(tsdn, &tdatas_mtx);

    return tdata_count;
}
#endif

#ifdef JEMALLOC_JET
size_t
prof_bt_count(void) {
    size_t bt_count;
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL) {
        return 0;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
    bt_count = ckh_count(&bt2gctx);
    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    return bt_count;
}
#endif

#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename) {
    int fd;

    fd = creat(filename, 0644);
    if (fd == -1 && !propagate_err) {
        malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
            filename);
        if (opt_abort) {
            abort();
        }
    }

    return fd;
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif

static bool
prof_dump_flush(bool propagate_err) {
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (!propagate_err) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort) {
                abort();
            }
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return ret;
}

static bool
prof_dump_close(bool propagate_err) {
    bool ret;

    assert(prof_dump_fd != -1);
    ret = prof_dump_flush(propagate_err);
    close(prof_dump_fd);
    prof_dump_fd = -1;

    return ret;
}

static bool
prof_dump_write(bool propagate_err, const char *s) {
    size_t i, slen, n;

    cassert(config_prof);

    i = 0;
    slen = strlen(s);
    while (i < slen) {
        /* Flush the buffer if it is full. */
        if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
            if (prof_dump_flush(propagate_err) && propagate_err) {
                return true;
            }
        }

        if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
            /* Finish writing. */
            n = slen - i;
        } else {
            /* Write as much of s as will fit. */
            n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
        }
        memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
        prof_dump_buf_end += n;
        i += n;
    }

    return false;
}
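
/*
 * A worked trace of prof_dump_write() above, assuming PROF_DUMP_BUFSIZE is
 * 65536 for concreteness: if the buffer already holds 65530 bytes and s is
 * 100 bytes long, the first loop iteration copies the 6 bytes that fit, the
 * second iteration flushes the now-full buffer to prof_dump_fd, and the
 * remaining 94 bytes stay buffered until the next flush or prof_dump_close().
 * Note that the split logic assumes each string fits in the buffer (the first
 * branch tests slen rather than slen - i); callers honor this by formatting
 * into small buffers, e.g. prof_dump_printf()'s PROF_PRINTF_BUFSIZE buffer.
 */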
1047
Jason Evanse42c3092015-07-22 15:44:47 -07001048JEMALLOC_FORMAT_PRINTF(2, 3)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001049static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001050prof_dump_printf(bool propagate_err, const char *format, ...) {
Jason Evansd81e4bd2012-03-06 14:57:45 -08001051 bool ret;
1052 va_list ap;
Jason Evanscd9a1342012-03-21 18:33:03 -07001053 char buf[PROF_PRINTF_BUFSIZE];
Jason Evansd81e4bd2012-03-06 14:57:45 -08001054
1055 va_start(ap, format);
Jason Evans6da54182012-03-23 18:05:51 -07001056 malloc_vsnprintf(buf, sizeof(buf), format, ap);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001057 va_end(ap);
Jason Evans4f37ef62014-01-16 13:23:56 -08001058 ret = prof_dump_write(propagate_err, buf);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001059
Jason Evansf4086432017-01-19 18:15:45 -08001060 return ret;
Jason Evansd81e4bd2012-03-06 14:57:45 -08001061}
1062
Jason Evans602c8e02014-08-18 16:22:13 -07001063static void
Jason Evansc4c25922017-01-15 16:56:30 -08001064prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001065 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001066
Jason Evansc1e00ef2016-05-10 22:21:10 -07001067 malloc_mutex_lock(tsdn, tctx->gctx->lock);
Jason Evans764b0002015-03-14 14:01:35 -07001068
1069 switch (tctx->state) {
1070 case prof_tctx_state_initializing:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001071 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001072 return;
Jason Evans764b0002015-03-14 14:01:35 -07001073 case prof_tctx_state_nominal:
1074 tctx->state = prof_tctx_state_dumping;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001075 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001076
Jason Evans764b0002015-03-14 14:01:35 -07001077 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
Jason Evans3a81cbd2014-08-16 12:58:55 -07001078
Jason Evans764b0002015-03-14 14:01:35 -07001079 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1080 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1081 if (opt_prof_accum) {
1082 tdata->cnt_summed.accumobjs +=
1083 tctx->dump_cnts.accumobjs;
1084 tdata->cnt_summed.accumbytes +=
1085 tctx->dump_cnts.accumbytes;
1086 }
1087 break;
1088 case prof_tctx_state_dumping:
1089 case prof_tctx_state_purgatory:
1090 not_reached();
Jason Evans602c8e02014-08-18 16:22:13 -07001091 }
1092}
1093
Jason Evans602c8e02014-08-18 16:22:13 -07001094static void
Jason Evansc4c25922017-01-15 16:56:30 -08001095prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001096 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001097
Jason Evans602c8e02014-08-18 16:22:13 -07001098 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1099 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1100 if (opt_prof_accum) {
1101 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1102 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1103 }
1104}
1105
Jason Evans602c8e02014-08-18 16:22:13 -07001106static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001107prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001108 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evansb2c0d632016-04-13 23:36:15 -07001109
Jason Evansc1e00ef2016-05-10 22:21:10 -07001110 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001111
1112 switch (tctx->state) {
1113 case prof_tctx_state_nominal:
1114 /* New since dumping started; ignore. */
1115 break;
1116 case prof_tctx_state_dumping:
1117 case prof_tctx_state_purgatory:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001118 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
Jason Evans602c8e02014-08-18 16:22:13 -07001119 break;
1120 default:
1121 not_reached();
Jason Evans3a81cbd2014-08-16 12:58:55 -07001122 }
1123
Jason Evansf4086432017-01-19 18:15:45 -08001124 return NULL;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001125}
1126
Jason Evansb2c0d632016-04-13 23:36:15 -07001127struct prof_tctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001128 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001129 bool propagate_err;
1130};
1131
Jason Evans602c8e02014-08-18 16:22:13 -07001132static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001133prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001134 struct prof_tctx_dump_iter_arg_s *arg =
1135 (struct prof_tctx_dump_iter_arg_s *)opaque;
1136
Jason Evansc1e00ef2016-05-10 22:21:10 -07001137 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001138
Jason Evansfb64ec22015-09-21 18:37:18 -07001139 switch (tctx->state) {
1140 case prof_tctx_state_initializing:
1141 case prof_tctx_state_nominal:
1142 /* Not captured by this dump. */
1143 break;
1144 case prof_tctx_state_dumping:
1145 case prof_tctx_state_purgatory:
Jason Evansb2c0d632016-04-13 23:36:15 -07001146 if (prof_dump_printf(arg->propagate_err,
Jason Evansfb64ec22015-09-21 18:37:18 -07001147 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1148 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1149 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001150 tctx->dump_cnts.accumbytes)) {
Jason Evansf4086432017-01-19 18:15:45 -08001151 return tctx;
Jason Evansc4c25922017-01-15 16:56:30 -08001152 }
Jason Evansfb64ec22015-09-21 18:37:18 -07001153 break;
1154 default:
1155 not_reached();
1156 }
Jason Evansf4086432017-01-19 18:15:45 -08001157 return NULL;
Jason Evans602c8e02014-08-18 16:22:13 -07001158}
1159
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;
	prof_tctx_t *ret;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return ret;
}

static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
	cassert(config_prof);

	malloc_mutex_lock(tsdn, gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(tsdn, gctx->lock);
}

struct prof_gctx_merge_iter_arg_s {
	tsdn_t	*tsdn;
	size_t	leak_ngctx;
};

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	struct prof_gctx_merge_iter_arg_s *arg =
	    (struct prof_gctx_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
	    (void *)arg->tsdn);
	if (gctx->cnt_summed.curobjs != 0) {
		arg->leak_ngctx++;
	}
	malloc_mutex_unlock(arg->tsdn, gctx->lock);

	return NULL;
}

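/*
 * Tear down the dump list: drop each gctx's dump-time limbo reference, destroy
 * the tctx's that entered purgatory during the dump, and destroy any gctx that
 * thereby became unreferenced.
 */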
static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree.  Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter,
				    (void *)tsd_tsdn(tsd));
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd_tsdn(tsd), to_destroy,
					    NULL, NULL, true, true);
				} else {
					next = NULL;
				}
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		}
	}
}

struct prof_tdata_merge_iter_arg_s {
	tsdn_t		*tsdn;
	prof_cnt_t	cnt_all;
};

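/*
 * tdata tree iteration callback: for each non-expired tdata, snapshot its live
 * thread contexts and accumulate the per-thread sums into arg->cnt_all.
 */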
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *opaque) {
	struct prof_tdata_merge_iter_arg_s *arg =
	    (struct prof_tdata_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t	*p;
			void		*v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);) {
			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
		}

		arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
		arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
		if (opt_prof_accum) {
			arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
			arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
		}
	} else {
		tdata->dumping = false;
	}
	malloc_mutex_unlock(arg->tsdn, tdata->lock);

	return NULL;
}

static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	bool propagate_err = *(bool *)arg;

	if (!tdata->dumping) {
		return NULL;
	}

	if (prof_dump_printf(propagate_err,
	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
	    tdata->thr_uid, tdata->cnt_summed.curobjs,
	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
	    tdata->cnt_summed.accumbytes,
	    (tdata->thread_name != NULL) ? " " : "",
	    (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
		return tdata;
	}
	return NULL;
}

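/*
 * Write the "heap_v2" header: the sample period, the merged totals ("t*"), and
 * one line per dumping thread.  Illustrative output (made-up counts):
 *
 *   heap_v2/524288
 *     t*: 42: 51200 [0: 0]
 *     t0: 42: 51200 [0: 0]
 */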
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) {
	bool ret;

	if (prof_dump_printf(propagate_err,
	    "heap_v2/%"FMTu64"\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &tdatas_mtx);
	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
	    (void *)&propagate_err) != NULL);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);
	return ret;
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define prof_dump_header JEMALLOC_N(prof_dump_header)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif

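/*
 * Write one backtrace record: an "@" line of return addresses, the merged
 * totals for the gctx, and the per-thread breakdown.
 */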
static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
    const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
	bool ret;
	unsigned i;
	struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;

	cassert(config_prof);
	malloc_mutex_assert_owner(tsdn, gctx->lock);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "@")) {
		ret = true;
		goto label_return;
	}
	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_printf(propagate_err,
	    "\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	prof_tctx_dump_iter_arg.tsdn = tsdn;
	prof_tctx_dump_iter_arg.propagate_err = propagate_err;
	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
	    (void *)&prof_tctx_dump_iter_arg) != NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return ret;
}

#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...) {
	int mfd;
	va_list ap;
	char filename[PATH_MAX + 1];

	va_start(ap, format);
	malloc_vsnprintf(filename, sizeof(filename), format, ap);
	va_end(ap);
	mfd = open(filename, O_RDONLY);

	return mfd;
}
#endif

static int
prof_getpid(void) {
#ifdef _WIN32
	return GetCurrentProcessId();
#else
	return getpid();
#endif
}

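/*
 * Append the process's memory map as a "MAPPED_LIBRARIES:" section so that
 * jeprof can symbolize the dumped addresses; not implemented on Windows.
 */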
static bool
prof_dump_maps(bool propagate_err) {
	bool ret;
	int mfd;

	cassert(config_prof);
#ifdef __FreeBSD__
	mfd = prof_open_maps("/proc/curproc/map");
#elif defined(_WIN32)
	mfd = -1; /* Not implemented. */
#else
	{
		int pid = prof_getpid();

		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
		if (mfd == -1) {
			mfd = prof_open_maps("/proc/%d/maps", pid);
		}
	}
#endif
	if (mfd != -1) {
		ssize_t nread;

		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err) {
			ret = true;
			goto label_return;
		}
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1) {
		close(mfd);
	}
	return ret;
}

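/*
 * Rough intuition for the unbiasing below (illustrative numbers only): with
 * the default 2^19-byte sample period, a context whose sampled objects average
 * 512 KiB has ratio == 1, so each sampled object represents about
 * 1/(1 - e^-1) ~= 1.58 expected objects; smaller objects are sampled less
 * often and therefore receive proportionally larger scale factors.
 */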
/*
 * See prof_sample_threshold_update() comment for why the body of this function
 * is conditionally compiled.
 */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename) {
#ifdef JEMALLOC_PROF
	/*
	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result
	 * may differ slightly from what jeprof reports, because here we scale
	 * the summary values, whereas jeprof scales each context individually
	 * and reports the sums of the scaled values.
	 */
	if (cnt_all->curbytes != 0) {
		double sample_period = (double)((uint64_t)1 << lg_prof_sample);
		double ratio = (((double)cnt_all->curbytes) /
		    (double)cnt_all->curobjs) / sample_period;
		double scale_factor = 1.0 / (1.0 - exp(-ratio));
		uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
		    * scale_factor);
		uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
		    scale_factor);

		malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
		    " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
		    curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
		    1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
		    filename);
	}
#endif
}

struct prof_gctx_dump_iter_arg_s {
	tsdn_t	*tsdn;
	bool	propagate_err;
};

static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	prof_gctx_t *ret;
	struct prof_gctx_dump_iter_arg_s *arg =
	    (struct prof_gctx_dump_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);

	if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
	    gctxs)) {
		ret = gctx;
		goto label_return;
	}

	ret = NULL;
label_return:
	malloc_mutex_unlock(arg->tsdn, gctx->lock);
	return ret;
}

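/*
 * First pass of a dump: with bt2gctx stabilized via prof_enter(), place every
 * gctx in limbo and link it into gctxs, then merge all tctx stats into the
 * tdata and gctx summaries that the write pass consumes.
 */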
static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    prof_gctx_tree_t *gctxs) {
	size_t tabind;
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;

	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
	}

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their
	 * tctx stats and merge them into the associated gctx's.
	 */
	prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
	    (void *)prof_tdata_merge_iter_arg);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_merge_iter_arg->leak_ngctx = 0;
	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
	    (void *)prof_gctx_merge_iter_arg);

	prof_leave(tsd, tdata);
}

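/*
 * Second pass of a dump: write the header, the per-gctx records, and the
 * memory map to the named file, closing it on both success and write error.
 */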
static bool
prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
    prof_gctx_tree_t *gctxs) {
	/* Create dump file. */
	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
		return true;
	}

	/* Dump profile header. */
	if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
	    &prof_tdata_merge_iter_arg->cnt_all)) {
		goto label_write_error;
	}

	/* Dump per gctx profile stats. */
	prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_dump_iter_arg->propagate_err = propagate_err;
	if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
	    (void *)prof_gctx_dump_iter_arg) != NULL) {
		goto label_write_error;
	}

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err)) {
		goto label_write_error;
	}

	if (prof_dump_close(propagate_err)) {
		return true;
	}

	return false;
label_write_error:
	prof_dump_close(propagate_err);
	return true;
}

static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck) {
	prof_tdata_t *tdata;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
	prof_gctx_tree_t gctxs;
	bool err;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);

	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
	    &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
	    &prof_gctx_dump_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);

	if (err) {
		return true;
	}

	if (leakcheck) {
		prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
		    prof_gctx_merge_iter_arg.leak_ngctx, filename);
	}
	return false;
}

#ifdef JEMALLOC_JET
void
prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes) {
	tsd_t *tsd;
	prof_tdata_t *tdata;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	prof_gctx_tree_t gctxs;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		if (curobjs != NULL) {
			*curobjs = 0;
		}
		if (curbytes != NULL) {
			*curbytes = 0;
		}
		if (accumobjs != NULL) {
			*accumobjs = 0;
		}
		if (accumbytes != NULL) {
			*accumbytes = 0;
		}
		return;
	}

	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	if (curobjs != NULL) {
		*curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
	}
	if (curbytes != NULL) {
		*curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
	}
	if (accumobjs != NULL) {
		*accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
	}
	if (accumbytes != NULL) {
		*accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
	}
}
#endif

#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
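/*
 * Compose a dump filename; e.g. with the default "jeprof" prefix and a
 * hypothetical pid/seq, "jeprof.1234.0.f.heap" (final dump) or
 * "jeprof.1234.7.i7.heap" (interval dump).
 */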
static void
prof_dump_filename(char *filename, char v, uint64_t vseq) {
	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c.heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}

static void
prof_fdump(void) {
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);
	assert(opt_prof_final);
	assert(opt_prof_prefix[0] != '\0');

	if (!prof_booted) {
		return;
	}
	tsd = tsd_fetch();

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump_filename(filename, 'f', VSEQ_INVALID);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump(tsd, false, filename, opt_prof_leak);
}

bool
prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
	cassert(config_prof);

#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
	    WITNESS_RANK_PROF_ACCUM)) {
		return true;
	}
	prof_accum->accumbytes = 0;
#else
	atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
#endif
	return false;
}

void
prof_idump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn)) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[PATH_MAX + 1];
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

bool
prof_mdump(tsd_t *tsd, const char *filename) {
	char filename_buf[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (!opt_prof || !prof_booted) {
		return true;
	}

	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		if (opt_prof_prefix[0] == '\0') {
			return true;
		}
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return prof_dump(tsd, true, filename, false);
}

void
prof_gdump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn)) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_gdump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[DUMP_FILENAME_BUFSIZE];
		malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

static void
prof_bt_hash(const void *key, size_t r_hash[2]) {
	prof_bt_t *bt = (prof_bt_t *)key;

	cassert(config_prof);

	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2) {
	const prof_bt_t *bt1 = (prof_bt_t *)k1;
	const prof_bt_t *bt2 = (prof_bt_t *)k2;

	cassert(config_prof);

	if (bt1->len != bt2->len) {
		return false;
	}
	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

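/*
 * Allocate a process-wide unique thread identifier; uniqueness is guaranteed
 * by serializing allocation on next_thr_uid_mtx.
 */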
static uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) {
	uint64_t thr_uid;

	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
	thr_uid = next_thr_uid;
	next_thr_uid++;
	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);

	return thr_uid;
}

static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
    char *thread_name, bool active) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
	    size2index(sizeof(prof_tdata_t)), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (tdata == NULL) {
		return NULL;
	}

	tdata->lock = prof_tdata_mutex_choose(thr_uid);
	tdata->thr_uid = thr_uid;
	tdata->thr_discrim = thr_discrim;
	tdata->thread_name = thread_name;
	tdata->attached = true;
	tdata->expired = false;
	tdata->tctx_uid_next = 0;

	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
	    prof_bt_keycomp)) {
		idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
		return NULL;
	}

	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
	prof_sample_threshold_update(tdata);

	tdata->enq = false;
	tdata->enq_idump = false;
	tdata->enq_gdump = false;

	tdata->dumping = false;
	tdata->active = active;

	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_insert(&tdatas, tdata);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	return tdata;
}

prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
	return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}

static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
	if (tdata->attached && !even_if_attached) {
		return false;
	}
	if (ckh_count(&tdata->bt2tctx) != 0) {
		return false;
	}
	return true;
}

static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsdn, tdata->lock);

	return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}

static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);

	tdata_tree_remove(&tdatas, tdata);

	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
	}
	ckh_delete(tsd, &tdata->bt2tctx);
	idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}

static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
}

static void
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	if (tdata->attached) {
		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
		    true);
		/*
		 * Only detach if !destroy_tdata, because detaching would allow
		 * another thread to win the race to destroy tdata.
		 */
		if (!destroy_tdata) {
			tdata->attached = false;
		}
		tsd_prof_tdata_set(tsd, NULL);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (destroy_tdata) {
		prof_tdata_destroy(tsd, tdata, true);
	}
}

prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
	uint64_t thr_uid = tdata->thr_uid;
	uint64_t thr_discrim = tdata->thr_discrim + 1;
	char *thread_name = (tdata->thread_name != NULL) ?
	    prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
	bool active = tdata->active;

	prof_tdata_detach(tsd, tdata);
	return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
	    active);
}

static bool
prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsdn, tdata->lock);
	if (!tdata->expired) {
		tdata->expired = true;
		destroy_tdata = tdata->attached ? false :
		    prof_tdata_should_destroy(tsdn, tdata, false);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsdn, tdata->lock);

	return destroy_tdata;
}

static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}

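/*
 * Change the sample rate and discard all accumulated data: expire every tdata,
 * destroying those that are no longer referenced, so that subsequent profiles
 * reflect only the new lg_sample.
 */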
2058void
Jason Evansc4c25922017-01-15 16:56:30 -08002059prof_reset(tsd_t *tsd, size_t lg_sample) {
Jason Evans20c31de2014-10-02 23:01:10 -07002060 prof_tdata_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07002061
2062 assert(lg_sample < (sizeof(uint64_t) << 3));
2063
Jason Evansb54d1602016-10-20 23:59:12 -07002064 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
2065 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07002066
2067 lg_prof_sample = lg_sample;
Jason Evans20c31de2014-10-02 23:01:10 -07002068
2069 next = NULL;
2070 do {
2071 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
Jason Evansb54d1602016-10-20 23:59:12 -07002072 prof_tdata_reset_iter, (void *)tsd);
Jason Evans20c31de2014-10-02 23:01:10 -07002073 if (to_destroy != NULL) {
2074 next = tdata_tree_next(&tdatas, to_destroy);
Jason Evansb54d1602016-10-20 23:59:12 -07002075 prof_tdata_destroy_locked(tsd, to_destroy, false);
Jason Evansc4c25922017-01-15 16:56:30 -08002076 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07002077 next = NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002078 }
Jason Evans20c31de2014-10-02 23:01:10 -07002079 } while (next != NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07002080
Jason Evansb54d1602016-10-20 23:59:12 -07002081 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
2082 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4d6a1342010-10-20 19:05:59 -07002083}
2084
Jason Evanscd9a1342012-03-21 18:33:03 -07002085void
Jason Evansc4c25922017-01-15 16:56:30 -08002086prof_tdata_cleanup(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002087 prof_tdata_t *tdata;
Jason Evans4d6a1342010-10-20 19:05:59 -07002088
Jason Evansc4c25922017-01-15 16:56:30 -08002089 if (!config_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002090 return;
Jason Evansc4c25922017-01-15 16:56:30 -08002091 }
Jason Evans7372b152012-02-10 20:22:09 -08002092
Jason Evans5460aa62014-09-22 21:09:23 -07002093 tdata = tsd_prof_tdata_get(tsd);
Jason Evansc4c25922017-01-15 16:56:30 -08002094 if (tdata != NULL) {
Jason Evans5460aa62014-09-22 21:09:23 -07002095 prof_tdata_detach(tsd, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08002096 }
Jason Evans6109fe02010-02-10 10:37:56 -08002097}
2098
Jason Evansfc12c0b2014-10-03 23:25:30 -07002099bool
Jason Evansc4c25922017-01-15 16:56:30 -08002100prof_active_get(tsdn_t *tsdn) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002101 bool prof_active_current;
2102
Jason Evansc1e00ef2016-05-10 22:21:10 -07002103 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002104 prof_active_current = prof_active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002105 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002106 return prof_active_current;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002107}
2108
2109bool
Jason Evansc4c25922017-01-15 16:56:30 -08002110prof_active_set(tsdn_t *tsdn, bool active) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002111 bool prof_active_old;
2112
Jason Evansc1e00ef2016-05-10 22:21:10 -07002113 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002114 prof_active_old = prof_active;
2115 prof_active = active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002116 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002117 return prof_active_old;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002118}
2119
Jason Evans602c8e02014-08-18 16:22:13 -07002120const char *
Jason Evansc4c25922017-01-15 16:56:30 -08002121prof_thread_name_get(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002122 prof_tdata_t *tdata;
2123
Jason Evans5460aa62014-09-22 21:09:23 -07002124 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002125 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002126 return "";
Jason Evansc4c25922017-01-15 16:56:30 -08002127 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002128 return (tdata->thread_name != NULL ? tdata->thread_name : "");
Jason Evans602c8e02014-08-18 16:22:13 -07002129}
2130
Jason Evansfc12c0b2014-10-03 23:25:30 -07002131static char *
Jason Evansc4c25922017-01-15 16:56:30 -08002132prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002133 char *ret;
2134 size_t size;
2135
Jason Evansc4c25922017-01-15 16:56:30 -08002136 if (thread_name == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002137 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002138 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002139
2140 size = strlen(thread_name) + 1;
Jason Evansc4c25922017-01-15 16:56:30 -08002141 if (size == 1) {
Jason Evansf4086432017-01-19 18:15:45 -08002142 return "";
Jason Evansc4c25922017-01-15 16:56:30 -08002143 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002144
Jason Evansc1e00ef2016-05-10 22:21:10 -07002145 ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
2146 arena_get(TSDN_NULL, 0, true), true);
Jason Evansc4c25922017-01-15 16:56:30 -08002147 if (ret == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002148 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002149 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002150 memcpy(ret, thread_name, size);
Jason Evansf4086432017-01-19 18:15:45 -08002151 return ret;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002152}
2153
2154int
Jason Evansc4c25922017-01-15 16:56:30 -08002155prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
Jason Evans602c8e02014-08-18 16:22:13 -07002156 prof_tdata_t *tdata;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002157 unsigned i;
Jason Evans602c8e02014-08-18 16:22:13 -07002158 char *s;
2159
Jason Evans5460aa62014-09-22 21:09:23 -07002160 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002161 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002162 return EAGAIN;
Jason Evansc4c25922017-01-15 16:56:30 -08002163 }
Jason Evans602c8e02014-08-18 16:22:13 -07002164
Jason Evansfc12c0b2014-10-03 23:25:30 -07002165 /* Validate input. */
Jason Evansc4c25922017-01-15 16:56:30 -08002166 if (thread_name == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002167 return EFAULT;
Jason Evansc4c25922017-01-15 16:56:30 -08002168 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002169 for (i = 0; thread_name[i] != '\0'; i++) {
2170 char c = thread_name[i];
Jason Evansc4c25922017-01-15 16:56:30 -08002171 if (!isgraph(c) && !isblank(c)) {
Jason Evansf4086432017-01-19 18:15:45 -08002172 return EFAULT;
Jason Evansc4c25922017-01-15 16:56:30 -08002173 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002174 }
2175
Jason Evansc1e00ef2016-05-10 22:21:10 -07002176 s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
Jason Evansc4c25922017-01-15 16:56:30 -08002177 if (s == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002178 return EAGAIN;
Jason Evansc4c25922017-01-15 16:56:30 -08002179 }
Jason Evans602c8e02014-08-18 16:22:13 -07002180
Jason Evansfc12c0b2014-10-03 23:25:30 -07002181 if (tdata->thread_name != NULL) {
Qi Wangbfa530b2017-04-07 14:12:30 -07002182 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
2183 true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002184 tdata->thread_name = NULL;
2185 }
Jason Evansc4c25922017-01-15 16:56:30 -08002186 if (strlen(s) > 0) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002187 tdata->thread_name = s;
Jason Evansc4c25922017-01-15 16:56:30 -08002188 }
Jason Evansf4086432017-01-19 18:15:45 -08002189 return 0;
Jason Evans602c8e02014-08-18 16:22:13 -07002190}

bool
prof_thread_active_get(tsd_t *tsd) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return false;
	}
	return tdata->active;
}

bool
prof_thread_active_set(tsd_t *tsd, bool active) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}
	tdata->active = active;
	return false;
}
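
/*
 * Usage sketch: these accessors back the documented "thread.prof.active"
 * mallctl, e.g. to stop sampling allocations made by the calling thread:
 *
 *	bool active = false;
 *	mallctl("thread.prof.active", NULL, NULL, (void *)&active,
 *	    sizeof(bool));
 */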

bool
prof_thread_active_init_get(tsdn_t *tsdn) {
	bool active_init;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init;
}

bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
	bool active_init_old;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init_old;
}
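
/*
 * Usage sketch: this pair backs the documented "prof.thread_active_init"
 * mallctl, which controls the initial tdata->active value inherited by
 * threads created after the call (reading the old value back, as shown, is
 * optional):
 *
 *	bool old_init, new_init = false;
 *	size_t sz = sizeof(bool);
 *	mallctl("prof.thread_active_init", (void *)&old_init, &sz,
 *	    (void *)&new_init, sizeof(bool));
 */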

bool
prof_gdump_get(tsdn_t *tsdn) {
	bool prof_gdump_current;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_current;
}

bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
	bool prof_gdump_old;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_old;
}
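
/*
 * Usage sketch: prof_gdump_{get,set}() back the documented "prof.gdump"
 * mallctl; while enabled, a profile is dumped every time total virtual
 * memory exceeds the previous maximum:
 *
 *	bool gdump = true;
 *	mallctl("prof.gdump", NULL, NULL, (void *)&gdump, sizeof(bool));
 */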

void
prof_boot0(void) {
	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void) {
	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}
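
/*
 * Worked example: with MALLOC_CONF="prof:true,lg_prof_interval:30",
 * prof_boot1() computes prof_interval = (uint64_t)1 << 30 = 1073741824,
 * i.e. an interval-triggered dump per ~1 GiB of allocation activity. The
 * default opt_lg_prof_interval of -1 leaves prof_interval at 0, which
 * disables interval-triggered dumps entirely.
 */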

bool
prof_boot2(tsd_t *tsd) {
	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
		    WITNESS_RANK_PROF_ACTIVE)) {
			return true;
		}

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
		    WITNESS_RANK_PROF_GDUMP)) {
			return true;
		}

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx,
		    "prof_thread_active_init",
		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) {
			return true;
		}

		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp)) {
			return true;
		}
		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
		    WITNESS_RANK_PROF_BT2GCTX)) {
			return true;
		}

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
		    WITNESS_RANK_PROF_TDATAS)) {
			return true;
		}

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
		    WITNESS_RANK_PROF_NEXT_THR_UID)) {
			return true;
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
		    WITNESS_RANK_PROF_DUMP_SEQ)) {
			return true;
		}
		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
		    WITNESS_RANK_PROF_DUMP)) {
			return true;
		}

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (gctx_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
			    WITNESS_RANK_PROF_GCTX)) {
				return true;
			}
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (tdata_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
			    WITNESS_RANK_PROF_TDATA)) {
				return true;
			}
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return false;
}
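
/*
 * For context, a typical configuration that exercises this boot path (a
 * usage sketch; the flag values are illustrative):
 *
 *	MALLOC_CONF="prof:true,prof_final:true,lg_prof_sample:19" ./app
 *
 * With opt_prof_final set and a non-empty opt_prof_prefix, the
 * atexit(prof_fdump) registration above emits a final heap profile at
 * process exit.
 */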

void
prof_prefork0(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
		malloc_mutex_prefork(tsdn, &tdatas_mtx);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
		}
	}
}

void
prof_prefork1(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		malloc_mutex_prefork(tsdn, &prof_active_mtx);
		malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
	}
}

void
prof_postfork_parent(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_parent(tsdn,
		    &prof_thread_active_init_mtx);
		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
	}
}

void
prof_postfork_child(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
	}
}

/******************************************************************************/