#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"

/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
/*
 * We have a circular dependency -- jemalloc_internal.h tells us if we should
 * use libgcc's unwinding functionality, but after we've included that, we've
 * already hooked _Unwind_Backtrace.  We'll temporarily disable hooking.
 */
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];
/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same
 * time.  The primary motivation for this sharing, though, is that gctx's are
 * ephemeral, and destroying mutexes causes complications for systems that
 * allocate when creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for
 * more than one tdata at the same time, even though a gctx lock may be
 * acquired while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
/* Non-static, to enable profiling. */
malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer
 * for all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

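/*
 * tctx's are totally ordered by (thr_uid, thr_discrim, tctx_uid); each
 * comparison below falls through to the next key only on a tie.
 */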
static int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
    uint64_t a_thr_uid = a->thr_uid;
    uint64_t b_thr_uid = b->thr_uid;
    int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
    if (ret == 0) {
        uint64_t a_thr_discrim = a->thr_discrim;
        uint64_t b_thr_discrim = b->thr_discrim;
        ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
            b_thr_discrim);
        if (ret == 0) {
            uint64_t a_tctx_uid = a->tctx_uid;
            uint64_t b_tctx_uid = b->tctx_uid;
            ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
                b_tctx_uid);
        }
    }
    return ret;
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
    unsigned a_len = a->bt.len;
    unsigned b_len = b->bt.len;
    unsigned comp_len = (a_len < b_len) ? a_len : b_len;
    int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
    if (ret == 0) {
        ret = (a_len > b_len) - (a_len < b_len);
    }
    return ret;
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
    int ret;
    uint64_t a_uid = a->thr_uid;
    uint64_t b_uid = b->thr_uid;

    ret = ((a_uid > b_uid) - (a_uid < b_uid));
    if (ret == 0) {
        uint64_t a_discrim = a->thr_discrim;
        uint64_t b_discrim = b->thr_discrim;

        ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
    }
    return ret;
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (updated) {
        /*
         * Compute a new sample threshold.  This isn't very important
         * in practice, because this function is rarely executed, so
         * the potential for sample bias is minimal except in contrived
         * programs.
         */
        tdata = prof_tdata_get(tsd, true);
        if (tdata != NULL) {
            prof_sample_threshold_update(tdata);
        }
    }

    if ((uintptr_t)tctx > (uintptr_t)1U) {
        malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
        tctx->prepared = false;
        if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
            prof_tctx_destroy(tsd, tctx);
        } else {
            malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
        }
    }
}

void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
    prof_tctx_set(tsdn, ptr, usize, NULL, tctx);

    malloc_mutex_lock(tsdn, tctx->tdata->lock);
    tctx->cnts.curobjs++;
    tctx->cnts.curbytes += usize;
    if (opt_prof_accum) {
        tctx->cnts.accumobjs++;
        tctx->cnts.accumbytes += usize;
    }
    tctx->prepared = false;
    malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
    malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
    assert(tctx->cnts.curobjs > 0);
    assert(tctx->cnts.curbytes >= usize);
    tctx->cnts.curobjs--;
    tctx->cnts.curbytes -= usize;

    if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
        prof_tctx_destroy(tsd, tctx);
    } else {
        malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
    }
}

void
bt_init(prof_bt_t *bt, void **vec) {
    cassert(config_prof);

    bt->vec = vec;
    bt->len = 0;
}

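/*
 * prof_enter()/prof_leave() bracket all accesses to bt2gctx.  Interval- and
 * gdump-triggered dumps requested while the lock is held are recorded in
 * tdata->enq_idump/tdata->enq_gdump, and prof_leave() issues any such
 * deferred dumps once the lock has been dropped.
 */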
static void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    if (tdata != NULL) {
        assert(!tdata->enq);
        tdata->enq = true;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

static void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    if (tdata != NULL) {
        bool idump, gdump;

        assert(tdata->enq);
        tdata->enq = false;
        idump = tdata->enq_idump;
        tdata->enq_idump = false;
        gdump = tdata->enq_gdump;
        tdata->enq_gdump = false;

        if (idump) {
            prof_idump(tsd_tsdn(tsd));
        }
        if (gdump) {
            prof_gdump(tsd_tsdn(tsd));
        }
    }
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt) {
    int nframes;

    cassert(config_prof);
    assert(bt->len == 0);
    assert(bt->vec != NULL);

    nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
    if (nframes <= 0) {
        return;
    }
    bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
    cassert(config_prof);

    return _URC_NO_REASON;
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
    prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
    void *ip;

    cassert(config_prof);

    ip = (void *)_Unwind_GetIP(context);
    if (ip == NULL) {
        return _URC_END_OF_STACK;
    }
    data->bt->vec[data->bt->len] = ip;
    data->bt->len++;
    if (data->bt->len == data->max) {
        return _URC_END_OF_STACK;
    }

    return _URC_NO_REASON;
}

void
prof_backtrace(prof_bt_t *bt) {
    prof_unwind_data_t data = {bt, PROF_BT_MAX};

    cassert(config_prof);

    _Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
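/*
 * __builtin_frame_address() and __builtin_return_address() require constant
 * arguments, so the frame walk cannot be written as a loop; BT_FRAME instead
 * unrolls it explicitly for frames 0..127, with each step additionally
 * bounded by PROF_BT_MAX.
 */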
void
prof_backtrace(prof_bt_t *bt) {
#define BT_FRAME(i)                                                     \
    if ((i) < PROF_BT_MAX) {                                            \
        void *p;                                                        \
        if (__builtin_frame_address(i) == 0) {                          \
            return;                                                     \
        }                                                               \
        p = __builtin_return_address(i);                                \
        if (p == NULL) {                                                \
            return;                                                     \
        }                                                               \
        bt->vec[(i)] = p;                                               \
        bt->len = (i) + 1;                                              \
    } else {                                                            \
        return;                                                         \
    }

    cassert(config_prof);

    BT_FRAME(0)
    BT_FRAME(1)
    BT_FRAME(2)
    BT_FRAME(3)
    BT_FRAME(4)
    BT_FRAME(5)
    BT_FRAME(6)
    BT_FRAME(7)
    BT_FRAME(8)
    BT_FRAME(9)

    BT_FRAME(10)
    BT_FRAME(11)
    BT_FRAME(12)
    BT_FRAME(13)
    BT_FRAME(14)
    BT_FRAME(15)
    BT_FRAME(16)
    BT_FRAME(17)
    BT_FRAME(18)
    BT_FRAME(19)

    BT_FRAME(20)
    BT_FRAME(21)
    BT_FRAME(22)
    BT_FRAME(23)
    BT_FRAME(24)
    BT_FRAME(25)
    BT_FRAME(26)
    BT_FRAME(27)
    BT_FRAME(28)
    BT_FRAME(29)

    BT_FRAME(30)
    BT_FRAME(31)
    BT_FRAME(32)
    BT_FRAME(33)
    BT_FRAME(34)
    BT_FRAME(35)
    BT_FRAME(36)
    BT_FRAME(37)
    BT_FRAME(38)
    BT_FRAME(39)

    BT_FRAME(40)
    BT_FRAME(41)
    BT_FRAME(42)
    BT_FRAME(43)
    BT_FRAME(44)
    BT_FRAME(45)
    BT_FRAME(46)
    BT_FRAME(47)
    BT_FRAME(48)
    BT_FRAME(49)

    BT_FRAME(50)
    BT_FRAME(51)
    BT_FRAME(52)
    BT_FRAME(53)
    BT_FRAME(54)
    BT_FRAME(55)
    BT_FRAME(56)
    BT_FRAME(57)
    BT_FRAME(58)
    BT_FRAME(59)

    BT_FRAME(60)
    BT_FRAME(61)
    BT_FRAME(62)
    BT_FRAME(63)
    BT_FRAME(64)
    BT_FRAME(65)
    BT_FRAME(66)
    BT_FRAME(67)
    BT_FRAME(68)
    BT_FRAME(69)

    BT_FRAME(70)
    BT_FRAME(71)
    BT_FRAME(72)
    BT_FRAME(73)
    BT_FRAME(74)
    BT_FRAME(75)
    BT_FRAME(76)
    BT_FRAME(77)
    BT_FRAME(78)
    BT_FRAME(79)

    BT_FRAME(80)
    BT_FRAME(81)
    BT_FRAME(82)
    BT_FRAME(83)
    BT_FRAME(84)
    BT_FRAME(85)
    BT_FRAME(86)
    BT_FRAME(87)
    BT_FRAME(88)
    BT_FRAME(89)

    BT_FRAME(90)
    BT_FRAME(91)
    BT_FRAME(92)
    BT_FRAME(93)
    BT_FRAME(94)
    BT_FRAME(95)
    BT_FRAME(96)
    BT_FRAME(97)
    BT_FRAME(98)
    BT_FRAME(99)

    BT_FRAME(100)
    BT_FRAME(101)
    BT_FRAME(102)
    BT_FRAME(103)
    BT_FRAME(104)
    BT_FRAME(105)
    BT_FRAME(106)
    BT_FRAME(107)
    BT_FRAME(108)
    BT_FRAME(109)

    BT_FRAME(110)
    BT_FRAME(111)
    BT_FRAME(112)
    BT_FRAME(113)
    BT_FRAME(114)
    BT_FRAME(115)
    BT_FRAME(116)
    BT_FRAME(117)
    BT_FRAME(118)
    BT_FRAME(119)

    BT_FRAME(120)
    BT_FRAME(121)
    BT_FRAME(122)
    BT_FRAME(123)
    BT_FRAME(124)
    BT_FRAME(125)
    BT_FRAME(126)
    BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt) {
    cassert(config_prof);
    not_reached();
}
#endif

static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
    unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

    return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
    return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
    /*
     * Create a single allocation that has space for vec of length bt->len.
     */
    size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
    prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
        sz_size2index(size), false, NULL, true,
        arena_get(TSDN_NULL, 0, true), true);
    if (gctx == NULL) {
        return NULL;
    }
    gctx->lock = prof_gctx_mutex_choose();
    /*
     * Set nlimbo to 1, in order to avoid a race condition with
     * prof_tctx_destroy()/prof_gctx_try_destroy().
     */
    gctx->nlimbo = 1;
    tctx_tree_new(&gctx->tctxs);
    /* Duplicate bt. */
    memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
    gctx->bt.vec = gctx->vec;
    gctx->bt.len = bt->len;
    return gctx;
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata) {
    cassert(config_prof);

    /*
     * Check that gctx is still unused by any thread cache before destroying
     * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
     * condition with this function, as does prof_tctx_destroy() in order to
     * avoid a race between the main body of prof_tctx_destroy() and entry
     * into this function.
     */
    prof_enter(tsd, tdata_self);
    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    assert(gctx->nlimbo != 0);
    if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
        /* Remove gctx from bt2gctx. */
        if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
            not_reached();
        }
        prof_leave(tsd, tdata_self);
        /* Destroy gctx. */
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
    } else {
        /*
         * Compensate for increment in prof_tctx_destroy() or
         * prof_lookup().
         */
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        prof_leave(tsd, tdata_self);
    }
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
    malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

    if (opt_prof_accum) {
        return false;
    }
    if (tctx->cnts.curobjs != 0) {
        return false;
    }
    if (tctx->prepared) {
        return false;
    }
    return true;
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
    if (opt_prof_accum) {
        return false;
    }
    if (!tctx_tree_empty(&gctx->tctxs)) {
        return false;
    }
    if (gctx->nlimbo != 0) {
        return false;
    }
    return true;
}

static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
    prof_tdata_t *tdata = tctx->tdata;
    prof_gctx_t *gctx = tctx->gctx;
    bool destroy_tdata, destroy_tctx, destroy_gctx;

    malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    assert(tctx->cnts.curobjs == 0);
    assert(tctx->cnts.curbytes == 0);
    assert(!opt_prof_accum);
    assert(tctx->cnts.accumobjs == 0);
    assert(tctx->cnts.accumbytes == 0);

    ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
    destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    switch (tctx->state) {
    case prof_tctx_state_nominal:
        tctx_tree_remove(&gctx->tctxs, tctx);
        destroy_tctx = true;
        if (prof_gctx_should_destroy(gctx)) {
            /*
             * Increment gctx->nlimbo in order to keep another
             * thread from winning the race to destroy gctx while
             * this one has gctx->lock dropped.  Without this, it
             * would be possible for another thread to:
             *
             * 1) Sample an allocation associated with gctx.
             * 2) Deallocate the sampled object.
             * 3) Successfully prof_gctx_try_destroy(gctx).
             *
             * The result would be that gctx no longer exists by
             * the time this thread accesses it in
             * prof_gctx_try_destroy().
             */
            gctx->nlimbo++;
            destroy_gctx = true;
        } else {
            destroy_gctx = false;
        }
        break;
    case prof_tctx_state_dumping:
        /*
         * A dumping thread needs tctx to remain valid until dumping
         * has finished.  Change state such that the dumping thread
         * will complete destruction during a late dump iteration
         * phase.
         */
        tctx->state = prof_tctx_state_purgatory;
        destroy_tctx = false;
        destroy_gctx = false;
        break;
    default:
        not_reached();
        destroy_tctx = false;
        destroy_gctx = false;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    if (destroy_gctx) {
        prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
            tdata);
    }

    malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    if (destroy_tdata) {
        prof_tdata_destroy(tsd, tdata, false);
    }

    if (destroy_tctx) {
        idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
    }
}

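/*
 * Look up (or create) the gctx associated with bt.  The gctx is allocated
 * outside of bt2gctx_mtx and the hash is then rechecked under the lock, so a
 * freshly created gctx is discarded if another thread won the race to insert
 * one for the same backtrace.
 */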
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
    union {
        prof_gctx_t *p;
        void *v;
    } gctx, tgctx;
    union {
        prof_bt_t *p;
        void *v;
    } btkey;
    bool new_gctx;

    prof_enter(tsd, tdata);
    if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
        /* bt has never been seen before.  Insert it. */
        prof_leave(tsd, tdata);
        tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
        if (tgctx.v == NULL) {
            return true;
        }
        prof_enter(tsd, tdata);
        if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
            gctx.p = tgctx.p;
            btkey.p = &gctx.p->bt;
            if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
                /* OOM. */
                prof_leave(tsd, tdata);
                idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
                    true, true);
                return true;
            }
            new_gctx = true;
        } else {
            new_gctx = false;
        }
    } else {
        tgctx.v = NULL;
        new_gctx = false;
    }

    if (!new_gctx) {
        /*
         * Increment nlimbo, in order to avoid a race condition with
         * prof_tctx_destroy()/prof_gctx_try_destroy().
         */
        malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
        gctx.p->nlimbo++;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
        new_gctx = false;

        if (tgctx.v != NULL) {
            /* Lost race to insert. */
            idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
                true);
        }
    }
    prof_leave(tsd, tdata);

    *p_btkey = btkey.v;
    *p_gctx = gctx.p;
    *p_new_gctx = new_gctx;
    return false;
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
    union {
        prof_tctx_t *p;
        void *v;
    } ret;
    prof_tdata_t *tdata;
    bool not_found;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL) {
        return NULL;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
    not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
    if (!not_found) { /* Note double negative! */
        ret.p->prepared = true;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
    if (not_found) {
        void *btkey;
        prof_gctx_t *gctx;
        bool new_gctx, error;

        /*
         * This thread's cache lacks bt.  Look for it in the global
         * cache.
         */
        if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
            &new_gctx)) {
            return NULL;
        }

        /* Link a prof_tctx_t into gctx for this thread. */
        ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
            sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
            arena_ichoose(tsd, NULL), true);
        if (ret.p == NULL) {
            if (new_gctx) {
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            }
            return NULL;
        }
        ret.p->tdata = tdata;
        ret.p->thr_uid = tdata->thr_uid;
        ret.p->thr_discrim = tdata->thr_discrim;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        ret.p->gctx = gctx;
        ret.p->tctx_uid = tdata->tctx_uid_next++;
        ret.p->prepared = true;
        ret.p->state = prof_tctx_state_initializing;
        malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
        error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
        malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
        if (error) {
            if (new_gctx) {
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            }
            idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
            return NULL;
        }
        malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
        ret.p->state = prof_tctx_state_nominal;
        tctx_tree_insert(&gctx->tctxs, ret.p);
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    }

    return ret.p;
}

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless
 * heap profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled.  Avoiding floating point code
 * is important on memory-constrained systems, but it also enables a
 * workaround for versions of glibc that don't properly save/restore floating
 * point registers during dynamic lazy symbol loading (which internally calls
 * into whatever malloc implementation happens to be integrated into the
 * application).  Note that some compilers (e.g. gcc 4.8) may use floating
 * point registers for fast memory moves, so jemalloc must be compiled with
 * such optimizations disabled (e.g. -mno-sse) in order for the workaround to
 * be complete.
 */
void
prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    if (!config_prof) {
        return;
    }

    if (lg_prof_sample == 0) {
        tdata->bytes_until_sample = 0;
        return;
    }

    /*
     * Compute sample interval as a geometrically distributed random
     * variable with mean (2^lg_prof_sample).
     *
     *                               __          __
     *                              |   log(u)    |                    1
     * tdata->bytes_until_sample = | ------------- |, where p = ---------------
     *                              |  log(1-p)   |              lg_prof_sample
     *                                                          2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
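    /*
     * Example (derived from the formula above): with lg_prof_sample == 19,
     * p == 2^-19, so the computed intervals average 2^19 bytes (512 KiB)
     * between sampled allocations.
     */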
    r = prng_lg_range_u64(&tdata->prng_state, 53);
    u = (double)r * (1.0/9007199254740992.0L);
    tdata->bytes_until_sample = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
        + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
    size_t *tdata_count = (size_t *)arg;

    (*tdata_count)++;

    return NULL;
}

size_t
prof_tdata_count(void) {
    size_t tdata_count = 0;
    tsdn_t *tsdn;

    tsdn = tsdn_fetch();
    malloc_mutex_lock(tsdn, &tdatas_mtx);
    tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
        (void *)&tdata_count);
    malloc_mutex_unlock(tsdn, &tdatas_mtx);

    return tdata_count;
}

size_t
prof_bt_count(void) {
    size_t bt_count;
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL) {
        return 0;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
    bt_count = ckh_count(&bt2gctx);
    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    return bt_count;
}
#endif

static int
prof_dump_open_impl(bool propagate_err, const char *filename) {
    int fd;

    fd = creat(filename, 0644);
    if (fd == -1 && !propagate_err) {
        malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
            filename);
        if (opt_abort) {
            abort();
        }
    }

    return fd;
}
prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;

static bool
prof_dump_flush(bool propagate_err) {
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (!propagate_err) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort) {
                abort();
            }
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return ret;
}

static bool
prof_dump_close(bool propagate_err) {
    bool ret;

    assert(prof_dump_fd != -1);
    ret = prof_dump_flush(propagate_err);
    close(prof_dump_fd);
    prof_dump_fd = -1;

    return ret;
}

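/*
 * Buffered output: append s to prof_dump_buf, flushing to prof_dump_fd
 * whenever the buffer fills.  Returns true only if propagate_err is set and
 * a flush fails.
 */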
static bool
prof_dump_write(bool propagate_err, const char *s) {
    size_t i, slen, n;

    cassert(config_prof);

    i = 0;
    slen = strlen(s);
    while (i < slen) {
        /* Flush the buffer if it is full. */
        if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
            if (prof_dump_flush(propagate_err) && propagate_err) {
                return true;
            }
        }

        if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
            /* Finish writing. */
            n = slen - i;
        } else {
            /* Write as much of s as will fit. */
            n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
        }
        memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
        prof_dump_buf_end += n;
        i += n;
    }

    return false;
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...) {
    bool ret;
    va_list ap;
    char buf[PROF_PRINTF_BUFSIZE];

    va_start(ap, format);
    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    va_end(ap);
    ret = prof_dump_write(propagate_err, buf);

    return ret;
}

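/*
 * The tctx iterators below implement the phases of a dump: the merge pass
 * snapshots each tctx's counts into dump_cnts and sums them into the
 * enclosing tdata/gctx, the dump pass emits one record per tctx, and the
 * finish pass returns tctx's to nominal state (handing any that entered
 * purgatory mid-dump back to the caller for destruction).
 */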
Jason Evans602c8e02014-08-18 16:22:13 -07001055static void
Jason Evansc4c25922017-01-15 16:56:30 -08001056prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001057 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001058
Jason Evansc1e00ef2016-05-10 22:21:10 -07001059 malloc_mutex_lock(tsdn, tctx->gctx->lock);
Jason Evans764b0002015-03-14 14:01:35 -07001060
1061 switch (tctx->state) {
1062 case prof_tctx_state_initializing:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001063 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001064 return;
Jason Evans764b0002015-03-14 14:01:35 -07001065 case prof_tctx_state_nominal:
1066 tctx->state = prof_tctx_state_dumping;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001067 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001068
Jason Evans764b0002015-03-14 14:01:35 -07001069 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
Jason Evans3a81cbd2014-08-16 12:58:55 -07001070
Jason Evans764b0002015-03-14 14:01:35 -07001071 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1072 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1073 if (opt_prof_accum) {
1074 tdata->cnt_summed.accumobjs +=
1075 tctx->dump_cnts.accumobjs;
1076 tdata->cnt_summed.accumbytes +=
1077 tctx->dump_cnts.accumbytes;
1078 }
1079 break;
1080 case prof_tctx_state_dumping:
1081 case prof_tctx_state_purgatory:
1082 not_reached();
Jason Evans602c8e02014-08-18 16:22:13 -07001083 }
1084}
1085
Jason Evans602c8e02014-08-18 16:22:13 -07001086static void
Jason Evansc4c25922017-01-15 16:56:30 -08001087prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001088 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001089
Jason Evans602c8e02014-08-18 16:22:13 -07001090 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1091 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1092 if (opt_prof_accum) {
1093 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1094 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1095 }
1096}
1097
Jason Evans602c8e02014-08-18 16:22:13 -07001098static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001099prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001100 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evansb2c0d632016-04-13 23:36:15 -07001101
Jason Evansc1e00ef2016-05-10 22:21:10 -07001102 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001103
1104 switch (tctx->state) {
1105 case prof_tctx_state_nominal:
1106 /* New since dumping started; ignore. */
1107 break;
1108 case prof_tctx_state_dumping:
1109 case prof_tctx_state_purgatory:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001110 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
Jason Evans602c8e02014-08-18 16:22:13 -07001111 break;
1112 default:
1113 not_reached();
Jason Evans3a81cbd2014-08-16 12:58:55 -07001114 }
1115
Jason Evansf4086432017-01-19 18:15:45 -08001116 return NULL;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001117}
1118
Jason Evansb2c0d632016-04-13 23:36:15 -07001119struct prof_tctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001120 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001121 bool propagate_err;
1122};
1123
Jason Evans602c8e02014-08-18 16:22:13 -07001124static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001125prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001126 struct prof_tctx_dump_iter_arg_s *arg =
1127 (struct prof_tctx_dump_iter_arg_s *)opaque;
1128
Jason Evansc1e00ef2016-05-10 22:21:10 -07001129 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001130
Jason Evansfb64ec22015-09-21 18:37:18 -07001131 switch (tctx->state) {
1132 case prof_tctx_state_initializing:
1133 case prof_tctx_state_nominal:
1134 /* Not captured by this dump. */
1135 break;
1136 case prof_tctx_state_dumping:
1137 case prof_tctx_state_purgatory:
Jason Evansb2c0d632016-04-13 23:36:15 -07001138 if (prof_dump_printf(arg->propagate_err,
Jason Evansfb64ec22015-09-21 18:37:18 -07001139 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1140 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1141 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001142 tctx->dump_cnts.accumbytes)) {
Jason Evansf4086432017-01-19 18:15:45 -08001143 return tctx;
Jason Evansc4c25922017-01-15 16:56:30 -08001144 }
Jason Evansfb64ec22015-09-21 18:37:18 -07001145 break;
1146 default:
1147 not_reached();
1148 }
Jason Evansf4086432017-01-19 18:15:45 -08001149 return NULL;
Jason Evans602c8e02014-08-18 16:22:13 -07001150}

static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;
	prof_tctx_t *ret;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return ret;
}

static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
	cassert(config_prof);

	malloc_mutex_lock(tsdn, gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(tsdn, gctx->lock);
}

struct prof_gctx_merge_iter_arg_s {
	tsdn_t	*tsdn;
	size_t	leak_ngctx;
};

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	struct prof_gctx_merge_iter_arg_s *arg =
	    (struct prof_gctx_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
	    (void *)arg->tsdn);
	if (gctx->cnt_summed.curobjs != 0) {
		arg->leak_ngctx++;
	}
	malloc_mutex_unlock(arg->tsdn, gctx->lock);

	return NULL;
}

static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree.  Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter,
				    (void *)tsd_tsdn(tsd));
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd_tsdn(tsd), to_destroy,
					    NULL, NULL, true, true);
				} else {
					next = NULL;
				}
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		}
	}
}

struct prof_tdata_merge_iter_arg_s {
	tsdn_t		*tsdn;
	prof_cnt_t	cnt_all;
};

static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *opaque) {
	struct prof_tdata_merge_iter_arg_s *arg =
	    (struct prof_tdata_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t	*p;
			void		*v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);) {
			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
		}

		arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
		arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
		if (opt_prof_accum) {
			arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
			arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
		}
	} else {
		tdata->dumping = false;
	}
	malloc_mutex_unlock(arg->tsdn, tdata->lock);

	return NULL;
}

static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	bool propagate_err = *(bool *)arg;

	if (!tdata->dumping) {
		return NULL;
	}

	if (prof_dump_printf(propagate_err,
	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
	    tdata->thr_uid, tdata->cnt_summed.curobjs,
	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
	    tdata->cnt_summed.accumbytes,
	    (tdata->thread_name != NULL) ? " " : "",
	    (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
		return tdata;
	}
	return NULL;
}

static bool
prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
    const prof_cnt_t *cnt_all) {
	bool ret;

	if (prof_dump_printf(propagate_err,
	    "heap_v2/%"FMTu64"\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &tdatas_mtx);
	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
	    (void *)&propagate_err) != NULL);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);
	return ret;
}
prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
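
/*
 * Editorial sketch (the numbers are made up, only the layout follows the
 * format strings above): with lg_prof_sample == 19 and one dumping thread,
 * prof_dump_header_impl() plus prof_tdata_dump_iter() emit something like:
 *
 *   heap_v2/524288
 *     t*: 42: 102400 [0: 0]
 *     t0: 42: 102400 [0: 0] worker
 */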

static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
    const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
	bool ret;
	unsigned i;
	struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;

	cassert(config_prof);
	malloc_mutex_assert_owner(tsdn, gctx->lock);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "@")) {
		ret = true;
		goto label_return;
	}
	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_printf(propagate_err,
	    "\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	prof_tctx_dump_iter_arg.tsdn = tsdn;
	prof_tctx_dump_iter_arg.propagate_err = propagate_err;
	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
	    (void *)&prof_tctx_dump_iter_arg) != NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return ret;
}
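
/*
 * Editorial sketch (addresses and counts are made up): each dumped gctx
 * becomes one "@" line listing the raw backtrace PCs, followed by the
 * per-context total and one line per contributing thread:
 *
 *   @ 0x7f3a12345678 0x7f3a12345abc
 *     t*: 3: 12288 [0: 0]
 *     t0: 3: 12288 [0: 0]
 */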

#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...) {
	int mfd;
	va_list ap;
	char filename[PATH_MAX + 1];

	va_start(ap, format);
	malloc_vsnprintf(filename, sizeof(filename), format, ap);
	va_end(ap);

#if defined(O_CLOEXEC)
	mfd = open(filename, O_RDONLY | O_CLOEXEC);
#else
	mfd = open(filename, O_RDONLY);
	if (mfd != -1) {
		fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
	}
#endif

	return mfd;
}
#endif

static int
prof_getpid(void) {
#ifdef _WIN32
	return GetCurrentProcessId();
#else
	return getpid();
#endif
}

static bool
prof_dump_maps(bool propagate_err) {
	bool ret;
	int mfd;

	cassert(config_prof);
#ifdef __FreeBSD__
	mfd = prof_open_maps("/proc/curproc/map");
#elif defined(_WIN32)
	mfd = -1; // Not implemented
#else
	{
		int pid = prof_getpid();

		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
		if (mfd == -1) {
			mfd = prof_open_maps("/proc/%d/maps", pid);
		}
	}
#endif
	if (mfd != -1) {
		ssize_t nread;

		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err) {
			ret = true;
			goto label_return;
		}
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = malloc_read_fd(mfd,
			    &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
			    - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1) {
		close(mfd);
	}
	return ret;
}
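
/*
 * Editorial sketch (paths and addresses are made up): the section appended
 * by prof_dump_maps() is the raw /proc/<pid>/maps content, e.g.:
 *
 *   MAPPED_LIBRARIES:
 *   00400000-00452000 r-xp 00000000 08:02 173521  /usr/bin/app
 *   7f3a12000000-7f3a121c0000 r-xp 00000000 08:02 135522  /lib/libc.so.6
 *
 * jeprof uses these mappings to symbolize the raw PCs in the "@" lines.
 */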

/*
 * See prof_sample_threshold_update() comment for why the body of this function
 * is conditionally compiled.
 */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename) {
#ifdef JEMALLOC_PROF
	/*
	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result
	 * may differ slightly from what jeprof reports, because here we scale
	 * the summary values, whereas jeprof scales each context individually
	 * and reports the sums of the scaled values.
	 */
	if (cnt_all->curbytes != 0) {
		double sample_period = (double)((uint64_t)1 << lg_prof_sample);
		double ratio = (((double)cnt_all->curbytes) /
		    (double)cnt_all->curobjs) / sample_period;
		double scale_factor = 1.0 / (1.0 - exp(-ratio));
		uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
		    * scale_factor);
		uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
		    scale_factor);

		malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
		    " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
		    curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
		    1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
		    filename);
	}
#endif
}
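
/*
 * Background on the scale factor (editorial sketch, not authoritative): with
 * an average sampling period of 2^lg_prof_sample bytes, an object of size s
 * is sampled with probability p ~= 1 - exp(-s / sample_period).  Dividing the
 * sampled counts by p unbiases them; prof_leakcheck() approximates s by the
 * mean sampled object size, hence scale_factor = 1 / (1 - exp(-ratio)).  For
 * example, with sample_period = 524288 and a mean object size of 262144,
 * ratio = 0.5 and each sample stands for roughly 1 / (1 - e^-0.5) ~= 2.54
 * objects of that size.
 */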

struct prof_gctx_dump_iter_arg_s {
	tsdn_t	*tsdn;
	bool	propagate_err;
};

static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	prof_gctx_t *ret;
	struct prof_gctx_dump_iter_arg_s *arg =
	    (struct prof_gctx_dump_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);

	if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
	    gctxs)) {
		ret = gctx;
		goto label_return;
	}

	ret = NULL;
label_return:
	malloc_mutex_unlock(arg->tsdn, gctx->lock);
	return ret;
}

static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    prof_gctx_tree_t *gctxs) {
	size_t tabind;
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;

	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
	}

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their
	 * tctx stats and merge them into the associated gctx's.
	 */
	prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
	    (void *)prof_tdata_merge_iter_arg);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_merge_iter_arg->leak_ngctx = 0;
	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
	    (void *)prof_gctx_merge_iter_arg);

	prof_leave(tsd, tdata);
}

static bool
prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
    prof_gctx_tree_t *gctxs) {
	/* Create dump file. */
	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
		return true;
	}

	/* Dump profile header. */
	if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
	    &prof_tdata_merge_iter_arg->cnt_all)) {
		goto label_write_error;
	}

	/* Dump per gctx profile stats. */
	prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_dump_iter_arg->propagate_err = propagate_err;
	if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
	    (void *)prof_gctx_dump_iter_arg) != NULL) {
		goto label_write_error;
	}

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err)) {
		goto label_write_error;
	}

	if (prof_dump_close(propagate_err)) {
		return true;
	}

	return false;
label_write_error:
	prof_dump_close(propagate_err);
	return true;
}

static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);

	prof_gctx_tree_t gctxs;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck,
	    tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
	    &prof_gctx_dump_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
	post_reentrancy(tsd);

	if (err) {
		return true;
	}

	if (leakcheck) {
		prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
		    prof_gctx_merge_iter_arg.leak_ngctx, filename);
	}
	return false;
}
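
/*
 * Editorial note: prof_dump() is deliberately split into three phases --
 * prof_dump_prep() snapshots and merges counters under the data-structure
 * locks, prof_dump_file() serializes the snapshot to the dump file, and
 * prof_gctx_finish() releases the gctx's held in limbo.  prof_cnt_all()
 * below reuses the first and last phases to compute totals without writing
 * a file.
 */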

#ifdef JEMALLOC_JET
void
prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes) {
	tsd_t *tsd;
	prof_tdata_t *tdata;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	prof_gctx_tree_t gctxs;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		if (curobjs != NULL) {
			*curobjs = 0;
		}
		if (curbytes != NULL) {
			*curbytes = 0;
		}
		if (accumobjs != NULL) {
			*accumobjs = 0;
		}
		if (accumbytes != NULL) {
			*accumbytes = 0;
		}
		return;
	}

	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	if (curobjs != NULL) {
		*curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
	}
	if (curbytes != NULL) {
		*curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
	}
	if (accumobjs != NULL) {
		*accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
	}
	if (accumbytes != NULL) {
		*accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
	}
}
#endif

#define DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
#define VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq) {
	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c.heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}
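
/*
 * Editorial sketch (the pid and sequence numbers are made up), assuming the
 * default opt_prof_prefix of "jeprof": the final dump is named like
 * "jeprof.12345.0.f.heap", interval dumps like "jeprof.12345.1.i0.heap", and
 * manual dumps like "jeprof.12345.2.m0.heap".
 */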

static void
prof_fdump(void) {
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);
	assert(opt_prof_final);
	assert(opt_prof_prefix[0] != '\0');

	if (!prof_booted) {
		return;
	}
	tsd = tsd_fetch();
	assert(tsd_reentrancy_level_get(tsd) == 0);

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump_filename(filename, 'f', VSEQ_INVALID);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump(tsd, false, filename, opt_prof_leak);
}

bool
prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
	cassert(config_prof);

#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
		return true;
	}
	prof_accum->accumbytes = 0;
#else
	atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
#endif
	return false;
}

void
prof_idump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[PATH_MAX + 1];
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

bool
prof_mdump(tsd_t *tsd, const char *filename) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	if (!opt_prof || !prof_booted) {
		return true;
	}
	char filename_buf[DUMP_FILENAME_BUFSIZE];
	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		if (opt_prof_prefix[0] == '\0') {
			return true;
		}
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return prof_dump(tsd, true, filename, false);
}
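
/*
 * Illustrative caller-side sketch (not part of this file): applications
 * typically reach prof_mdump() through the "prof.dump" mallctl, e.g.:
 *
 *   const char *fname = "/tmp/app.heap";	// or NULL to auto-generate
 *   if (mallctl("prof.dump", NULL, NULL, (void *)&fname,
 *       sizeof(const char *)) != 0) {
 *   	// dump failed (profiling disabled, or the file could not be written)
 *   }
 */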

void
prof_gdump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_gdump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[DUMP_FILENAME_BUFSIZE];
		malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

static void
prof_bt_hash(const void *key, size_t r_hash[2]) {
	prof_bt_t *bt = (prof_bt_t *)key;

	cassert(config_prof);

	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2) {
	const prof_bt_t *bt1 = (prof_bt_t *)k1;
	const prof_bt_t *bt2 = (prof_bt_t *)k2;

	cassert(config_prof);

	if (bt1->len != bt2->len) {
		return false;
	}
	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

static uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) {
	uint64_t thr_uid;

	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
	thr_uid = next_thr_uid;
	next_thr_uid++;
	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);

	return thr_uid;
}

static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
    char *thread_name, bool active) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
	    sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (tdata == NULL) {
		return NULL;
	}

	tdata->lock = prof_tdata_mutex_choose(thr_uid);
	tdata->thr_uid = thr_uid;
	tdata->thr_discrim = thr_discrim;
	tdata->thread_name = thread_name;
	tdata->attached = true;
	tdata->expired = false;
	tdata->tctx_uid_next = 0;

	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
	    prof_bt_keycomp)) {
		idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
		return NULL;
	}

	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
	prof_sample_threshold_update(tdata);

	tdata->enq = false;
	tdata->enq_idump = false;
	tdata->enq_gdump = false;

	tdata->dumping = false;
	tdata->active = active;

	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_insert(&tdatas, tdata);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	return tdata;
}

prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
	return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}

static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
	if (tdata->attached && !even_if_attached) {
		return false;
	}
	if (ckh_count(&tdata->bt2tctx) != 0) {
		return false;
	}
	return true;
}

static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsdn, tdata->lock);

	return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}

static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);

	tdata_tree_remove(&tdatas, tdata);

	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
	}
	ckh_delete(tsd, &tdata->bt2tctx);
	idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}

static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
}

static void
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	if (tdata->attached) {
		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
		    true);
		/*
		 * Only detach if !destroy_tdata, because detaching would allow
		 * another thread to win the race to destroy tdata.
		 */
		if (!destroy_tdata) {
			tdata->attached = false;
		}
		tsd_prof_tdata_set(tsd, NULL);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (destroy_tdata) {
		prof_tdata_destroy(tsd, tdata, true);
	}
}

prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
	uint64_t thr_uid = tdata->thr_uid;
	uint64_t thr_discrim = tdata->thr_discrim + 1;
	char *thread_name = (tdata->thread_name != NULL) ?
	    prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
	bool active = tdata->active;

	prof_tdata_detach(tsd, tdata);
	return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
	    active);
}

static bool
prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsdn, tdata->lock);
	if (!tdata->expired) {
		tdata->expired = true;
		destroy_tdata = tdata->attached ? false :
		    prof_tdata_should_destroy(tsdn, tdata, false);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsdn, tdata->lock);

	return destroy_tdata;
}

static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}

void
prof_reset(tsd_t *tsd, size_t lg_sample) {
	prof_tdata_t *next;

	assert(lg_sample < (sizeof(uint64_t) << 3));

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);

	lg_prof_sample = lg_sample;

	next = NULL;
	do {
		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
		    prof_tdata_reset_iter, (void *)tsd);
		if (to_destroy != NULL) {
			next = tdata_tree_next(&tdatas, to_destroy);
			prof_tdata_destroy_locked(tsd, to_destroy, false);
		} else {
			next = NULL;
		}
	} while (next != NULL);

	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
}
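
/*
 * Illustrative caller-side sketch (not part of this file): prof_reset() is
 * reachable through the "prof.reset" mallctl, which discards accumulated
 * profile data and optionally changes the sample interval, e.g.:
 *
 *   size_t lg_sample = 20;	// resample every ~1 MiB on average
 *   mallctl("prof.reset", NULL, NULL, (void *)&lg_sample, sizeof(size_t));
 */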

void
prof_tdata_cleanup(tsd_t *tsd) {
	prof_tdata_t *tdata;

	if (!config_prof) {
		return;
	}

	tdata = tsd_prof_tdata_get(tsd);
	if (tdata != NULL) {
		prof_tdata_detach(tsd, tdata);
	}
}

bool
prof_active_get(tsdn_t *tsdn) {
	bool prof_active_current;

	malloc_mutex_lock(tsdn, &prof_active_mtx);
	prof_active_current = prof_active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	return prof_active_current;
}

bool
prof_active_set(tsdn_t *tsdn, bool active) {
	bool prof_active_old;

	malloc_mutex_lock(tsdn, &prof_active_mtx);
	prof_active_old = prof_active;
	prof_active = active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	return prof_active_old;
}

const char *
prof_thread_name_get(tsd_t *tsd) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return "";
	}
	return (tdata->thread_name != NULL ? tdata->thread_name : "");
}

static char *
prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
	char *ret;
	size_t size;

	if (thread_name == NULL) {
		return NULL;
	}

	size = strlen(thread_name) + 1;
	if (size == 1) {
		return "";
	}

	ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (ret == NULL) {
		return NULL;
	}
	memcpy(ret, thread_name, size);
	return ret;
}

int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
	prof_tdata_t *tdata;
	unsigned i;
	char *s;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return EAGAIN;
	}

	/* Validate input: only printable and blank characters are allowed. */
	if (thread_name == NULL) {
		return EFAULT;
	}
	for (i = 0; thread_name[i] != '\0'; i++) {
		char c = thread_name[i];
		if (!isgraph(c) && !isblank(c)) {
			return EFAULT;
		}
	}

	s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
	if (s == NULL) {
		return EAGAIN;
	}

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
		tdata->thread_name = NULL;
	}
	/* An empty name (s == "") leaves the thread name cleared. */
	if (strlen(s) > 0) {
		tdata->thread_name = s;
	}
	return 0;
}
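
/*
 * Illustrative usage sketch (not part of the original source): applications
 * normally reach prof_thread_name_set() through the documented
 * "thread.prof.name" mallctl rather than by calling it directly:
 *
 *	const char *name = "worker-3";
 *	int err = mallctl("thread.prof.name", NULL, NULL, (void *)&name,
 *	    sizeof(name));
 *
 * err is 0 on success, EFAULT if the name fails the isgraph()/isblank()
 * validation above, or EAGAIN if tdata lookup or the name copy fails.
 */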

bool
prof_thread_active_get(tsd_t *tsd) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return false;
	}
	return tdata->active;
}

bool
prof_thread_active_set(tsd_t *tsd, bool active) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}
	tdata->active = active;
	return false;
}
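
/*
 * Note the asymmetric conventions above: the getter returns the per-thread
 * sampling flag itself (false when no tdata is available), while the setter
 * returns true only on failure.  A hypothetical round trip through the
 * documented "thread.prof.active" mallctl:
 *
 *	bool old, new = false;
 *	size_t sz = sizeof(old);
 *	// Fetch the previous setting and disable sampling for this thread.
 *	mallctl("thread.prof.active", (void *)&old, &sz, (void *)&new,
 *	    sizeof(new));
 */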

bool
prof_thread_active_init_get(tsdn_t *tsdn) {
	bool active_init;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init;
}

bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
	bool active_init_old;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init_old;
}
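
/*
 * prof_thread_active_init is the sampling state inherited by threads created
 * after the change; it is exposed as the documented
 * "prof.thread_active_init" mallctl.  A hedged sketch:
 *
 *	bool old, new = false;
 *	size_t sz = sizeof(old);
 *	// Threads created from here on start with sampling inactive; old
 *	// receives the previous setting, mirroring the setter's return value.
 *	mallctl("prof.thread_active_init", (void *)&old, &sz, (void *)&new,
 *	    sizeof(new));
 */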

bool
prof_gdump_get(tsdn_t *tsdn) {
	bool prof_gdump_current;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_current;
}

bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
	bool prof_gdump_old;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_old;
}
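
/*
 * prof_gdump_val gates the high-water-mark trigger: when true, a profile is
 * dumped each time total virtual memory exceeds its previous maximum.  It is
 * exposed as the documented "prof.gdump" mallctl, e.g.:
 *
 *	bool enable = true;
 *	mallctl("prof.gdump", NULL, NULL, (void *)&enable, sizeof(enable));
 */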

void
prof_boot0(void) {
	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void) {
	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}
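
/*
 * Worked example for the shift above: with opt_lg_prof_interval set to 30
 * (e.g. MALLOC_CONF="prof:true,lg_prof_interval:30"), prof_interval becomes
 * 1 << 30 = 1073741824, i.e. an interval-triggered dump roughly every 1 GiB
 * of allocation activity.  The default of -1 leaves prof_interval at 0,
 * which disables interval dumps entirely.
 */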

bool
prof_boot2(tsd_t *tsd) {
	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
		    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
		    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx,
		    "prof_thread_active_init",
		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp)) {
			return true;
		}
		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
		    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
			return true;
		}

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
		    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
			return true;
		}

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
		    WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
		    WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
			return true;
		}
		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
		    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (gctx_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
			    WITNESS_RANK_PROF_GCTX,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (tdata_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
			    WITNESS_RANK_PROF_TDATA,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return false;
}
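
/*
 * Everything prof_boot2() sets up is driven by option values parsed earlier
 * during bootstrap.  A hypothetical configuration exercising this path (all
 * documented opt.* settings):
 *
 *	MALLOC_CONF="prof:true,lg_prof_sample:19,prof_final:true" ./app
 *
 * prof:true enables the whole block above; lg_prof_sample:19 samples one
 * allocation per ~512 KiB (2^19 bytes) on average; prof_final registers
 * prof_fdump() with atexit() so a final profile is written at exit.
 */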

void
prof_prefork0(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
		malloc_mutex_prefork(tsdn, &tdatas_mtx);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
		}
	}
}

void
prof_prefork1(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		malloc_mutex_prefork(tsdn, &prof_active_mtx);
		malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
	}
}
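
/*
 * The prefork work is split into two phases so that the caller can
 * interleave it with other subsystems' prefork hooks and keep all lock
 * acquisitions in witness-rank order before fork(2).  (This is an inference
 * from the witness ranks; see the fork handler in jemalloc.c for the actual
 * call sequence.)
 */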

void
prof_postfork_parent(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_parent(tsdn,
		    &prof_thread_active_init_mtx);
		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
	}
}

void
prof_postfork_child(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
	}
}
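
/*
 * The pre/postfork functions pair up in the usual pthread_atfork() pattern,
 * with the postfork variants releasing (or, in the child, reinitializing)
 * the locks in exactly the reverse of the acquisition order.  A minimal
 * sketch of the pairing, with wrapper names invented for illustration; the
 * real allocator wires these up through its own fork handlers:
 *
 *	static void prefork(void) {
 *		tsdn_t *tsdn = tsdn_fetch();
 *		prof_prefork0(tsdn);
 *		prof_prefork1(tsdn);
 *	}
 *	static void postfork_parent(void) { prof_postfork_parent(tsdn_fetch()); }
 *	static void postfork_child(void) { prof_postfork_child(tsdn_fetch()); }
 *
 *	pthread_atfork(prefork, postfork_parent, postfork_child);
 */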

/******************************************************************************/