#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"

/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
/*
 * We have a circular dependency -- jemalloc_internal.h tells us if we should
 * use libgcc's unwinding functionality, but after we've included that, we've
 * already hooked _Unwind_Backtrace.  We'll temporarily disable hooking.
 */
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing, though, is that gctx's are
 * ephemeral, and destroying mutexes causes complications for systems that
 * allocate when creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for
 * more than one tdata at the same time, even though a gctx lock may be
 * acquired while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
/* Non static to enable profiling. */
malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

static int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_thr_discrim = a->thr_discrim;
		uint64_t b_thr_discrim = b->thr_discrim;
		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
		    b_thr_discrim);
		if (ret == 0) {
			uint64_t a_tctx_uid = a->tctx_uid;
			uint64_t b_tctx_uid = b->tctx_uid;
			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
			    b_tctx_uid);
		}
	}
	return ret;
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0) {
		ret = (a_len > b_len) - (a_len < b_len);
	}
	return ret;
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return ret;
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)
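
/*
 * Each rb_gen() invocation above expands into a full static red-black tree
 * implementation, defining the tctx_tree_*(), gctx_tree_*(), and
 * tdata_tree_*() functions (new/empty/insert/remove/first/iter/...) used
 * throughout this file.
 */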

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (updated) {
		/*
		 * Compute a new sample threshold.  This isn't very important in
		 * practice, because this function is rarely executed, so the
		 * potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL) {
			prof_sample_threshold_update(tdata);
		}
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
			prof_tctx_destroy(tsd, tctx);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
		}
	}
}

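/*
 * Sampled-object lifecycle: prof_malloc_sample_object() charges a sampled
 * allocation to its tctx at allocation time, and prof_free_sampled_object()
 * reverses that at deallocation time, destroying the tctx once it holds no
 * live objects (unless opt_prof_accum is keeping cumulative counts).
 */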
void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
	prof_tctx_set(tsdn, ptr, usize, NULL, tctx);

	malloc_mutex_lock(tsdn, tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
		prof_tctx_destroy(tsd, tctx);
	} else {
		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
	}
}

void
bt_init(prof_bt_t *bt, void **vec) {
	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

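/*
 * prof_enter()/prof_leave() bracket all accesses to bt2gctx.  Dump requests
 * (idump/gdump) that arrive while bt2gctx_mtx is held are queued on the
 * tdata's enq_idump/enq_gdump flags and replayed by prof_leave() after the
 * mutex is dropped.
 */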
static void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

static void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		if (gdump) {
			prof_gdump(tsd_tsdn(tsd));
		}
	}
}

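/*
 * prof_backtrace() has three configure-time implementations: libunwind,
 * libgcc's _Unwind_Backtrace(), and gcc's __builtin_return_address()
 * intrinsics.  If none is available, heap profiling has no way to capture
 * stacks, and reaching prof_backtrace() is a bug (not_reached()).
 */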
#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt) {
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0) {
		return;
	}
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
	cassert(config_prof);

	return _URC_NO_REASON;
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL) {
		return _URC_END_OF_STACK;
	}
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max) {
		return _URC_END_OF_STACK;
	}

	return _URC_NO_REASON;
}

void
prof_backtrace(prof_bt_t *bt) {
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt) {
#define BT_FRAME(i)							\
	if ((i) < PROF_BT_MAX) {					\
		void *p;						\
		if (__builtin_frame_address(i) == 0) {			\
			return;						\
		}							\
		p = __builtin_return_address(i);			\
		if (p == NULL) {					\
			return;						\
		}							\
		bt->vec[(i)] = p;					\
		bt->len = (i) + 1;					\
	} else {							\
		return;							\
	}

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt) {
	cassert(config_prof);
	not_reached();
}
#endif

static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
	unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

	return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
	return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}
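
/*
 * Lock striping: rather than one mutex per gctx/tdata, fixed-size tables of
 * locks (gctx_locks, tdata_locks) are shared.  gctx locks are handed out
 * round-robin via the cum_gctxs counter, and tdata locks are chosen by
 * thr_uid modulo the table size.
 */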

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
	    sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
	    true);
	if (gctx == NULL) {
		return NULL;
	}
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return gctx;
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata) {
	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
			not_reached();
		}
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	if (opt_prof_accum) {
		return false;
	}
	if (tctx->cnts.curobjs != 0) {
		return false;
	}
	if (tctx->prepared) {
		return false;
	}
	return true;
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
	if (opt_prof_accum) {
		return false;
	}
	if (!tctx_tree_empty(&gctx->tctxs)) {
		return false;
	}
	if (gctx->nlimbo != 0) {
		return false;
	}
	return true;
}

static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
	prof_tdata_t *tdata = tctx->tdata;
	prof_gctx_t *gctx = tctx->gctx;
	bool destroy_tdata, destroy_tctx, destroy_gctx;

	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);

	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
	destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped.  Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by the
			 * time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else {
			destroy_gctx = false;
		}
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished.  Change state such that the dumping thread will
		 * complete destruction during a late dump iteration phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
		    tdata);
	}

	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	if (destroy_tdata) {
		prof_tdata_destroy(tsd, tdata, false);
	}

	if (destroy_tctx) {
		idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
	}
}

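/*
 * Map bt to its canonical gctx, creating one if necessary.  The gctx is
 * speculatively created outside bt2gctx_mtx and discarded if another thread
 * wins the insertion race, which keeps allocation out of the critical
 * section.
 */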
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
	union {
		prof_gctx_t *p;
		void *v;
	} gctx, tgctx;
	union {
		prof_bt_t *p;
		void *v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before.  Insert it. */
		prof_leave(tsd, tdata);
		tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
		if (tgctx.v == NULL) {
			return true;
		}
		prof_enter(tsd, tdata);
		if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
			gctx.p = tgctx.p;
			btkey.p = &gctx.p->bt;
			if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
				/* OOM. */
				prof_leave(tsd, tdata);
				idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
				    true, true);
				return true;
			}
			new_gctx = true;
		} else {
			new_gctx = false;
		}
	} else {
		tgctx.v = NULL;
		new_gctx = false;
	}

	if (!new_gctx) {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
		new_gctx = false;

		if (tgctx.v != NULL) {
			/* Lost race to insert. */
			idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
			    true);
		}
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return false;
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
	union {
		prof_tctx_t *p;
		void *v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return NULL;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) { /* Note double negative! */
		ret.p->prepared = true;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (not_found) {
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt.  Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx)) {
			return NULL;
		}

		/* Link a prof_tctx_t into gctx for this thread. */
		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
		    sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
		    arena_ichoose(tsd, NULL), true);
		if (ret.p == NULL) {
			if (new_gctx) {
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			}
			return NULL;
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		ret.p->thr_discrim = tdata->thr_discrim;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
		if (error) {
			if (new_gctx) {
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			}
			idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
			return NULL;
		}
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	}

	return ret.p;
}
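
/*
 * Typical call sequence (a sketch for orientation, not code from this file):
 * a sampled allocation site captures its stack and maps it to a tctx roughly
 * as follows:
 *
 *	void *vec[PROF_BT_MAX];
 *	prof_bt_t bt;
 *	bt_init(&bt, vec);
 *	prof_backtrace(&bt);
 *	prof_tctx_t *tctx = prof_lookup(tsd, &bt);
 *
 * after which prof_malloc_sample_object() charges the allocation to tctx.
 */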
857
Jason Evansdc391ad2016-05-04 12:14:36 -0700858/*
859 * The bodies of this function and prof_leakcheck() are compiled out unless heap
860 * profiling is enabled, so that it is possible to compile jemalloc with
861 * floating point support completely disabled. Avoiding floating point code is
862 * important on memory-constrained systems, but it also enables a workaround for
863 * versions of glibc that don't properly save/restore floating point registers
864 * during dynamic lazy symbol loading (which internally calls into whatever
865 * malloc implementation happens to be integrated into the application). Note
866 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
867 * memory moves, so jemalloc must be compiled with such optimizations disabled
868 * (e.g.
869 * -mno-sse) in order for the workaround to be complete.
870 */
void
prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
	uint64_t r;
	double u;

	if (!config_prof) {
		return;
	}

	if (lg_prof_sample == 0) {
		tdata->bytes_until_sample = 0;
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                               __        __
	 *                              |  log(u)  |                    1
	 * tdata->bytes_until_sample = | -------- |, where p = ---------------
	 *                              | log(1-p) |             lg_prof_sample
	 *                                                      2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
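	/*
	 * Worked example with illustrative numbers: if lg_prof_sample is 19
	 * (a 512 KiB mean interval), then p = 2^-19, and a draw of u = 0.5
	 * yields ceil(log(0.5) / log(1 - 2^-19)) ~= 363409 bytes until the
	 * next sample.
	 */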
	r = prng_lg_range_u64(&tdata->prng_state, 53);
	u = (double)r * (1.0/9007199254740992.0L);
	tdata->bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return NULL;
}

size_t
prof_tdata_count(void) {
	size_t tdata_count = 0;
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	malloc_mutex_lock(tsdn, &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);

	return tdata_count;
}

size_t
prof_bt_count(void) {
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return 0;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	return bt_count;
}
#endif

static int
prof_dump_open_impl(bool propagate_err, const char *filename) {
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
		if (opt_abort) {
			abort();
		}
	}

	return fd;
}
prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;

static bool
prof_dump_flush(bool propagate_err) {
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (!propagate_err) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort) {
				abort();
			}
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return ret;
}

static bool
prof_dump_close(bool propagate_err) {
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return ret;
}

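/*
 * Buffered writer for profile dumps: bytes accumulate in prof_dump_buf and
 * are flushed to prof_dump_fd whenever the buffer fills or the dump is
 * closed.
 */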
static bool
prof_dump_write(bool propagate_err, const char *s) {
	size_t i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
			if (prof_dump_flush(propagate_err) && propagate_err) {
				return true;
			}
		}

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return false;
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...) {
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_dump_write(propagate_err, buf);

	return ret;
}

static void
prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	malloc_mutex_lock(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
		return;
	case prof_tctx_state_nominal:
		tctx->state = prof_tctx_state_dumping;
		malloc_mutex_unlock(tsdn, tctx->gctx->lock);

		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
		if (opt_prof_accum) {
			tdata->cnt_summed.accumobjs +=
			    tctx->dump_cnts.accumobjs;
			tdata->cnt_summed.accumbytes +=
			    tctx->dump_cnts.accumbytes;
		}
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		not_reached();
	}
}

static void
prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
	malloc_mutex_assert_owner(tsdn, gctx->lock);

	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
	if (opt_prof_accum) {
		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
	}
}

static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
		break;
	default:
		not_reached();
	}

	return NULL;
}

struct prof_tctx_dump_iter_arg_s {
	tsdn_t *tsdn;
	bool propagate_err;
};

static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
	struct prof_tctx_dump_iter_arg_s *arg =
	    (struct prof_tctx_dump_iter_arg_s *)opaque;

	malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
	case prof_tctx_state_nominal:
		/* Not captured by this dump. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		if (prof_dump_printf(arg->propagate_err,
		    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
		    "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
		    tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
		    tctx->dump_cnts.accumbytes)) {
			return tctx;
		}
		break;
	default:
		not_reached();
	}
	return NULL;
}

static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;
	prof_tctx_t *ret;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return ret;
}

static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
	cassert(config_prof);

	malloc_mutex_lock(tsdn, gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(tsdn, gctx->lock);
}

struct prof_gctx_merge_iter_arg_s {
	tsdn_t *tsdn;
	size_t leak_ngctx;
};

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	struct prof_gctx_merge_iter_arg_s *arg =
	    (struct prof_gctx_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
	    (void *)arg->tsdn);
	if (gctx->cnt_summed.curobjs != 0) {
		arg->leak_ngctx++;
	}
	malloc_mutex_unlock(arg->tsdn, gctx->lock);

	return NULL;
}

static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree.  Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001232 {
1233 prof_tctx_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001234
Jason Evans20c31de2014-10-02 23:01:10 -07001235 next = NULL;
1236 do {
1237 prof_tctx_t *to_destroy =
1238 tctx_tree_iter(&gctx->tctxs, next,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001239 prof_tctx_finish_iter,
1240 (void *)tsd_tsdn(tsd));
Jason Evans20c31de2014-10-02 23:01:10 -07001241 if (to_destroy != NULL) {
1242 next = tctx_tree_next(&gctx->tctxs,
1243 to_destroy);
1244 tctx_tree_remove(&gctx->tctxs,
1245 to_destroy);
Jason Evans51a2ec92017-03-17 02:45:12 -07001246 idalloctm(tsd_tsdn(tsd), to_destroy,
Qi Wangbfa530b2017-04-07 14:12:30 -07001247 NULL, NULL, true, true);
Jason Evansc4c25922017-01-15 16:56:30 -08001248 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07001249 next = NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08001250 }
Jason Evans20c31de2014-10-02 23:01:10 -07001251 } while (next != NULL);
1252 }
1253 gctx->nlimbo--;
1254 if (prof_gctx_should_destroy(gctx)) {
1255 gctx->nlimbo++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001256 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evansc93ed812014-10-30 16:50:33 -07001257 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08001258 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001259 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evansc4c25922017-01-15 16:56:30 -08001260 }
Jason Evans20c31de2014-10-02 23:01:10 -07001261 }
Jason Evans602c8e02014-08-18 16:22:13 -07001262}
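/*
 * The loop above is a destroy-safe traversal idiom worth calling out:
 * the iterator callback returns the node that must die, the caller
 * advances to its successor *before* unlinking, and only then frees
 * the node.  A minimal sketch of the pattern (hypothetical
 * node_t/tree_t names, not jemalloc types):
 *
 *	node_t *next = NULL;
 *	do {
 *		node_t *doomed = tree_iter(tree, next, doomed_cb, ctx);
 *		if (doomed != NULL) {
 *			next = tree_next(tree, doomed);
 *			tree_remove(tree, doomed);
 *			node_free(doomed);
 *		} else {
 *			next = NULL;
 *		}
 *	} while (next != NULL);
 */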
1263
Jason Evansb2c0d632016-04-13 23:36:15 -07001264struct prof_tdata_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001265 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001266 prof_cnt_t cnt_all;
1267};
Jason Evans602c8e02014-08-18 16:22:13 -07001268
Jason Evansb2c0d632016-04-13 23:36:15 -07001269static prof_tdata_t *
1270prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
Jason Evansc4c25922017-01-15 16:56:30 -08001271 void *opaque) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001272 struct prof_tdata_merge_iter_arg_s *arg =
1273 (struct prof_tdata_merge_iter_arg_s *)opaque;
1274
Jason Evansc1e00ef2016-05-10 22:21:10 -07001275 malloc_mutex_lock(arg->tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001276 if (!tdata->expired) {
Jason Evans602c8e02014-08-18 16:22:13 -07001277 size_t tabind;
1278 union {
1279 prof_tctx_t *p;
1280 void *v;
1281 } tctx;
1282
1283 tdata->dumping = true;
1284 memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans551ebc42014-10-03 10:16:09 -07001285 for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
Jason Evansc4c25922017-01-15 16:56:30 -08001286 &tctx.v);) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001287 prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08001288 }
Jason Evans602c8e02014-08-18 16:22:13 -07001289
Jason Evansb2c0d632016-04-13 23:36:15 -07001290 arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
1291 arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001292 if (opt_prof_accum) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001293 arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
1294 arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001295 }
Jason Evansc4c25922017-01-15 16:56:30 -08001296 } else {
Jason Evans602c8e02014-08-18 16:22:13 -07001297 tdata->dumping = false;
Jason Evansc4c25922017-01-15 16:56:30 -08001298 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001299 malloc_mutex_unlock(arg->tsdn, tdata->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001300
Jason Evansf4086432017-01-19 18:15:45 -08001301 return NULL;
Jason Evans602c8e02014-08-18 16:22:13 -07001302}
1303
1304static prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001305prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
1306 void *arg) {
Jason Evans602c8e02014-08-18 16:22:13 -07001307 bool propagate_err = *(bool *)arg;
1308
Jason Evansc4c25922017-01-15 16:56:30 -08001309 if (!tdata->dumping) {
Jason Evansf4086432017-01-19 18:15:45 -08001310 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08001311 }
Jason Evans602c8e02014-08-18 16:22:13 -07001312
1313 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001314 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001315 tdata->thr_uid, tdata->cnt_summed.curobjs,
1316 tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
1317 tdata->cnt_summed.accumbytes,
1318 (tdata->thread_name != NULL) ? " " : "",
Jason Evansc4c25922017-01-15 16:56:30 -08001319 (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
Jason Evansf4086432017-01-19 18:15:45 -08001320 return tdata;
Jason Evansc4c25922017-01-15 16:56:30 -08001321 }
Jason Evansf4086432017-01-19 18:15:45 -08001322 return NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001323}
1324
Jason Evans4f37ef62014-01-16 13:23:56 -08001325static bool
Jason Evansa268af52017-05-01 23:10:42 -07001326prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
1327 const prof_cnt_t *cnt_all) {
Jason Evans602c8e02014-08-18 16:22:13 -07001328 bool ret;
Jason Evansa881cd22010-10-02 15:18:50 -07001329
Jason Evans602c8e02014-08-18 16:22:13 -07001330 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001331 "heap_v2/%"FMTu64"\n"
1332 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001333 ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001334 cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
Jason Evansf4086432017-01-19 18:15:45 -08001335 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08001336 }
Jason Evans4f37ef62014-01-16 13:23:56 -08001337
Jason Evansc1e00ef2016-05-10 22:21:10 -07001338 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001339 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1340 (void *)&propagate_err) != NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001341 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08001342 return ret;
Jason Evansa881cd22010-10-02 15:18:50 -07001343}
Jason Evansa268af52017-05-01 23:10:42 -07001344prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
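/*
 * JET_MUTABLE expands to nothing in JEMALLOC_JET (test) builds, leaving
 * prof_dump_header as a mutable function pointer that unit tests can
 * interpose on; in regular builds it expands to const.  The header the
 * default implementation writes looks like (counts illustrative,
 * assuming the default lg_prof_sample of 19):
 *
 *   heap_v2/524288
 *    t*: 5: 20480 [0: 0]
 *     t1: 2: 8192 [0: 0]
 *     t7: 3: 12288 [0: 0]
 *
 * i.e. the average sample interval in bytes, merged totals, and then
 * the per-thread lines emitted by prof_tdata_dump_iter().
 */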
Jason Evansa881cd22010-10-02 15:18:50 -07001345
Jason Evans22ca8552010-03-02 11:57:30 -08001346static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001347prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
Jason Evansc4c25922017-01-15 16:56:30 -08001348 const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001349 bool ret;
Jason Evans6109fe02010-02-10 10:37:56 -08001350 unsigned i;
Jason Evansb2c0d632016-04-13 23:36:15 -07001351 struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
Jason Evans6109fe02010-02-10 10:37:56 -08001352
Jason Evans7372b152012-02-10 20:22:09 -08001353 cassert(config_prof);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001354 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001355
Jason Evans602c8e02014-08-18 16:22:13 -07001356	/* Avoid dumping any gctx that has no useful data. */
Jason Evans551ebc42014-10-03 10:16:09 -07001357 if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
Jason Evans602c8e02014-08-18 16:22:13 -07001358 (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1359 assert(gctx->cnt_summed.curobjs == 0);
1360 assert(gctx->cnt_summed.curbytes == 0);
1361 assert(gctx->cnt_summed.accumobjs == 0);
1362 assert(gctx->cnt_summed.accumbytes == 0);
Jason Evans4f37ef62014-01-16 13:23:56 -08001363 ret = false;
1364 goto label_return;
Jason Evansa881cd22010-10-02 15:18:50 -07001365 }
1366
Jason Evans602c8e02014-08-18 16:22:13 -07001367 if (prof_dump_printf(propagate_err, "@")) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001368 ret = true;
1369 goto label_return;
Jason Evans6109fe02010-02-10 10:37:56 -08001370 }
Jason Evans4f37ef62014-01-16 13:23:56 -08001371 for (i = 0; i < bt->len; i++) {
Jason Evans5fae7dc2015-07-23 13:56:25 -07001372 if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
Jason Evans4f37ef62014-01-16 13:23:56 -08001373 (uintptr_t)bt->vec[i])) {
1374 ret = true;
1375 goto label_return;
1376 }
1377 }
Jason Evans22ca8552010-03-02 11:57:30 -08001378
Jason Evans602c8e02014-08-18 16:22:13 -07001379 if (prof_dump_printf(propagate_err,
1380 "\n"
Jason Evans5fae7dc2015-07-23 13:56:25 -07001381 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001382 gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1383 gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1384 ret = true;
1385 goto label_return;
1386 }
1387
Jason Evansc1e00ef2016-05-10 22:21:10 -07001388 prof_tctx_dump_iter_arg.tsdn = tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001389 prof_tctx_dump_iter_arg.propagate_err = propagate_err;
Jason Evans602c8e02014-08-18 16:22:13 -07001390 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
Jason Evansb2c0d632016-04-13 23:36:15 -07001391 (void *)&prof_tctx_dump_iter_arg) != NULL) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001392 ret = true;
1393 goto label_return;
1394 }
1395
Jason Evans772163b2014-01-17 15:40:52 -08001396 ret = false;
Jason Evans4f37ef62014-01-16 13:23:56 -08001397label_return:
Jason Evansf4086432017-01-19 18:15:45 -08001398 return ret;
Jason Evans6109fe02010-02-10 10:37:56 -08001399}
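/*
 * Assembled from the printf calls above, a dumped gctx record is
 * expected to look like (addresses and counts illustrative):
 *
 *   @ 0x7f3a12345678 0x7f3a12345abc
 *    t*: 2: 8192 [0: 0]
 *     t7: 2: 8192 [0: 0]
 *
 * i.e. the backtrace frames, the per-gctx totals, and one line per
 * contributing thread from prof_tctx_dump_iter().
 */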
1400
Jason Evans788d29d2016-02-20 23:46:14 -08001401#ifndef _WIN32
Jason Evanse42c3092015-07-22 15:44:47 -07001402JEMALLOC_FORMAT_PRINTF(1, 2)
Jason Evans8e33c212015-05-01 09:03:20 -07001403static int
Jason Evansc4c25922017-01-15 16:56:30 -08001404prof_open_maps(const char *format, ...) {
Jason Evans8e33c212015-05-01 09:03:20 -07001405 int mfd;
1406 va_list ap;
1407 char filename[PATH_MAX + 1];
1408
1409 va_start(ap, format);
1410 malloc_vsnprintf(filename, sizeof(filename), format, ap);
1411 va_end(ap);
Y. T. Chung0975b882017-07-20 23:02:23 +08001412
1413#if defined(O_CLOEXEC)
Jason Evans10d090a2017-05-30 14:36:55 -07001414 mfd = open(filename, O_RDONLY | O_CLOEXEC);
Y. T. Chung0975b882017-07-20 23:02:23 +08001415#else
1416 mfd = open(filename, O_RDONLY);
Y. T. Chungaa6c2822017-07-21 21:40:29 +08001417 if (mfd != -1) {
1418 fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
1419 }
Y. T. Chung0975b882017-07-20 23:02:23 +08001420#endif
Jason Evans8e33c212015-05-01 09:03:20 -07001421
Jason Evansf4086432017-01-19 18:15:45 -08001422 return mfd;
Jason Evans8e33c212015-05-01 09:03:20 -07001423}
Jason Evans788d29d2016-02-20 23:46:14 -08001424#endif
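/*
 * Note on the fallback above: without O_CLOEXEC the close-on-exec flag
 * is set only after open() returns, via fcntl(F_SETFD), so a concurrent
 * fork()+exec() can briefly inherit the descriptor.  The atomic
 * O_CLOEXEC path is preferred wherever the platform provides it.
 */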
1425
1426static int
Jason Evansc4c25922017-01-15 16:56:30 -08001427prof_getpid(void) {
Jason Evans788d29d2016-02-20 23:46:14 -08001428#ifdef _WIN32
Jason Evansf4086432017-01-19 18:15:45 -08001429 return GetCurrentProcessId();
Jason Evans788d29d2016-02-20 23:46:14 -08001430#else
Jason Evansf4086432017-01-19 18:15:45 -08001431 return getpid();
Jason Evans788d29d2016-02-20 23:46:14 -08001432#endif
1433}
Jason Evans8e33c212015-05-01 09:03:20 -07001434
Jason Evans22ca8552010-03-02 11:57:30 -08001435static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001436prof_dump_maps(bool propagate_err) {
Jason Evans93f39f82013-10-21 15:07:40 -07001437 bool ret;
Jason Evansc7177182010-02-11 09:25:56 -08001438 int mfd;
Jason Evansc7177182010-02-11 09:25:56 -08001439
Jason Evans7372b152012-02-10 20:22:09 -08001440 cassert(config_prof);
Harald Weppnerc2da2592014-03-18 00:00:14 -07001441#ifdef __FreeBSD__
Jason Evans8e33c212015-05-01 09:03:20 -07001442 mfd = prof_open_maps("/proc/curproc/map");
rustyx7f283982016-01-30 14:51:16 +01001443#elif defined(_WIN32)
1444 mfd = -1; // Not implemented
Harald Weppnerc2da2592014-03-18 00:00:14 -07001445#else
Jason Evans8e33c212015-05-01 09:03:20 -07001446 {
Jason Evans788d29d2016-02-20 23:46:14 -08001447 int pid = prof_getpid();
Jason Evans8e33c212015-05-01 09:03:20 -07001448
1449 mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
Jason Evansc4c25922017-01-15 16:56:30 -08001450 if (mfd == -1) {
Jason Evans8e33c212015-05-01 09:03:20 -07001451 mfd = prof_open_maps("/proc/%d/maps", pid);
Jason Evansc4c25922017-01-15 16:56:30 -08001452 }
Jason Evans8e33c212015-05-01 09:03:20 -07001453 }
Harald Weppnerc2da2592014-03-18 00:00:14 -07001454#endif
Jason Evansc7177182010-02-11 09:25:56 -08001455 if (mfd != -1) {
1456 ssize_t nread;
1457
Jason Evans4f37ef62014-01-16 13:23:56 -08001458 if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
Jason Evans93f39f82013-10-21 15:07:40 -07001459 propagate_err) {
1460 ret = true;
1461 goto label_return;
1462 }
Jason Evansc7177182010-02-11 09:25:56 -08001463 nread = 0;
1464 do {
1465 prof_dump_buf_end += nread;
Jason Evanscd9a1342012-03-21 18:33:03 -07001466 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
Jason Evansc7177182010-02-11 09:25:56 -08001467 /* Make space in prof_dump_buf before read(). */
Jason Evans4f37ef62014-01-16 13:23:56 -08001468 if (prof_dump_flush(propagate_err) &&
Jason Evans93f39f82013-10-21 15:07:40 -07001469 propagate_err) {
1470 ret = true;
1471 goto label_return;
1472 }
Jason Evansc7177182010-02-11 09:25:56 -08001473 }
1474 nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
Jason Evanscd9a1342012-03-21 18:33:03 -07001475 PROF_DUMP_BUFSIZE - prof_dump_buf_end);
Jason Evansc7177182010-02-11 09:25:56 -08001476 } while (nread > 0);
Jason Evans93f39f82013-10-21 15:07:40 -07001477 } else {
1478 ret = true;
1479 goto label_return;
1480 }
Jason Evans22ca8552010-03-02 11:57:30 -08001481
Jason Evans93f39f82013-10-21 15:07:40 -07001482 ret = false;
1483label_return:
Jason Evansc4c25922017-01-15 16:56:30 -08001484 if (mfd != -1) {
Jason Evans93f39f82013-10-21 15:07:40 -07001485 close(mfd);
Jason Evansc4c25922017-01-15 16:56:30 -08001486 }
Jason Evansf4086432017-01-19 18:15:45 -08001487 return ret;
Jason Evansc7177182010-02-11 09:25:56 -08001488}
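/*
 * The read loop above streams the maps file through the global
 * prof_dump_buf: each iteration appends up to the remaining buffer
 * space, and prof_dump_flush() empties the buffer whenever
 * prof_dump_buf_end reaches PROF_DUMP_BUFSIZE, so arbitrarily large
 * /proc/<pid>/maps contents fit through a fixed-size buffer.
 */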
1489
Jason Evansdc391ad2016-05-04 12:14:36 -07001490/*
1491 * See prof_sample_threshold_update() comment for why the body of this function
1492 * is conditionally compiled.
1493 */
Jason Evans4f37ef62014-01-16 13:23:56 -08001494static void
Jason Evans602c8e02014-08-18 16:22:13 -07001495prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
Jason Evansc4c25922017-01-15 16:56:30 -08001496 const char *filename) {
Jason Evansdc391ad2016-05-04 12:14:36 -07001497#ifdef JEMALLOC_PROF
1498 /*
1499 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
1500 * differ slightly from what jeprof reports, because here we scale the
1501 * summary values, whereas jeprof scales each context individually and
1502 * reports the sums of the scaled values.
1503 */
Jason Evans4f37ef62014-01-16 13:23:56 -08001504 if (cnt_all->curbytes != 0) {
Jason Evansdc391ad2016-05-04 12:14:36 -07001505 double sample_period = (double)((uint64_t)1 << lg_prof_sample);
1506 double ratio = (((double)cnt_all->curbytes) /
1507 (double)cnt_all->curobjs) / sample_period;
1508 double scale_factor = 1.0 / (1.0 - exp(-ratio));
1509 uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
1510 * scale_factor);
1511 uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
1512 scale_factor);
1513
1514 malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
1515 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
1516 curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1517 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
Jason Evans4f37ef62014-01-16 13:23:56 -08001518 malloc_printf(
Jason Evans70417202015-05-01 12:31:12 -07001519 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
Jason Evans4f37ef62014-01-16 13:23:56 -08001520 filename);
1521 }
Jason Evansdc391ad2016-05-04 12:14:36 -07001522#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001523}
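/*
 * Worked example of the scaling above (illustrative numbers): with
 * lg_prof_sample == 19, sample_period is 524288.  If the merged
 * counters report curbytes == 1048576 and curobjs == 2, the mean
 * object size is 524288, so ratio == 1.0 and scale_factor ==
 * 1/(1 - e^-1) ~= 1.582; the summary then reports ~1658823 bytes in
 * ~3 objects rather than the raw sampled counts.
 */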
1524
Jason Evansb2c0d632016-04-13 23:36:15 -07001525struct prof_gctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001526 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001527 bool propagate_err;
1528};
1529
Jason Evans602c8e02014-08-18 16:22:13 -07001530static prof_gctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001531prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
Jason Evans602c8e02014-08-18 16:22:13 -07001532 prof_gctx_t *ret;
Jason Evansb2c0d632016-04-13 23:36:15 -07001533 struct prof_gctx_dump_iter_arg_s *arg =
1534 (struct prof_gctx_dump_iter_arg_s *)opaque;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001535
Jason Evansc1e00ef2016-05-10 22:21:10 -07001536 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001537
Jason Evansc1e00ef2016-05-10 22:21:10 -07001538 if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
Jason Evansb2c0d632016-04-13 23:36:15 -07001539 gctxs)) {
Jason Evans20c31de2014-10-02 23:01:10 -07001540 ret = gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001541 goto label_return;
1542 }
Jason Evans3a81cbd2014-08-16 12:58:55 -07001543
Jason Evans602c8e02014-08-18 16:22:13 -07001544 ret = NULL;
1545label_return:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001546 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evansf4086432017-01-19 18:15:45 -08001547 return ret;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001548}
1549
Jason Evans1ff09532017-01-16 11:09:24 -08001550static void
1551prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
1552 struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
1553 struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
Jason Evansc4c25922017-01-15 16:56:30 -08001554 prof_gctx_tree_t *gctxs) {
Jason Evans6109fe02010-02-10 10:37:56 -08001555 size_t tabind;
Jason Evans075e77c2010-09-20 19:53:25 -07001556 union {
Jason Evans602c8e02014-08-18 16:22:13 -07001557 prof_gctx_t *p;
Jason Evans075e77c2010-09-20 19:53:25 -07001558 void *v;
Jason Evans602c8e02014-08-18 16:22:13 -07001559 } gctx;
Jason Evans6109fe02010-02-10 10:37:56 -08001560
Jason Evansc93ed812014-10-30 16:50:33 -07001561 prof_enter(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001562
Jason Evans602c8e02014-08-18 16:22:13 -07001563 /*
1564 * Put gctx's in limbo and clear their counters in preparation for
1565 * summing.
1566 */
Jason Evans1ff09532017-01-16 11:09:24 -08001567 gctx_tree_new(gctxs);
1568 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
1569 prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
1570 }
Jason Evans602c8e02014-08-18 16:22:13 -07001571
1572 /*
1573 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1574 * stats and merge them into the associated gctx's.
1575 */
Jason Evans1ff09532017-01-16 11:09:24 -08001576 prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
1577 memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001578 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07001579 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
Jason Evans1ff09532017-01-16 11:09:24 -08001580 (void *)prof_tdata_merge_iter_arg);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001581 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001582
1583 /* Merge tctx stats into gctx's. */
Jason Evans1ff09532017-01-16 11:09:24 -08001584 prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
1585 prof_gctx_merge_iter_arg->leak_ngctx = 0;
1586 gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
1587 (void *)prof_gctx_merge_iter_arg);
Jason Evans602c8e02014-08-18 16:22:13 -07001588
Jason Evansc93ed812014-10-30 16:50:33 -07001589 prof_leave(tsd, tdata);
Jason Evans1ff09532017-01-16 11:09:24 -08001590}
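/*
 * To recap the merge phase above: with the backtrace table locked via
 * prof_enter(), every gctx is parked in limbo with zeroed summed
 * counters; the tdata iterator then folds each live thread's tctx
 * counts into cnt_all and the owning gctx's; finally the gctx iterator
 * computes leak_ngctx, the number of gctx's that still own sampled
 * objects.
 */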
Jason Evans4f37ef62014-01-16 13:23:56 -08001591
Jason Evans1ff09532017-01-16 11:09:24 -08001592static bool
1593prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
1594 bool leakcheck, prof_tdata_t *tdata,
1595 struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
1596 struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
1597 struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
Jason Evansc4c25922017-01-15 16:56:30 -08001598 prof_gctx_tree_t *gctxs) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001599 /* Create dump file. */
Jason Evans1ff09532017-01-16 11:09:24 -08001600 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
1601 return true;
1602 }
Jason Evans6109fe02010-02-10 10:37:56 -08001603
1604 /* Dump profile header. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001605 if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
Jason Evans1ff09532017-01-16 11:09:24 -08001606 &prof_tdata_merge_iter_arg->cnt_all)) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001607 goto label_write_error;
Jason Evans1ff09532017-01-16 11:09:24 -08001608 }
Jason Evans6109fe02010-02-10 10:37:56 -08001609
Jason Evans602c8e02014-08-18 16:22:13 -07001610 /* Dump per gctx profile stats. */
Jason Evans1ff09532017-01-16 11:09:24 -08001611 prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
1612 prof_gctx_dump_iter_arg->propagate_err = propagate_err;
1613 if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
1614 (void *)prof_gctx_dump_iter_arg) != NULL) {
Jason Evans3a81cbd2014-08-16 12:58:55 -07001615 goto label_write_error;
Jason Evans1ff09532017-01-16 11:09:24 -08001616 }
Jason Evans6109fe02010-02-10 10:37:56 -08001617
Jason Evansc7177182010-02-11 09:25:56 -08001618 /* Dump /proc/<pid>/maps if possible. */
Jason Evans1ff09532017-01-16 11:09:24 -08001619 if (prof_dump_maps(propagate_err)) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001620 goto label_write_error;
Jason Evans1ff09532017-01-16 11:09:24 -08001621 }
Jason Evansc7177182010-02-11 09:25:56 -08001622
Jason Evans1ff09532017-01-16 11:09:24 -08001623 if (prof_dump_close(propagate_err)) {
1624 return true;
1625 }
Jason Evans6109fe02010-02-10 10:37:56 -08001626
Jason Evans1ff09532017-01-16 11:09:24 -08001627 return false;
1628label_write_error:
1629 prof_dump_close(propagate_err);
1630 return true;
1631}
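/*
 * End to end, a completed dump file produced by this function is laid
 * out as:
 *
 *   heap_v2/<sample period>      <- prof_dump_header()
 *   @ <frame> <frame> ...        <- one record per gctx
 *    t*: ...                        (totals + per-thread lines)
 *   ...
 *   MAPPED_LIBRARIES:            <- verbatim /proc/<pid>/maps
 *
 * which is the heap_v2 format that jeprof consumes.
 */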
1632
1633static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001634prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
1635 bool leakcheck) {
Jason Evans1ff09532017-01-16 11:09:24 -08001636 cassert(config_prof);
David Goldblatt209f2922017-04-26 18:37:44 -07001637 assert(tsd_reentrancy_level_get(tsd) == 0);
Jason Evans1ff09532017-01-16 11:09:24 -08001638
Qi Wang05775a32017-04-24 18:14:57 -07001639	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
Jason Evans1ff09532017-01-16 11:09:24 -08001640 if (tdata == NULL) {
1641 return true;
1642 }
1643
Qi Wang425463a2017-06-22 16:18:30 -07001644 pre_reentrancy(tsd, NULL);
Jason Evans1ff09532017-01-16 11:09:24 -08001645 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
1646
Qi Wang05775a32017-04-24 18:14:57 -07001647 prof_gctx_tree_t gctxs;
1648 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
1649 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1650 struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
Jason Evans1ff09532017-01-16 11:09:24 -08001651 prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
1652 &prof_gctx_merge_iter_arg, &gctxs);
Qi Wang05775a32017-04-24 18:14:57 -07001653	bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck,
Jason Evans1ff09532017-01-16 11:09:24 -08001654	    tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
1655	    &prof_gctx_dump_iter_arg, &gctxs);
Jason Evans20c31de2014-10-02 23:01:10 -07001656 prof_gctx_finish(tsd, &gctxs);
Jason Evans1ff09532017-01-16 11:09:24 -08001657
Jason Evansc1e00ef2016-05-10 22:21:10 -07001658 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Qi Wang05775a32017-04-24 18:14:57 -07001659 post_reentrancy(tsd);
Jason Evans4f37ef62014-01-16 13:23:56 -08001660
Jason Evans1ff09532017-01-16 11:09:24 -08001661 if (err) {
1662 return true;
1663 }
1664
Jason Evansb2c0d632016-04-13 23:36:15 -07001665 if (leakcheck) {
1666 prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
1667 prof_gctx_merge_iter_arg.leak_ngctx, filename);
1668 }
Jason Evans1ff09532017-01-16 11:09:24 -08001669 return false;
Jason Evans6109fe02010-02-10 10:37:56 -08001670}
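/*
 * prof_dump() itself allocates (e.g. while opening files and walking
 * trees), so it raises the thread's reentrancy level first; that keeps
 * allocations made during the dump from recursing back into the
 * profiler.  prof_dump_mtx additionally serializes concurrent dumps
 * regardless of trigger (mdump/idump/gdump/fdump).
 */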
1671
Jason Evans1ff09532017-01-16 11:09:24 -08001672#ifdef JEMALLOC_JET
1673void
1674prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001675 uint64_t *accumbytes) {
Jason Evans1ff09532017-01-16 11:09:24 -08001676 tsd_t *tsd;
1677 prof_tdata_t *tdata;
1678 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
1679 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1680 prof_gctx_tree_t gctxs;
1681
1682 tsd = tsd_fetch();
1683 tdata = prof_tdata_get(tsd, false);
1684 if (tdata == NULL) {
1685 if (curobjs != NULL) {
1686 *curobjs = 0;
1687 }
1688 if (curbytes != NULL) {
1689 *curbytes = 0;
1690 }
1691 if (accumobjs != NULL) {
1692 *accumobjs = 0;
1693 }
1694 if (accumbytes != NULL) {
1695 *accumbytes = 0;
1696 }
1697 return;
1698 }
1699
1700 prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
1701 &prof_gctx_merge_iter_arg, &gctxs);
1702 prof_gctx_finish(tsd, &gctxs);
1703
1704 if (curobjs != NULL) {
1705 *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
1706 }
1707 if (curbytes != NULL) {
1708 *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
1709 }
1710 if (accumobjs != NULL) {
1711 *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
1712 }
1713 if (accumbytes != NULL) {
1714 *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
1715 }
1716}
1717#endif
1718
Jason Evansc0cc5db2017-01-19 21:41:41 -08001719#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
1720#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
Jason Evans6109fe02010-02-10 10:37:56 -08001721static void
Jason Evansc4c25922017-01-15 16:56:30 -08001722prof_dump_filename(char *filename, char v, uint64_t vseq) {
Jason Evans7372b152012-02-10 20:22:09 -08001723 cassert(config_prof);
1724
Jason Evans4f37ef62014-01-16 13:23:56 -08001725 if (vseq != VSEQ_INVALID) {
Jason Evansd81e4bd2012-03-06 14:57:45 -08001726 /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1727 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001728 "%s.%d.%"FMTu64".%c%"FMTu64".heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001729 opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001730 } else {
1731 /* "<prefix>.<pid>.<seq>.<v>.heap" */
1732 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001733 "%s.%d.%"FMTu64".%c.heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001734 opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
Jason Evans6109fe02010-02-10 10:37:56 -08001735 }
Jason Evans52386b22012-04-22 16:00:11 -07001736 prof_dump_seq++;
Jason Evans6109fe02010-02-10 10:37:56 -08001737}
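/*
 * Example generated names, assuming opt_prof_prefix "jeprof" and pid
 * 1234:
 *
 *   jeprof.1234.0.f.heap    (final dump; vseq == VSEQ_INVALID)
 *   jeprof.1234.3.i2.heap   (an interval-triggered dump)
 *
 * The <v> character encodes the trigger: 'f' final, 'i' interval,
 * 'm' manual, 'u' gdump, as passed by the callers below.
 */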
1738
1739static void
Jason Evansc4c25922017-01-15 16:56:30 -08001740prof_fdump(void) {
Jason Evans5460aa62014-09-22 21:09:23 -07001741 tsd_t *tsd;
Jason Evans6109fe02010-02-10 10:37:56 -08001742 char filename[DUMP_FILENAME_BUFSIZE];
1743
Jason Evans7372b152012-02-10 20:22:09 -08001744 cassert(config_prof);
Jason Evans57efa7b2014-10-08 17:57:19 -07001745 assert(opt_prof_final);
1746 assert(opt_prof_prefix[0] != '\0');
Jason Evans7372b152012-02-10 20:22:09 -08001747
Jason Evansc4c25922017-01-15 16:56:30 -08001748 if (!prof_booted) {
Jason Evans6109fe02010-02-10 10:37:56 -08001749 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001750 }
Jason Evans029d44c2014-10-04 11:12:53 -07001751 tsd = tsd_fetch();
David Goldblatt209f2922017-04-26 18:37:44 -07001752 assert(tsd_reentrancy_level_get(tsd) == 0);
Jason Evans6109fe02010-02-10 10:37:56 -08001753
Jason Evansc1e00ef2016-05-10 22:21:10 -07001754 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001755 prof_dump_filename(filename, 'f', VSEQ_INVALID);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001756 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001757 prof_dump(tsd, false, filename, opt_prof_leak);
Jason Evans6109fe02010-02-10 10:37:56 -08001758}
1759
Jason Evansfa2d64c2017-02-12 17:03:46 -08001760bool
1761prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
1762 cassert(config_prof);
1763
1764#ifndef JEMALLOC_ATOMIC_U64
1765 if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
David Goldblatt26c792e2017-05-15 15:38:15 -07001766 WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
Jason Evansfa2d64c2017-02-12 17:03:46 -08001767 return true;
1768 }
Jason Evansfa2d64c2017-02-12 17:03:46 -08001769 prof_accum->accumbytes = 0;
David Goldblatt30d74db2017-04-04 18:08:58 -07001770#else
1771 atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
1772#endif
Jason Evansfa2d64c2017-02-12 17:03:46 -08001773 return false;
1774}
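/*
 * prof_accum thus has two representations: a plain counter guarded by
 * prof_accum->mtx where 64-bit atomics are unavailable, and a relaxed
 * atomic_u64 otherwise, letting the common allocation path bump
 * accumbytes without taking a mutex when the platform allows.
 */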
1775
Jason Evans6109fe02010-02-10 10:37:56 -08001776void
Jason Evansc4c25922017-01-15 16:56:30 -08001777prof_idump(tsdn_t *tsdn) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001778 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001779 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001780
Jason Evans7372b152012-02-10 20:22:09 -08001781 cassert(config_prof);
1782
Qi Wang2dccf452018-04-06 13:45:37 -07001783 if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
Jason Evans6109fe02010-02-10 10:37:56 -08001784 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001785 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001786 tsd = tsdn_tsd(tsdn);
David Goldblatt209f2922017-04-26 18:37:44 -07001787 if (tsd_reentrancy_level_get(tsd) > 0) {
Qi Wang05775a32017-04-24 18:14:57 -07001788 return;
1789 }
1790
Jason Evans5460aa62014-09-22 21:09:23 -07001791 tdata = prof_tdata_get(tsd, false);
Jason Evansc4c25922017-01-15 16:56:30 -08001792 if (tdata == NULL) {
Jason Evans52386b22012-04-22 16:00:11 -07001793 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001794 }
Jason Evans602c8e02014-08-18 16:22:13 -07001795 if (tdata->enq) {
1796 tdata->enq_idump = true;
Jason Evansd34f9e72010-02-11 13:19:21 -08001797 return;
1798 }
Jason Evans6109fe02010-02-10 10:37:56 -08001799
Jason Evanse7339702010-10-23 18:37:06 -07001800 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001801 char filename[PATH_MAX + 1];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001802 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001803 prof_dump_filename(filename, 'i', prof_dump_iseq);
1804 prof_dump_iseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001805 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001806 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001807 }
Jason Evans6109fe02010-02-10 10:37:56 -08001808}
1809
Jason Evans22ca8552010-03-02 11:57:30 -08001810bool
Jason Evansc4c25922017-01-15 16:56:30 -08001811prof_mdump(tsd_t *tsd, const char *filename) {
Jason Evans7372b152012-02-10 20:22:09 -08001812 cassert(config_prof);
David Goldblatt209f2922017-04-26 18:37:44 -07001813 assert(tsd_reentrancy_level_get(tsd) == 0);
Jason Evans7372b152012-02-10 20:22:09 -08001814
Jason Evansc4c25922017-01-15 16:56:30 -08001815 if (!opt_prof || !prof_booted) {
Jason Evansf4086432017-01-19 18:15:45 -08001816 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08001817 }
Qi Wang05775a32017-04-24 18:14:57 -07001818 char filename_buf[DUMP_FILENAME_BUFSIZE];
Jason Evans22ca8552010-03-02 11:57:30 -08001819 if (filename == NULL) {
1820 /* No filename specified, so automatically generate one. */
Jason Evansc4c25922017-01-15 16:56:30 -08001821 if (opt_prof_prefix[0] == '\0') {
Jason Evansf4086432017-01-19 18:15:45 -08001822 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08001823 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001824 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001825 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1826 prof_dump_mseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001827 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001828 filename = filename_buf;
1829 }
Jason Evansf4086432017-01-19 18:15:45 -08001830 return prof_dump(tsd, true, filename, false);
Jason Evans6109fe02010-02-10 10:37:56 -08001831}
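/*
 * prof_mdump() backs the "prof.dump" mallctl.  A minimal usage sketch
 * from application code (assumes <jemalloc/jemalloc.h>):
 *
 *	const char *fname = "/tmp/app.heap";
 *	mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
 *
 * Writing no value (newp == NULL) takes the filename == NULL path
 * above and auto-generates an 'm' sequence name.
 */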
1832
1833void
Jason Evansc4c25922017-01-15 16:56:30 -08001834prof_gdump(tsdn_t *tsdn) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001835 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001836 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001837
Jason Evans7372b152012-02-10 20:22:09 -08001838 cassert(config_prof);
1839
Qi Wang2dccf452018-04-06 13:45:37 -07001840 if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
Jason Evans6109fe02010-02-10 10:37:56 -08001841 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001842 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001843 tsd = tsdn_tsd(tsdn);
David Goldblatt209f2922017-04-26 18:37:44 -07001844 if (tsd_reentrancy_level_get(tsd) > 0) {
Qi Wang05775a32017-04-24 18:14:57 -07001845 return;
1846 }
1847
Jason Evans5460aa62014-09-22 21:09:23 -07001848 tdata = prof_tdata_get(tsd, false);
Jason Evansc4c25922017-01-15 16:56:30 -08001849 if (tdata == NULL) {
Jason Evans52386b22012-04-22 16:00:11 -07001850 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001851 }
Jason Evans602c8e02014-08-18 16:22:13 -07001852 if (tdata->enq) {
1853 tdata->enq_gdump = true;
Jason Evans6109fe02010-02-10 10:37:56 -08001854 return;
1855 }
Jason Evans6109fe02010-02-10 10:37:56 -08001856
Jason Evanse7339702010-10-23 18:37:06 -07001857 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001858 char filename[DUMP_FILENAME_BUFSIZE];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001859 malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001860 prof_dump_filename(filename, 'u', prof_dump_useq);
1861 prof_dump_useq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001862 malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001863 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001864 }
Jason Evans6109fe02010-02-10 10:37:56 -08001865}
1866
1867static void
Jason Evansc4c25922017-01-15 16:56:30 -08001868prof_bt_hash(const void *key, size_t r_hash[2]) {
Jason Evans6109fe02010-02-10 10:37:56 -08001869 prof_bt_t *bt = (prof_bt_t *)key;
1870
Jason Evans7372b152012-02-10 20:22:09 -08001871 cassert(config_prof);
Jason Evans6109fe02010-02-10 10:37:56 -08001872
Jason Evansae03bf62013-01-22 12:02:08 -08001873 hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
Jason Evans6109fe02010-02-10 10:37:56 -08001874}
1875
1876static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001877prof_bt_keycomp(const void *k1, const void *k2) {
Jason Evans6109fe02010-02-10 10:37:56 -08001878 const prof_bt_t *bt1 = (prof_bt_t *)k1;
1879 const prof_bt_t *bt2 = (prof_bt_t *)k2;
1880
Jason Evans7372b152012-02-10 20:22:09 -08001881 cassert(config_prof);
1882
Jason Evansc4c25922017-01-15 16:56:30 -08001883 if (bt1->len != bt2->len) {
Jason Evansf4086432017-01-19 18:15:45 -08001884 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08001885 }
Jason Evans6109fe02010-02-10 10:37:56 -08001886 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1887}
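/*
 * prof_bt_hash()/prof_bt_keycomp() are the hash/equality pair handed
 * to the cuckoo hashes that key on backtraces (bt2gctx here, bt2tctx
 * per thread): a backtrace hashes over its raw vector of return
 * addresses and compares equal only on identical length and contents.
 */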
1888
David Goldblatt4d2e4bf2017-04-21 09:37:34 -07001889static uint64_t
Jason Evansc4c25922017-01-15 16:56:30 -08001890prof_thr_uid_alloc(tsdn_t *tsdn) {
Jason Evans9d8f3d22014-09-11 18:06:30 -07001891 uint64_t thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001892
Jason Evansc1e00ef2016-05-10 22:21:10 -07001893 malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001894 thr_uid = next_thr_uid;
1895 next_thr_uid++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001896 malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001897
Jason Evansf4086432017-01-19 18:15:45 -08001898 return thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001899}
1900
1901static prof_tdata_t *
Jason Evansb54d1602016-10-20 23:59:12 -07001902prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
Jason Evansc4c25922017-01-15 16:56:30 -08001903 char *thread_name, bool active) {
Jason Evans602c8e02014-08-18 16:22:13 -07001904 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001905
Jason Evans7372b152012-02-10 20:22:09 -08001906 cassert(config_prof);
1907
Jason Evans4d6a1342010-10-20 19:05:59 -07001908 /* Initialize an empty cache for this thread. */
Jason Evansb54d1602016-10-20 23:59:12 -07001909 tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
David Goldblatt8261e582017-05-30 10:45:37 -07001910 sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001911 arena_get(TSDN_NULL, 0, true), true);
Jason Evansc4c25922017-01-15 16:56:30 -08001912 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08001913 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08001914 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001915
Jason Evans602c8e02014-08-18 16:22:13 -07001916 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1917 tdata->thr_uid = thr_uid;
Jason Evans20c31de2014-10-02 23:01:10 -07001918 tdata->thr_discrim = thr_discrim;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001919 tdata->thread_name = thread_name;
Jason Evans20c31de2014-10-02 23:01:10 -07001920 tdata->attached = true;
1921 tdata->expired = false;
Jason Evans04211e22015-03-16 15:11:06 -07001922 tdata->tctx_uid_next = 0;
Jason Evans602c8e02014-08-18 16:22:13 -07001923
Jason Evansb54d1602016-10-20 23:59:12 -07001924 if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
1925 prof_bt_keycomp)) {
Qi Wangbfa530b2017-04-07 14:12:30 -07001926 idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
Jason Evansf4086432017-01-19 18:15:45 -08001927 return NULL;
Jason Evans4d6a1342010-10-20 19:05:59 -07001928 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001929
Jason Evans602c8e02014-08-18 16:22:13 -07001930 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1931 prof_sample_threshold_update(tdata);
Jason Evans4d6a1342010-10-20 19:05:59 -07001932
Jason Evans602c8e02014-08-18 16:22:13 -07001933 tdata->enq = false;
1934 tdata->enq_idump = false;
1935 tdata->enq_gdump = false;
Jason Evans52386b22012-04-22 16:00:11 -07001936
Jason Evans602c8e02014-08-18 16:22:13 -07001937 tdata->dumping = false;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001938 tdata->active = active;
Jason Evans4d6a1342010-10-20 19:05:59 -07001939
Jason Evansb54d1602016-10-20 23:59:12 -07001940 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001941 tdata_tree_insert(&tdatas, tdata);
Jason Evansb54d1602016-10-20 23:59:12 -07001942 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001943
Jason Evansf4086432017-01-19 18:15:45 -08001944 return tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07001945}
1946
1947prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001948prof_tdata_init(tsd_t *tsd) {
Jason Evansf4086432017-01-19 18:15:45 -08001949 return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
1950 NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
Jason Evans602c8e02014-08-18 16:22:13 -07001951}
1952
Jason Evans602c8e02014-08-18 16:22:13 -07001953static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001954prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
1955 if (tdata->attached && !even_if_attached) {
Jason Evansf4086432017-01-19 18:15:45 -08001956 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08001957 }
1958 if (ckh_count(&tdata->bt2tctx) != 0) {
Jason Evansf4086432017-01-19 18:15:45 -08001959 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08001960 }
Jason Evansf4086432017-01-19 18:15:45 -08001961 return true;
Jason Evans602c8e02014-08-18 16:22:13 -07001962}
1963
Jason Evansb2c0d632016-04-13 23:36:15 -07001964static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001965prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
Jason Evansc4c25922017-01-15 16:56:30 -08001966 bool even_if_attached) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001967 malloc_mutex_assert_owner(tsdn, tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001968
Jason Evansf4086432017-01-19 18:15:45 -08001969 return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
Jason Evansb2c0d632016-04-13 23:36:15 -07001970}
1971
Jason Evans602c8e02014-08-18 16:22:13 -07001972static void
Jason Evansb54d1602016-10-20 23:59:12 -07001973prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
Jason Evansc4c25922017-01-15 16:56:30 -08001974 bool even_if_attached) {
Jason Evansb54d1602016-10-20 23:59:12 -07001975 malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001976
Jason Evans602c8e02014-08-18 16:22:13 -07001977 tdata_tree_remove(&tdatas, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001978
Jason Evansc1e00ef2016-05-10 22:21:10 -07001979 assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
Jason Evansb2c0d632016-04-13 23:36:15 -07001980
Jason Evansdb722722016-03-23 20:29:33 -07001981 if (tdata->thread_name != NULL) {
Qi Wangbfa530b2017-04-07 14:12:30 -07001982 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
1983 true);
Jason Evansdb722722016-03-23 20:29:33 -07001984 }
Jason Evansb54d1602016-10-20 23:59:12 -07001985 ckh_delete(tsd, &tdata->bt2tctx);
Qi Wangbfa530b2017-04-07 14:12:30 -07001986 idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001987}
1988
1989static void
Jason Evansc4c25922017-01-15 16:56:30 -08001990prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
Jason Evansb54d1602016-10-20 23:59:12 -07001991 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
1992 prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
1993 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans20c31de2014-10-02 23:01:10 -07001994}
1995
1996static void
Jason Evansc4c25922017-01-15 16:56:30 -08001997prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
Jason Evans602c8e02014-08-18 16:22:13 -07001998 bool destroy_tdata;
1999
Jason Evansc1e00ef2016-05-10 22:21:10 -07002000 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07002001 if (tdata->attached) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002002 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
2003 true);
Jason Evansf04a0be2014-10-04 15:03:49 -07002004 /*
2005 * Only detach if !destroy_tdata, because detaching would allow
2006 * another thread to win the race to destroy tdata.
2007 */
Jason Evansc4c25922017-01-15 16:56:30 -08002008 if (!destroy_tdata) {
Jason Evansf04a0be2014-10-04 15:03:49 -07002009 tdata->attached = false;
Jason Evansc4c25922017-01-15 16:56:30 -08002010 }
Jason Evans029d44c2014-10-04 11:12:53 -07002011 tsd_prof_tdata_set(tsd, NULL);
Jason Evansc4c25922017-01-15 16:56:30 -08002012 } else {
Jason Evans602c8e02014-08-18 16:22:13 -07002013 destroy_tdata = false;
Jason Evansc4c25922017-01-15 16:56:30 -08002014 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002015 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
Jason Evansc4c25922017-01-15 16:56:30 -08002016 if (destroy_tdata) {
Jason Evansb54d1602016-10-20 23:59:12 -07002017 prof_tdata_destroy(tsd, tdata, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002018 }
Jason Evans602c8e02014-08-18 16:22:13 -07002019}
2020
Jason Evans20c31de2014-10-02 23:01:10 -07002021prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08002022prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
Jason Evans20c31de2014-10-02 23:01:10 -07002023 uint64_t thr_uid = tdata->thr_uid;
2024 uint64_t thr_discrim = tdata->thr_discrim + 1;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002025 char *thread_name = (tdata->thread_name != NULL) ?
Jason Evansc1e00ef2016-05-10 22:21:10 -07002026 prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002027 bool active = tdata->active;
Jason Evans602c8e02014-08-18 16:22:13 -07002028
Jason Evans20c31de2014-10-02 23:01:10 -07002029 prof_tdata_detach(tsd, tdata);
Jason Evansf4086432017-01-19 18:15:45 -08002030 return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
2031 active);
Jason Evans602c8e02014-08-18 16:22:13 -07002032}
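/*
 * Note that reinitialization retires the old tdata but keeps thr_uid
 * stable and bumps thr_discrim, so samples taken before and after a
 * reset stay distinguishable while remaining attributable to the same
 * thread.
 */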
2033
Jason Evans20c31de2014-10-02 23:01:10 -07002034static bool
Jason Evansc4c25922017-01-15 16:56:30 -08002035prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
Jason Evans20c31de2014-10-02 23:01:10 -07002036 bool destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07002037
Jason Evansc1e00ef2016-05-10 22:21:10 -07002038 malloc_mutex_lock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07002039 if (!tdata->expired) {
2040 tdata->expired = true;
2041 destroy_tdata = tdata->attached ? false :
Jason Evansc1e00ef2016-05-10 22:21:10 -07002042 prof_tdata_should_destroy(tsdn, tdata, false);
Jason Evansc4c25922017-01-15 16:56:30 -08002043 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07002044 destroy_tdata = false;
Jason Evansc4c25922017-01-15 16:56:30 -08002045 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002046 malloc_mutex_unlock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07002047
Jason Evansf4086432017-01-19 18:15:45 -08002048 return destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07002049}
2050
2051static prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08002052prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
2053 void *arg) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002054 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07002055
Jason Evansc1e00ef2016-05-10 22:21:10 -07002056 return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07002057}
2058
2059void
Jason Evansc4c25922017-01-15 16:56:30 -08002060prof_reset(tsd_t *tsd, size_t lg_sample) {
Jason Evans20c31de2014-10-02 23:01:10 -07002061 prof_tdata_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07002062
2063 assert(lg_sample < (sizeof(uint64_t) << 3));
2064
Jason Evansb54d1602016-10-20 23:59:12 -07002065 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
2066 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07002067
2068 lg_prof_sample = lg_sample;
Jason Evans20c31de2014-10-02 23:01:10 -07002069
2070 next = NULL;
2071 do {
2072 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
Jason Evansb54d1602016-10-20 23:59:12 -07002073 prof_tdata_reset_iter, (void *)tsd);
Jason Evans20c31de2014-10-02 23:01:10 -07002074 if (to_destroy != NULL) {
2075 next = tdata_tree_next(&tdatas, to_destroy);
Jason Evansb54d1602016-10-20 23:59:12 -07002076 prof_tdata_destroy_locked(tsd, to_destroy, false);
Jason Evansc4c25922017-01-15 16:56:30 -08002077 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07002078 next = NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002079 }
Jason Evans20c31de2014-10-02 23:01:10 -07002080 } while (next != NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07002081
Jason Evansb54d1602016-10-20 23:59:12 -07002082 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
2083 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4d6a1342010-10-20 19:05:59 -07002084}
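/*
 * prof_reset() backs the "prof.reset" mallctl.  A usage sketch
 * (assumes <jemalloc/jemalloc.h>):
 *
 *	size_t lg_sample = 19;
 *	mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample));
 *
 * which discards accumulated sample state and (optionally) changes the
 * sample interval.
 */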
2085
Jason Evanscd9a1342012-03-21 18:33:03 -07002086void
Jason Evansc4c25922017-01-15 16:56:30 -08002087prof_tdata_cleanup(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002088 prof_tdata_t *tdata;
Jason Evans4d6a1342010-10-20 19:05:59 -07002089
Jason Evansc4c25922017-01-15 16:56:30 -08002090 if (!config_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002091 return;
Jason Evansc4c25922017-01-15 16:56:30 -08002092 }
Jason Evans7372b152012-02-10 20:22:09 -08002093
Jason Evans5460aa62014-09-22 21:09:23 -07002094 tdata = tsd_prof_tdata_get(tsd);
Jason Evansc4c25922017-01-15 16:56:30 -08002095 if (tdata != NULL) {
Jason Evans5460aa62014-09-22 21:09:23 -07002096 prof_tdata_detach(tsd, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08002097 }
Jason Evans6109fe02010-02-10 10:37:56 -08002098}
2099
Jason Evansfc12c0b2014-10-03 23:25:30 -07002100bool
Jason Evansc4c25922017-01-15 16:56:30 -08002101prof_active_get(tsdn_t *tsdn) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002102 bool prof_active_current;
2103
Jason Evansc1e00ef2016-05-10 22:21:10 -07002104 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002105 prof_active_current = prof_active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002106 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002107 return prof_active_current;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002108}
2109
2110bool
Jason Evansc4c25922017-01-15 16:56:30 -08002111prof_active_set(tsdn_t *tsdn, bool active) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002112 bool prof_active_old;
2113
Jason Evansc1e00ef2016-05-10 22:21:10 -07002114 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002115 prof_active_old = prof_active;
2116 prof_active = active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002117 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002118 return prof_active_old;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002119}
2120
Jason Evans602c8e02014-08-18 16:22:13 -07002121const char *
Jason Evansc4c25922017-01-15 16:56:30 -08002122prof_thread_name_get(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002123 prof_tdata_t *tdata;
2124
Jason Evans5460aa62014-09-22 21:09:23 -07002125 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002126 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002127 return "";
Jason Evansc4c25922017-01-15 16:56:30 -08002128 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002129 return (tdata->thread_name != NULL ? tdata->thread_name : "");
Jason Evans602c8e02014-08-18 16:22:13 -07002130}
2131
Jason Evansfc12c0b2014-10-03 23:25:30 -07002132static char *
Jason Evansc4c25922017-01-15 16:56:30 -08002133prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002134 char *ret;
2135 size_t size;
2136
Jason Evansc4c25922017-01-15 16:56:30 -08002137 if (thread_name == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002138 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002139 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002140
2141 size = strlen(thread_name) + 1;
Jason Evansc4c25922017-01-15 16:56:30 -08002142 if (size == 1) {
Jason Evansf4086432017-01-19 18:15:45 -08002143 return "";
Jason Evansc4c25922017-01-15 16:56:30 -08002144 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002145
David Goldblatt8261e582017-05-30 10:45:37 -07002146 ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
Jason Evansc1e00ef2016-05-10 22:21:10 -07002147 arena_get(TSDN_NULL, 0, true), true);
Jason Evansc4c25922017-01-15 16:56:30 -08002148 if (ret == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002149 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002150 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002151 memcpy(ret, thread_name, size);
Jason Evansf4086432017-01-19 18:15:45 -08002152 return ret;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002153}

int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
	prof_tdata_t *tdata;
	unsigned i;
	char *s;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return EAGAIN;
	}

	/* Validate input: only printable and blank characters are allowed. */
	if (thread_name == NULL) {
		return EFAULT;
	}
	for (i = 0; thread_name[i] != '\0'; i++) {
		char c = thread_name[i];
		if (!isgraph(c) && !isblank(c)) {
			return EFAULT;
		}
	}

	s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
	if (s == NULL) {
		return EAGAIN;
	}

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
		tdata->thread_name = NULL;
	}
	if (strlen(s) > 0) {
		tdata->thread_name = s;
	}
	return 0;
}
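
/*
 * Illustrative usage sketch: applications typically reach the getter/setter
 * above through the documented "thread.prof.name" mallctl; the thread name
 * used below is hypothetical.
 *
 *	const char *name = "worker-3";
 *	mallctl("thread.prof.name", NULL, NULL, (void *)&name, sizeof(name));
 *
 *	const char *cur;
 *	size_t sz = sizeof(cur);
 *	mallctl("thread.prof.name", (void *)&cur, &sz, NULL, 0);
 */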

bool
prof_thread_active_get(tsd_t *tsd) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return false;
	}
	return tdata->active;
}

bool
prof_thread_active_set(tsd_t *tsd, bool active) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}
	tdata->active = active;
	return false;
}
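
/*
 * Illustrative usage sketch: per-thread sampling can be toggled at runtime
 * via the documented "thread.prof.active" mallctl, which lands in
 * prof_thread_active_[gs]et() above.  Assuming a prof-enabled build:
 *
 *	bool active = false;
 *	mallctl("thread.prof.active", NULL, NULL, (void *)&active,
 *	    sizeof(active));
 */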

bool
prof_thread_active_init_get(tsdn_t *tsdn) {
	bool active_init;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init;
}

bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
	bool active_init_old;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init_old;
}
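
/*
 * Illustrative usage sketch: the initial sampling state inherited by newly
 * created threads corresponds to the documented "prof.thread_active_init"
 * mallctl:
 *
 *	bool init = false;
 *	mallctl("prof.thread_active_init", NULL, NULL, (void *)&init,
 *	    sizeof(init));
 */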

bool
prof_gdump_get(tsdn_t *tsdn) {
	bool prof_gdump_current;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_current;
}

bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
	bool prof_gdump_old;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_old;
}
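
/*
 * Illustrative usage sketch: high-water-mark dumping can be toggled at
 * runtime through the documented "prof.gdump" mallctl, which reaches
 * prof_gdump_set() above:
 *
 *	bool gdump = true;
 *	mallctl("prof.gdump", NULL, NULL, (void *)&gdump, sizeof(gdump));
 */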

void
prof_boot0(void) {
	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}
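
/*
 * Note: PROF_PREFIX_DEFAULT seeds opt_prof_prefix, which prefixes dump file
 * names of the general form <prefix>.<pid>.<seq>.<type>.heap (see
 * prof_dump_filename() earlier in this file).
 */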

void
prof_boot1(void) {
	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}
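
/*
 * Worked example: with MALLOC_CONF="prof:true,lg_prof_interval:30",
 * prof_boot1() computes prof_interval = 1 << 30, i.e. an interval-triggered
 * dump roughly every 2^30 bytes (1 GiB) of allocation activity.
 */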

bool
prof_boot2(tsd_t *tsd) {
	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
		    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
		    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx,
		    "prof_thread_active_init",
		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp)) {
			return true;
		}
		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
		    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
			return true;
		}

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
		    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
			return true;
		}

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
		    WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
		    WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
			return true;
		}
		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
		    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (gctx_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
			    WITNESS_RANK_PROF_GCTX,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (tdata_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
			    WITNESS_RANK_PROF_TDATA,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return false;
}
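
/*
 * Illustrative boot example: the path above is exercised by enabling
 * profiling at process start, e.g.
 *
 *	MALLOC_CONF="prof:true,prof_final:true,lg_prof_sample:19" ./a.out
 *
 * With prof_final:true and a non-empty prof_prefix, prof_boot2() registers
 * prof_fdump() via atexit() so that a final profile is dumped at exit.
 */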

void
prof_prefork0(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
		malloc_mutex_prefork(tsdn, &tdatas_mtx);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
		}
	}
}

void
prof_prefork1(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		malloc_mutex_prefork(tsdn, &prof_active_mtx);
		malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
	}
}

void
prof_postfork_parent(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_parent(tsdn,
		    &prof_thread_active_init_mtx);
		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
	}
}

void
prof_postfork_child(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
	}
}

/******************************************************************************/