#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's. These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's. No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
/* Non static to enable profiling. */
malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
    uint64_t a_thr_uid = a->thr_uid;
    uint64_t b_thr_uid = b->thr_uid;
    int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
    if (ret == 0) {
        uint64_t a_thr_discrim = a->thr_discrim;
        uint64_t b_thr_discrim = b->thr_discrim;
        ret = (a_thr_discrim > b_thr_discrim) -
            (a_thr_discrim < b_thr_discrim);
        if (ret == 0) {
            uint64_t a_tctx_uid = a->tctx_uid;
            uint64_t b_tctx_uid = b->tctx_uid;
            ret = (a_tctx_uid > b_tctx_uid) -
                (a_tctx_uid < b_tctx_uid);
        }
    }
    return ret;
}
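
/*
 * The three comparators in this section impose total orders: tctx's sort
 * lexicographically by (thr_uid, thr_discrim, tctx_uid), gctx's by backtrace
 * contents and then length, and tdata's by (thr_uid, thr_discrim).
 */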

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
    unsigned a_len = a->bt.len;
    unsigned b_len = b->bt.len;
    unsigned comp_len = (a_len < b_len) ? a_len : b_len;
    int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
    if (ret == 0) {
        ret = (a_len > b_len) - (a_len < b_len);
    }
    return ret;
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
    int ret;
    uint64_t a_uid = a->thr_uid;
    uint64_t b_uid = b->thr_uid;

    ret = ((a_uid > b_uid) - (a_uid < b_uid));
    if (ret == 0) {
        uint64_t a_discrim = a->thr_discrim;
        uint64_t b_discrim = b->thr_discrim;

        ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
    }
    return ret;
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (updated) {
        /*
         * Compute a new sample threshold. This isn't very important in
         * practice, because this function is rarely executed, so the
         * potential for sample bias is minimal except in contrived
         * programs.
         */
        tdata = prof_tdata_get(tsd, true);
        if (tdata != NULL) {
            prof_sample_threshold_update(tdata);
        }
    }

    if ((uintptr_t)tctx > (uintptr_t)1U) {
        malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
        tctx->prepared = false;
        if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
            prof_tctx_destroy(tsd, tctx);
        } else {
            malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
        }
    }
}

void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
    prof_tctx_set(tsdn, ptr, usize, tctx);

    malloc_mutex_lock(tsdn, tctx->tdata->lock);
    tctx->cnts.curobjs++;
    tctx->cnts.curbytes += usize;
    if (opt_prof_accum) {
        tctx->cnts.accumobjs++;
        tctx->cnts.accumbytes += usize;
    }
    tctx->prepared = false;
    malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
    malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
    assert(tctx->cnts.curobjs > 0);
    assert(tctx->cnts.curbytes >= usize);
    tctx->cnts.curobjs--;
    tctx->cnts.curbytes -= usize;

    if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
        prof_tctx_destroy(tsd, tctx);
    } else {
        malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
    }
}

void
bt_init(prof_bt_t *bt, void **vec) {
    cassert(config_prof);

    bt->vec = vec;
    bt->len = 0;
}

JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    if (tdata != NULL) {
        assert(!tdata->enq);
        tdata->enq = true;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    if (tdata != NULL) {
        bool idump, gdump;

        assert(tdata->enq);
        tdata->enq = false;
        idump = tdata->enq_idump;
        tdata->enq_idump = false;
        gdump = tdata->enq_gdump;
        tdata->enq_gdump = false;

        if (idump) {
            prof_idump(tsd_tsdn(tsd));
        }
        if (gdump) {
            prof_gdump(tsd_tsdn(tsd));
        }
    }
}
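
/*
 * While a thread is between prof_enter() and prof_leave() (i.e. it holds
 * bt2gctx_mtx), interval and gdump triggers are deferred by setting
 * tdata->enq_idump/tdata->enq_gdump; prof_leave() then replays any dumps that
 * were requested in the interim.
 */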

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt) {
    int nframes;

    cassert(config_prof);
    assert(bt->len == 0);
    assert(bt->vec != NULL);

    nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
    if (nframes <= 0) {
        return;
    }
    bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
    cassert(config_prof);

    return _URC_NO_REASON;
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
    prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
    void *ip;

    cassert(config_prof);

    ip = (void *)_Unwind_GetIP(context);
    if (ip == NULL) {
        return _URC_END_OF_STACK;
    }
    data->bt->vec[data->bt->len] = ip;
    data->bt->len++;
    if (data->bt->len == data->max) {
        return _URC_END_OF_STACK;
    }

    return _URC_NO_REASON;
}

void
prof_backtrace(prof_bt_t *bt) {
    prof_unwind_data_t data = {bt, PROF_BT_MAX};

    cassert(config_prof);

    _Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
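/*
 * Hand-unrolled frame walk: __builtin_frame_address() and
 * __builtin_return_address() require compile-time constant arguments, so the
 * walk over frames 0..127 below cannot be written as a runtime loop.
 */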
void
prof_backtrace(prof_bt_t *bt) {
#define BT_FRAME(i) \
    if ((i) < PROF_BT_MAX) { \
        void *p; \
        if (__builtin_frame_address(i) == 0) { \
            return; \
        } \
        p = __builtin_return_address(i); \
        if (p == NULL) { \
            return; \
        } \
        bt->vec[(i)] = p; \
        bt->len = (i) + 1; \
    } else { \
        return; \
    }

    cassert(config_prof);

    BT_FRAME(0)
    BT_FRAME(1)
    BT_FRAME(2)
    BT_FRAME(3)
    BT_FRAME(4)
    BT_FRAME(5)
    BT_FRAME(6)
    BT_FRAME(7)
    BT_FRAME(8)
    BT_FRAME(9)

    BT_FRAME(10)
    BT_FRAME(11)
    BT_FRAME(12)
    BT_FRAME(13)
    BT_FRAME(14)
    BT_FRAME(15)
    BT_FRAME(16)
    BT_FRAME(17)
    BT_FRAME(18)
    BT_FRAME(19)

    BT_FRAME(20)
    BT_FRAME(21)
    BT_FRAME(22)
    BT_FRAME(23)
    BT_FRAME(24)
    BT_FRAME(25)
    BT_FRAME(26)
    BT_FRAME(27)
    BT_FRAME(28)
    BT_FRAME(29)

    BT_FRAME(30)
    BT_FRAME(31)
    BT_FRAME(32)
    BT_FRAME(33)
    BT_FRAME(34)
    BT_FRAME(35)
    BT_FRAME(36)
    BT_FRAME(37)
    BT_FRAME(38)
    BT_FRAME(39)

    BT_FRAME(40)
    BT_FRAME(41)
    BT_FRAME(42)
    BT_FRAME(43)
    BT_FRAME(44)
    BT_FRAME(45)
    BT_FRAME(46)
    BT_FRAME(47)
    BT_FRAME(48)
    BT_FRAME(49)

    BT_FRAME(50)
    BT_FRAME(51)
    BT_FRAME(52)
    BT_FRAME(53)
    BT_FRAME(54)
    BT_FRAME(55)
    BT_FRAME(56)
    BT_FRAME(57)
    BT_FRAME(58)
    BT_FRAME(59)

    BT_FRAME(60)
    BT_FRAME(61)
    BT_FRAME(62)
    BT_FRAME(63)
    BT_FRAME(64)
    BT_FRAME(65)
    BT_FRAME(66)
    BT_FRAME(67)
    BT_FRAME(68)
    BT_FRAME(69)

    BT_FRAME(70)
    BT_FRAME(71)
    BT_FRAME(72)
    BT_FRAME(73)
    BT_FRAME(74)
    BT_FRAME(75)
    BT_FRAME(76)
    BT_FRAME(77)
    BT_FRAME(78)
    BT_FRAME(79)

    BT_FRAME(80)
    BT_FRAME(81)
    BT_FRAME(82)
    BT_FRAME(83)
    BT_FRAME(84)
    BT_FRAME(85)
    BT_FRAME(86)
    BT_FRAME(87)
    BT_FRAME(88)
    BT_FRAME(89)

    BT_FRAME(90)
    BT_FRAME(91)
    BT_FRAME(92)
    BT_FRAME(93)
    BT_FRAME(94)
    BT_FRAME(95)
    BT_FRAME(96)
    BT_FRAME(97)
    BT_FRAME(98)
    BT_FRAME(99)

    BT_FRAME(100)
    BT_FRAME(101)
    BT_FRAME(102)
    BT_FRAME(103)
    BT_FRAME(104)
    BT_FRAME(105)
    BT_FRAME(106)
    BT_FRAME(107)
    BT_FRAME(108)
    BT_FRAME(109)

    BT_FRAME(110)
    BT_FRAME(111)
    BT_FRAME(112)
    BT_FRAME(113)
    BT_FRAME(114)
    BT_FRAME(115)
    BT_FRAME(116)
    BT_FRAME(117)
    BT_FRAME(118)
    BT_FRAME(119)

    BT_FRAME(120)
    BT_FRAME(121)
    BT_FRAME(122)
    BT_FRAME(123)
    BT_FRAME(124)
    BT_FRAME(125)
    BT_FRAME(126)
    BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt) {
    cassert(config_prof);
    not_reached();
}
#endif

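/*
 * Each gctx is assigned one of the PROF_NCTX_LOCKS shared leaf mutexes below,
 * striped by a global creation counter so that lock contention is spread
 * across the table instead of serializing on one mutex; tdata's are striped
 * analogously by thr_uid over PROF_NTDATA_LOCKS.
 */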
static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
    unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

    return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
    return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
    /*
     * Create a single allocation that has space for vec of length bt->len.
     */
    size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
    prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
        size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
        true);
    if (gctx == NULL) {
        return NULL;
    }
    gctx->lock = prof_gctx_mutex_choose();
    /*
     * Set nlimbo to 1, in order to avoid a race condition with
     * prof_tctx_destroy()/prof_gctx_try_destroy().
     */
    gctx->nlimbo = 1;
    tctx_tree_new(&gctx->tctxs);
    /* Duplicate bt. */
    memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
    gctx->bt.vec = gctx->vec;
    gctx->bt.len = bt->len;
    return gctx;
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata) {
    cassert(config_prof);

    /*
     * Check that gctx is still unused by any thread cache before destroying
     * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
     * condition with this function, as does prof_tctx_destroy() in order to
     * avoid a race between the main body of prof_tctx_destroy() and entry
     * into this function.
     */
    prof_enter(tsd, tdata_self);
    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    assert(gctx->nlimbo != 0);
    if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
        /* Remove gctx from bt2gctx. */
        if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
            not_reached();
        }
        prof_leave(tsd, tdata_self);
        /* Destroy gctx. */
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
    } else {
        /*
         * Compensate for increment in prof_tctx_destroy() or
         * prof_lookup().
         */
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        prof_leave(tsd, tdata_self);
    }
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
    malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

    if (opt_prof_accum) {
        return false;
    }
    if (tctx->cnts.curobjs != 0) {
        return false;
    }
    if (tctx->prepared) {
        return false;
    }
    return true;
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
    if (opt_prof_accum) {
        return false;
    }
    if (!tctx_tree_empty(&gctx->tctxs)) {
        return false;
    }
    if (gctx->nlimbo != 0) {
        return false;
    }
    return true;
}

static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
    prof_tdata_t *tdata = tctx->tdata;
    prof_gctx_t *gctx = tctx->gctx;
    bool destroy_tdata, destroy_tctx, destroy_gctx;

    malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    assert(tctx->cnts.curobjs == 0);
    assert(tctx->cnts.curbytes == 0);
    assert(!opt_prof_accum);
    assert(tctx->cnts.accumobjs == 0);
    assert(tctx->cnts.accumbytes == 0);

    ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
    destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    switch (tctx->state) {
    case prof_tctx_state_nominal:
        tctx_tree_remove(&gctx->tctxs, tctx);
        destroy_tctx = true;
        if (prof_gctx_should_destroy(gctx)) {
            /*
             * Increment gctx->nlimbo in order to keep another
             * thread from winning the race to destroy gctx while
             * this one has gctx->lock dropped. Without this, it
             * would be possible for another thread to:
             *
             * 1) Sample an allocation associated with gctx.
             * 2) Deallocate the sampled object.
             * 3) Successfully prof_gctx_try_destroy(gctx).
             *
             * The result would be that gctx no longer exists by the
             * time this thread accesses it in
             * prof_gctx_try_destroy().
             */
            gctx->nlimbo++;
            destroy_gctx = true;
        } else {
            destroy_gctx = false;
        }
        break;
    case prof_tctx_state_dumping:
        /*
         * A dumping thread needs tctx to remain valid until dumping
         * has finished. Change state such that the dumping thread will
         * complete destruction during a late dump iteration phase.
         */
        tctx->state = prof_tctx_state_purgatory;
        destroy_tctx = false;
        destroy_gctx = false;
        break;
    default:
        not_reached();
        destroy_tctx = false;
        destroy_gctx = false;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    if (destroy_gctx) {
        prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
            tdata);
    }

    malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    if (destroy_tdata) {
        prof_tdata_destroy(tsd, tdata, false);
    }

    if (destroy_tctx) {
        idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
    }
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
    union {
        prof_gctx_t *p;
        void *v;
    } gctx, tgctx;
    union {
        prof_bt_t *p;
        void *v;
    } btkey;
    bool new_gctx;

    prof_enter(tsd, tdata);
    if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
        /* bt has never been seen before. Insert it. */
        prof_leave(tsd, tdata);
        tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
        if (tgctx.v == NULL) {
            return true;
        }
        prof_enter(tsd, tdata);
        if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
            gctx.p = tgctx.p;
            btkey.p = &gctx.p->bt;
            if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
                /* OOM. */
                prof_leave(tsd, tdata);
                idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true,
                    true);
                return true;
            }
            new_gctx = true;
        } else {
            new_gctx = false;
        }
    } else {
        tgctx.v = NULL;
        new_gctx = false;
    }

    if (!new_gctx) {
        /*
         * Increment nlimbo, in order to avoid a race condition with
         * prof_tctx_destroy()/prof_gctx_try_destroy().
         */
        malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
        gctx.p->nlimbo++;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
        new_gctx = false;

        if (tgctx.v != NULL) {
            /* Lost race to insert. */
            idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, true, true);
        }
    }
    prof_leave(tsd, tdata);

    *p_btkey = btkey.v;
    *p_gctx = gctx.p;
    *p_new_gctx = new_gctx;
    return false;
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
    union {
        prof_tctx_t *p;
        void *v;
    } ret;
    prof_tdata_t *tdata;
    bool not_found;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL) {
        return NULL;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
    not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
    if (!not_found) { /* Note double negative! */
        ret.p->prepared = true;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
    if (not_found) {
        void *btkey;
        prof_gctx_t *gctx;
        bool new_gctx, error;

        /*
         * This thread's cache lacks bt. Look for it in the global
         * cache.
         */
        if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
            &new_gctx)) {
            return NULL;
        }

        /* Link a prof_tctx_t into gctx for this thread. */
        ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
            size2index(sizeof(prof_tctx_t)), false, NULL, true,
            arena_ichoose(tsd, NULL), true);
        if (ret.p == NULL) {
            if (new_gctx) {
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            }
            return NULL;
        }
        ret.p->tdata = tdata;
        ret.p->thr_uid = tdata->thr_uid;
        ret.p->thr_discrim = tdata->thr_discrim;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        ret.p->gctx = gctx;
        ret.p->tctx_uid = tdata->tctx_uid_next++;
        ret.p->prepared = true;
        ret.p->state = prof_tctx_state_initializing;
        malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
        error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
        malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
        if (error) {
            if (new_gctx) {
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            }
            idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
            return NULL;
        }
        malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
        ret.p->state = prof_tctx_state_nominal;
        tctx_tree_insert(&gctx->tctxs, ret.p);
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    }

    return ret.p;
}
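
/*
 * Illustrative caller sketch (hypothetical; the real callers sit in the
 * allocation sampling path elsewhere in jemalloc):
 *
 *     void *vec[PROF_BT_MAX];
 *     prof_bt_t bt;
 *     bt_init(&bt, vec);
 *     prof_backtrace(&bt);
 *     prof_tctx_t *tctx = prof_lookup(tsd, &bt);
 *
 * A NULL return means either no tdata was available or allocation failed.
 */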

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless heap
 * profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled. Avoiding floating point code is
 * important on memory-constrained systems, but it also enables a workaround for
 * versions of glibc that don't properly save/restore floating point registers
 * during dynamic lazy symbol loading (which internally calls into whatever
 * malloc implementation happens to be integrated into the application). Note
 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
 * memory moves, so jemalloc must be compiled with such optimizations disabled
 * (e.g. -mno-sse) in order for the workaround to be complete.
 */
void
prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    if (!config_prof) {
        return;
    }

    if (lg_prof_sample == 0) {
        tdata->bytes_until_sample = 0;
        return;
    }

    /*
     * Compute sample interval as a geometrically distributed random
     * variable with mean (2^lg_prof_sample).
     *
     *   tdata->bytes_until_sample = ceil(log(u) / log(1 - p)),
     *
     *   where p = 1 / 2^lg_prof_sample and u is a uniform random deviate
     *   in (0, 1).
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    r = prng_lg_range_u64(&tdata->prng_state, 53);
    u = (double)r * (1.0/9007199254740992.0L);
    tdata->bytes_until_sample = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
        + (uint64_t)1U;
#endif
}
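
/*
 * Worked example (illustrative): with the default lg_prof_sample of 19,
 * p = 2^-19, so the mean sample interval is 2^19 = 524288 bytes (512 KiB).
 * For u = 0.5, ceil(log(0.5) / log(1 - 2^-19)) ~= 363409 bytes until the next
 * sampled allocation.
 */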
895
Jason Evans772163b2014-01-17 15:40:52 -0800896#ifdef JEMALLOC_JET
Jason Evans20c31de2014-10-02 23:01:10 -0700897static prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -0800898prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
899 void *arg) {
Jason Evans20c31de2014-10-02 23:01:10 -0700900 size_t *tdata_count = (size_t *)arg;
901
902 (*tdata_count)++;
903
Jason Evansf4086432017-01-19 18:15:45 -0800904 return NULL;
Jason Evans20c31de2014-10-02 23:01:10 -0700905}
906
907size_t
Jason Evansc4c25922017-01-15 16:56:30 -0800908prof_tdata_count(void) {
Jason Evans20c31de2014-10-02 23:01:10 -0700909 size_t tdata_count = 0;
Jason Evansc1e00ef2016-05-10 22:21:10 -0700910 tsdn_t *tsdn;
Jason Evans20c31de2014-10-02 23:01:10 -0700911
Jason Evansc1e00ef2016-05-10 22:21:10 -0700912 tsdn = tsdn_fetch();
913 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans20c31de2014-10-02 23:01:10 -0700914 tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
915 (void *)&tdata_count);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700916 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evans20c31de2014-10-02 23:01:10 -0700917
Jason Evansf4086432017-01-19 18:15:45 -0800918 return tdata_count;
Jason Evans20c31de2014-10-02 23:01:10 -0700919}
920#endif
921
922#ifdef JEMALLOC_JET
Jason Evans772163b2014-01-17 15:40:52 -0800923size_t
Jason Evansc4c25922017-01-15 16:56:30 -0800924prof_bt_count(void) {
Jason Evans772163b2014-01-17 15:40:52 -0800925 size_t bt_count;
Jason Evans5460aa62014-09-22 21:09:23 -0700926 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -0700927 prof_tdata_t *tdata;
Jason Evans772163b2014-01-17 15:40:52 -0800928
Jason Evans029d44c2014-10-04 11:12:53 -0700929 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -0700930 tdata = prof_tdata_get(tsd, false);
Jason Evansc4c25922017-01-15 16:56:30 -0800931 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -0800932 return 0;
Jason Evansc4c25922017-01-15 16:56:30 -0800933 }
Jason Evans772163b2014-01-17 15:40:52 -0800934
Jason Evansc1e00ef2016-05-10 22:21:10 -0700935 malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -0700936 bt_count = ckh_count(&bt2gctx);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700937 malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
Jason Evans772163b2014-01-17 15:40:52 -0800938
Jason Evansf4086432017-01-19 18:15:45 -0800939 return bt_count;
Jason Evans772163b2014-01-17 15:40:52 -0800940}
941#endif
942
943#ifdef JEMALLOC_JET
944#undef prof_dump_open
Jason Evansc0cc5db2017-01-19 21:41:41 -0800945#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
Jason Evans772163b2014-01-17 15:40:52 -0800946#endif
947static int
Jason Evansc4c25922017-01-15 16:56:30 -0800948prof_dump_open(bool propagate_err, const char *filename) {
Jason Evans772163b2014-01-17 15:40:52 -0800949 int fd;
Jason Evans4f37ef62014-01-16 13:23:56 -0800950
Jason Evans772163b2014-01-17 15:40:52 -0800951 fd = creat(filename, 0644);
Jason Evans551ebc42014-10-03 10:16:09 -0700952 if (fd == -1 && !propagate_err) {
Jason Evans772163b2014-01-17 15:40:52 -0800953 malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
954 filename);
Jason Evansc4c25922017-01-15 16:56:30 -0800955 if (opt_abort) {
Jason Evans772163b2014-01-17 15:40:52 -0800956 abort();
Jason Evansc4c25922017-01-15 16:56:30 -0800957 }
Jason Evans4f37ef62014-01-16 13:23:56 -0800958 }
959
Jason Evansf4086432017-01-19 18:15:45 -0800960 return fd;
Jason Evans4f37ef62014-01-16 13:23:56 -0800961}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif

static bool
prof_dump_flush(bool propagate_err) {
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (!propagate_err) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort) {
                abort();
            }
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return ret;
}

static bool
prof_dump_close(bool propagate_err) {
    bool ret;

    assert(prof_dump_fd != -1);
    ret = prof_dump_flush(propagate_err);
    close(prof_dump_fd);
    prof_dump_fd = -1;

    return ret;
}

static bool
prof_dump_write(bool propagate_err, const char *s) {
    size_t i, slen, n;

    cassert(config_prof);

    i = 0;
    slen = strlen(s);
    while (i < slen) {
        /* Flush the buffer if it is full. */
        if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
            if (prof_dump_flush(propagate_err) && propagate_err) {
                return true;
            }
        }

        if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
            /* Finish writing. */
            n = slen - i;
        } else {
            /* Write as much of s as will fit. */
            n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
        }
        memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
        prof_dump_buf_end += n;
        i += n;
    }

    return false;
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...) {
    bool ret;
    va_list ap;
    char buf[PROF_PRINTF_BUFSIZE];

    va_start(ap, format);
    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    va_end(ap);
    ret = prof_dump_write(propagate_err, buf);

    return ret;
}
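
/*
 * Illustrative usage: the dump iterators below route all record output
 * through this helper, e.g. prof_dump_printf(err, "  t%"FMTu64": ...",
 * tctx->thr_uid, ...), staging bytes in prof_dump_buf and flushing to
 * prof_dump_fd as the buffer fills.
 */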
1048
Jason Evans602c8e02014-08-18 16:22:13 -07001049static void
Jason Evansc4c25922017-01-15 16:56:30 -08001050prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001051 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001052
Jason Evansc1e00ef2016-05-10 22:21:10 -07001053 malloc_mutex_lock(tsdn, tctx->gctx->lock);
Jason Evans764b0002015-03-14 14:01:35 -07001054
1055 switch (tctx->state) {
1056 case prof_tctx_state_initializing:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001057 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001058 return;
Jason Evans764b0002015-03-14 14:01:35 -07001059 case prof_tctx_state_nominal:
1060 tctx->state = prof_tctx_state_dumping;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001061 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001062
Jason Evans764b0002015-03-14 14:01:35 -07001063 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
Jason Evans3a81cbd2014-08-16 12:58:55 -07001064
Jason Evans764b0002015-03-14 14:01:35 -07001065 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1066 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1067 if (opt_prof_accum) {
1068 tdata->cnt_summed.accumobjs +=
1069 tctx->dump_cnts.accumobjs;
1070 tdata->cnt_summed.accumbytes +=
1071 tctx->dump_cnts.accumbytes;
1072 }
1073 break;
1074 case prof_tctx_state_dumping:
1075 case prof_tctx_state_purgatory:
1076 not_reached();
Jason Evans602c8e02014-08-18 16:22:13 -07001077 }
1078}
1079
Jason Evans602c8e02014-08-18 16:22:13 -07001080static void
Jason Evansc4c25922017-01-15 16:56:30 -08001081prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001082 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001083
Jason Evans602c8e02014-08-18 16:22:13 -07001084 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1085 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1086 if (opt_prof_accum) {
1087 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1088 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1089 }
1090}
1091
Jason Evans602c8e02014-08-18 16:22:13 -07001092static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001093prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001094 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evansb2c0d632016-04-13 23:36:15 -07001095
Jason Evansc1e00ef2016-05-10 22:21:10 -07001096 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001097
1098 switch (tctx->state) {
1099 case prof_tctx_state_nominal:
1100 /* New since dumping started; ignore. */
1101 break;
1102 case prof_tctx_state_dumping:
1103 case prof_tctx_state_purgatory:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001104 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
Jason Evans602c8e02014-08-18 16:22:13 -07001105 break;
1106 default:
1107 not_reached();
Jason Evans3a81cbd2014-08-16 12:58:55 -07001108 }
1109
Jason Evansf4086432017-01-19 18:15:45 -08001110 return NULL;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001111}
1112
Jason Evansb2c0d632016-04-13 23:36:15 -07001113struct prof_tctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001114 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001115 bool propagate_err;
1116};
1117
Jason Evans602c8e02014-08-18 16:22:13 -07001118static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001119prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001120 struct prof_tctx_dump_iter_arg_s *arg =
1121 (struct prof_tctx_dump_iter_arg_s *)opaque;
1122
Jason Evansc1e00ef2016-05-10 22:21:10 -07001123 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001124
Jason Evansfb64ec22015-09-21 18:37:18 -07001125 switch (tctx->state) {
1126 case prof_tctx_state_initializing:
1127 case prof_tctx_state_nominal:
1128 /* Not captured by this dump. */
1129 break;
1130 case prof_tctx_state_dumping:
1131 case prof_tctx_state_purgatory:
Jason Evansb2c0d632016-04-13 23:36:15 -07001132 if (prof_dump_printf(arg->propagate_err,
Jason Evansfb64ec22015-09-21 18:37:18 -07001133 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1134 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1135 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001136 tctx->dump_cnts.accumbytes)) {
Jason Evansf4086432017-01-19 18:15:45 -08001137 return tctx;
Jason Evansc4c25922017-01-15 16:56:30 -08001138 }
Jason Evansfb64ec22015-09-21 18:37:18 -07001139 break;
1140 default:
1141 not_reached();
1142 }
Jason Evansf4086432017-01-19 18:15:45 -08001143 return NULL;
Jason Evans602c8e02014-08-18 16:22:13 -07001144}
1145
Jason Evans602c8e02014-08-18 16:22:13 -07001146static prof_tctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001147prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001148 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001149 prof_tctx_t *ret;
1150
Jason Evansc1e00ef2016-05-10 22:21:10 -07001151 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001152
Jason Evans602c8e02014-08-18 16:22:13 -07001153 switch (tctx->state) {
1154 case prof_tctx_state_nominal:
1155 /* New since dumping started; ignore. */
1156 break;
1157 case prof_tctx_state_dumping:
1158 tctx->state = prof_tctx_state_nominal;
1159 break;
1160 case prof_tctx_state_purgatory:
Jason Evans20c31de2014-10-02 23:01:10 -07001161 ret = tctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001162 goto label_return;
1163 default:
1164 not_reached();
1165 }
1166
1167 ret = NULL;
1168label_return:
Jason Evansf4086432017-01-19 18:15:45 -08001169 return ret;
Jason Evans602c8e02014-08-18 16:22:13 -07001170}
1171
Jason Evans6109fe02010-02-10 10:37:56 -08001172static void
Jason Evansc4c25922017-01-15 16:56:30 -08001173prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
Jason Evans7372b152012-02-10 20:22:09 -08001174 cassert(config_prof);
1175
Jason Evansc1e00ef2016-05-10 22:21:10 -07001176 malloc_mutex_lock(tsdn, gctx->lock);
Jason Evans6109fe02010-02-10 10:37:56 -08001177
Jason Evans4f37ef62014-01-16 13:23:56 -08001178 /*
Jason Evans602c8e02014-08-18 16:22:13 -07001179 * Increment nlimbo so that gctx won't go away before dump.
1180 * Additionally, link gctx into the dump list so that it is included in
Jason Evans4f37ef62014-01-16 13:23:56 -08001181 * prof_dump()'s second pass.
1182 */
Jason Evans602c8e02014-08-18 16:22:13 -07001183 gctx->nlimbo++;
1184 gctx_tree_insert(gctxs, gctx);
Jason Evans4f37ef62014-01-16 13:23:56 -08001185
Jason Evans602c8e02014-08-18 16:22:13 -07001186 memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans6109fe02010-02-10 10:37:56 -08001187
Jason Evansc1e00ef2016-05-10 22:21:10 -07001188 malloc_mutex_unlock(tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001189}
Jason Evans9ce3bfd2010-10-02 22:39:59 -07001190
Jason Evansb2c0d632016-04-13 23:36:15 -07001191struct prof_gctx_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001192 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001193 size_t leak_ngctx;
1194};
Jason Evans6109fe02010-02-10 10:37:56 -08001195
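/*
 * Merges every tctx's counters into its gctx and tallies, in leak_ngctx,
 * how many gctx's still have live objects; prof_leakcheck() later reports
 * that tally as the ">= N contexts" lower bound.
 */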
Jason Evansb2c0d632016-04-13 23:36:15 -07001196static prof_gctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001197prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001198 struct prof_gctx_merge_iter_arg_s *arg =
1199 (struct prof_gctx_merge_iter_arg_s *)opaque;
1200
Jason Evansc1e00ef2016-05-10 22:21:10 -07001201 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001202 tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001203 (void *)arg->tsdn);
Jason Evansc4c25922017-01-15 16:56:30 -08001204 if (gctx->cnt_summed.curobjs != 0) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001205 arg->leak_ngctx++;
Jason Evansc4c25922017-01-15 16:56:30 -08001206 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001207 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001208
Jason Evansf4086432017-01-19 18:15:45 -08001209 return NULL;
Jason Evans602c8e02014-08-18 16:22:13 -07001210}
1211
Jason Evans20c31de2014-10-02 23:01:10 -07001212static void
Jason Evansc4c25922017-01-15 16:56:30 -08001213prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
Jason Evans5460aa62014-09-22 21:09:23 -07001214 prof_tdata_t *tdata = prof_tdata_get(tsd, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001215 prof_gctx_t *gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001216
Jason Evans20c31de2014-10-02 23:01:10 -07001217 /*
1218 * Standard tree iteration won't work here, because as soon as we
1219 * decrement gctx->nlimbo and unlock gctx, another thread can
1220 * concurrently destroy it, which will corrupt the tree. Therefore,
1221 * tear down the tree one node at a time during iteration.
1222 */
1223 while ((gctx = gctx_tree_first(gctxs)) != NULL) {
1224 gctx_tree_remove(gctxs, gctx);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001225 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001226 {
1227 prof_tctx_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001228
Jason Evans20c31de2014-10-02 23:01:10 -07001229 next = NULL;
1230 do {
1231 prof_tctx_t *to_destroy =
1232 tctx_tree_iter(&gctx->tctxs, next,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001233 prof_tctx_finish_iter,
1234 (void *)tsd_tsdn(tsd));
Jason Evans20c31de2014-10-02 23:01:10 -07001235 if (to_destroy != NULL) {
1236 next = tctx_tree_next(&gctx->tctxs,
1237 to_destroy);
1238 tctx_tree_remove(&gctx->tctxs,
1239 to_destroy);
Jason Evans51a2ec92017-03-17 02:45:12 -07001240 idalloctm(tsd_tsdn(tsd), to_destroy,
1241 NULL, true, true);
Jason Evansc4c25922017-01-15 16:56:30 -08001242 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07001243 next = NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08001244 }
Jason Evans20c31de2014-10-02 23:01:10 -07001245 } while (next != NULL);
1246 }
1247 gctx->nlimbo--;
1248 if (prof_gctx_should_destroy(gctx)) {
1249 gctx->nlimbo++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001250 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evansc93ed812014-10-30 16:50:33 -07001251 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08001252 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001253 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evansc4c25922017-01-15 16:56:30 -08001254 }
Jason Evans20c31de2014-10-02 23:01:10 -07001255 }
Jason Evans602c8e02014-08-18 16:22:13 -07001256}
1257
Jason Evansb2c0d632016-04-13 23:36:15 -07001258struct prof_tdata_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001259 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001260 prof_cnt_t cnt_all;
1261};
Jason Evans602c8e02014-08-18 16:22:13 -07001262
Jason Evansb2c0d632016-04-13 23:36:15 -07001263static prof_tdata_t *
1264prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
Jason Evansc4c25922017-01-15 16:56:30 -08001265 void *opaque) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001266 struct prof_tdata_merge_iter_arg_s *arg =
1267 (struct prof_tdata_merge_iter_arg_s *)opaque;
1268
Jason Evansc1e00ef2016-05-10 22:21:10 -07001269 malloc_mutex_lock(arg->tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001270 if (!tdata->expired) {
Jason Evans602c8e02014-08-18 16:22:13 -07001271 size_t tabind;
1272 union {
1273 prof_tctx_t *p;
1274 void *v;
1275 } tctx;
1276
1277 tdata->dumping = true;
1278 memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans551ebc42014-10-03 10:16:09 -07001279 for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
Jason Evansc4c25922017-01-15 16:56:30 -08001280 &tctx.v);) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001281 prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08001282 }
Jason Evans602c8e02014-08-18 16:22:13 -07001283
Jason Evansb2c0d632016-04-13 23:36:15 -07001284 arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
1285 arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001286 if (opt_prof_accum) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001287 arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
1288 arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001289 }
Jason Evansc4c25922017-01-15 16:56:30 -08001290 } else {
Jason Evans602c8e02014-08-18 16:22:13 -07001291 tdata->dumping = false;
Jason Evansc4c25922017-01-15 16:56:30 -08001292 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001293 malloc_mutex_unlock(arg->tsdn, tdata->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001294
Jason Evansf4086432017-01-19 18:15:45 -08001295 return NULL;
Jason Evans602c8e02014-08-18 16:22:13 -07001296}
1297
1298static prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001299prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
1300 void *arg) {
Jason Evans602c8e02014-08-18 16:22:13 -07001301 bool propagate_err = *(bool *)arg;
1302
Jason Evansc4c25922017-01-15 16:56:30 -08001303 if (!tdata->dumping) {
Jason Evansf4086432017-01-19 18:15:45 -08001304 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08001305 }
Jason Evans602c8e02014-08-18 16:22:13 -07001306
1307 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001308 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001309 tdata->thr_uid, tdata->cnt_summed.curobjs,
1310 tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
1311 tdata->cnt_summed.accumbytes,
1312 (tdata->thread_name != NULL) ? " " : "",
Jason Evansc4c25922017-01-15 16:56:30 -08001313 (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
Jason Evansf4086432017-01-19 18:15:45 -08001314 return tdata;
Jason Evansc4c25922017-01-15 16:56:30 -08001315 }
Jason Evansf4086432017-01-19 18:15:45 -08001316 return NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001317}
1318
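/*
 * Under JEMALLOC_JET the function below is compiled as
 * prof_dump_header_impl and published through the prof_dump_header function
 * pointer (see the matching #undef/#define pair after the definition),
 * which lets tests interpose their own header writer; in regular builds
 * this indirection disappears.
 */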
Jason Evans20c31de2014-10-02 23:01:10 -07001319#ifdef JEMALLOC_JET
1320#undef prof_dump_header
Jason Evansc0cc5db2017-01-19 21:41:41 -08001321#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
Jason Evans20c31de2014-10-02 23:01:10 -07001322#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001323static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001324prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) {
Jason Evans602c8e02014-08-18 16:22:13 -07001325 bool ret;
Jason Evansa881cd22010-10-02 15:18:50 -07001326
Jason Evans602c8e02014-08-18 16:22:13 -07001327 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001328 "heap_v2/%"FMTu64"\n"
1329 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001330 ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001331 cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
Jason Evansf4086432017-01-19 18:15:45 -08001332 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08001333 }
Jason Evans4f37ef62014-01-16 13:23:56 -08001334
Jason Evansc1e00ef2016-05-10 22:21:10 -07001335 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001336 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1337 (void *)&propagate_err) != NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001338 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08001339 return ret;
Jason Evansa881cd22010-10-02 15:18:50 -07001340}
Jason Evans20c31de2014-10-02 23:01:10 -07001341#ifdef JEMALLOC_JET
1342#undef prof_dump_header
Jason Evansc0cc5db2017-01-19 21:41:41 -08001343#define prof_dump_header JEMALLOC_N(prof_dump_header)
Jason Evans20c31de2014-10-02 23:01:10 -07001344prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
1345#endif
Jason Evansa881cd22010-10-02 15:18:50 -07001346
Jason Evans22ca8552010-03-02 11:57:30 -08001347static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001348prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
Jason Evansc4c25922017-01-15 16:56:30 -08001349 const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001350 bool ret;
Jason Evans6109fe02010-02-10 10:37:56 -08001351 unsigned i;
Jason Evansb2c0d632016-04-13 23:36:15 -07001352 struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
Jason Evans6109fe02010-02-10 10:37:56 -08001353
Jason Evans7372b152012-02-10 20:22:09 -08001354 cassert(config_prof);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001355 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001356
Jason Evans602c8e02014-08-18 16:22:13 -07001357	/* Avoid dumping gctx's that have no useful data. */
Jason Evans551ebc42014-10-03 10:16:09 -07001358 if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
Jason Evans602c8e02014-08-18 16:22:13 -07001359 (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1360 assert(gctx->cnt_summed.curobjs == 0);
1361 assert(gctx->cnt_summed.curbytes == 0);
1362 assert(gctx->cnt_summed.accumobjs == 0);
1363 assert(gctx->cnt_summed.accumbytes == 0);
Jason Evans4f37ef62014-01-16 13:23:56 -08001364 ret = false;
1365 goto label_return;
Jason Evansa881cd22010-10-02 15:18:50 -07001366 }
1367
Jason Evans602c8e02014-08-18 16:22:13 -07001368 if (prof_dump_printf(propagate_err, "@")) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001369 ret = true;
1370 goto label_return;
Jason Evans6109fe02010-02-10 10:37:56 -08001371 }
Jason Evans4f37ef62014-01-16 13:23:56 -08001372 for (i = 0; i < bt->len; i++) {
Jason Evans5fae7dc2015-07-23 13:56:25 -07001373 if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
Jason Evans4f37ef62014-01-16 13:23:56 -08001374 (uintptr_t)bt->vec[i])) {
1375 ret = true;
1376 goto label_return;
1377 }
1378 }
Jason Evans22ca8552010-03-02 11:57:30 -08001379
Jason Evans602c8e02014-08-18 16:22:13 -07001380 if (prof_dump_printf(propagate_err,
1381 "\n"
Jason Evans5fae7dc2015-07-23 13:56:25 -07001382 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001383 gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1384 gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1385 ret = true;
1386 goto label_return;
1387 }
1388
Jason Evansc1e00ef2016-05-10 22:21:10 -07001389 prof_tctx_dump_iter_arg.tsdn = tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001390 prof_tctx_dump_iter_arg.propagate_err = propagate_err;
Jason Evans602c8e02014-08-18 16:22:13 -07001391 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
Jason Evansb2c0d632016-04-13 23:36:15 -07001392 (void *)&prof_tctx_dump_iter_arg) != NULL) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001393 ret = true;
1394 goto label_return;
1395 }
1396
Jason Evans772163b2014-01-17 15:40:52 -08001397 ret = false;
Jason Evans4f37ef62014-01-16 13:23:56 -08001398label_return:
Jason Evansf4086432017-01-19 18:15:45 -08001399 return ret;
Jason Evans6109fe02010-02-10 10:37:56 -08001400}
1401
Jason Evans788d29d2016-02-20 23:46:14 -08001402#ifndef _WIN32
Jason Evanse42c3092015-07-22 15:44:47 -07001403JEMALLOC_FORMAT_PRINTF(1, 2)
Jason Evans8e33c212015-05-01 09:03:20 -07001404static int
Jason Evansc4c25922017-01-15 16:56:30 -08001405prof_open_maps(const char *format, ...) {
Jason Evans8e33c212015-05-01 09:03:20 -07001406 int mfd;
1407 va_list ap;
1408 char filename[PATH_MAX + 1];
1409
1410 va_start(ap, format);
1411 malloc_vsnprintf(filename, sizeof(filename), format, ap);
1412 va_end(ap);
1413 mfd = open(filename, O_RDONLY);
1414
Jason Evansf4086432017-01-19 18:15:45 -08001415 return mfd;
Jason Evans8e33c212015-05-01 09:03:20 -07001416}
Jason Evans788d29d2016-02-20 23:46:14 -08001417#endif
1418
1419static int
Jason Evansc4c25922017-01-15 16:56:30 -08001420prof_getpid(void) {
Jason Evans788d29d2016-02-20 23:46:14 -08001421#ifdef _WIN32
Jason Evansf4086432017-01-19 18:15:45 -08001422 return GetCurrentProcessId();
Jason Evans788d29d2016-02-20 23:46:14 -08001423#else
Jason Evansf4086432017-01-19 18:15:45 -08001424 return getpid();
Jason Evans788d29d2016-02-20 23:46:14 -08001425#endif
1426}
Jason Evans8e33c212015-05-01 09:03:20 -07001427
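/*
 * Appends "\nMAPPED_LIBRARIES:\n" plus the raw contents of the process's
 * memory map (/proc/<pid>/maps on Linux, /proc/curproc/map on FreeBSD) to
 * the dump; presumably this is what lets jeprof resolve raw backtrace
 * addresses to mapped libraries.  Not implemented on Windows.
 */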
Jason Evans22ca8552010-03-02 11:57:30 -08001428static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001429prof_dump_maps(bool propagate_err) {
Jason Evans93f39f82013-10-21 15:07:40 -07001430 bool ret;
Jason Evansc7177182010-02-11 09:25:56 -08001431 int mfd;
Jason Evansc7177182010-02-11 09:25:56 -08001432
Jason Evans7372b152012-02-10 20:22:09 -08001433 cassert(config_prof);
Harald Weppnerc2da2592014-03-18 00:00:14 -07001434#ifdef __FreeBSD__
Jason Evans8e33c212015-05-01 09:03:20 -07001435 mfd = prof_open_maps("/proc/curproc/map");
rustyx7f283982016-01-30 14:51:16 +01001436#elif defined(_WIN32)
1437	mfd = -1; /* Not implemented. */
Harald Weppnerc2da2592014-03-18 00:00:14 -07001438#else
Jason Evans8e33c212015-05-01 09:03:20 -07001439 {
Jason Evans788d29d2016-02-20 23:46:14 -08001440 int pid = prof_getpid();
Jason Evans8e33c212015-05-01 09:03:20 -07001441
1442 mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
Jason Evansc4c25922017-01-15 16:56:30 -08001443 if (mfd == -1) {
Jason Evans8e33c212015-05-01 09:03:20 -07001444 mfd = prof_open_maps("/proc/%d/maps", pid);
Jason Evansc4c25922017-01-15 16:56:30 -08001445 }
Jason Evans8e33c212015-05-01 09:03:20 -07001446 }
Harald Weppnerc2da2592014-03-18 00:00:14 -07001447#endif
Jason Evansc7177182010-02-11 09:25:56 -08001448 if (mfd != -1) {
1449 ssize_t nread;
1450
Jason Evans4f37ef62014-01-16 13:23:56 -08001451 if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
Jason Evans93f39f82013-10-21 15:07:40 -07001452 propagate_err) {
1453 ret = true;
1454 goto label_return;
1455 }
Jason Evansc7177182010-02-11 09:25:56 -08001456 nread = 0;
1457 do {
1458 prof_dump_buf_end += nread;
Jason Evanscd9a1342012-03-21 18:33:03 -07001459 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
Jason Evansc7177182010-02-11 09:25:56 -08001460 /* Make space in prof_dump_buf before read(). */
Jason Evans4f37ef62014-01-16 13:23:56 -08001461 if (prof_dump_flush(propagate_err) &&
Jason Evans93f39f82013-10-21 15:07:40 -07001462 propagate_err) {
1463 ret = true;
1464 goto label_return;
1465 }
Jason Evansc7177182010-02-11 09:25:56 -08001466 }
1467 nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
Jason Evanscd9a1342012-03-21 18:33:03 -07001468 PROF_DUMP_BUFSIZE - prof_dump_buf_end);
Jason Evansc7177182010-02-11 09:25:56 -08001469 } while (nread > 0);
Jason Evans93f39f82013-10-21 15:07:40 -07001470 } else {
1471 ret = true;
1472 goto label_return;
1473 }
Jason Evans22ca8552010-03-02 11:57:30 -08001474
Jason Evans93f39f82013-10-21 15:07:40 -07001475 ret = false;
1476label_return:
Jason Evansc4c25922017-01-15 16:56:30 -08001477 if (mfd != -1) {
Jason Evans93f39f82013-10-21 15:07:40 -07001478 close(mfd);
Jason Evansc4c25922017-01-15 16:56:30 -08001479 }
Jason Evansf4086432017-01-19 18:15:45 -08001480 return ret;
Jason Evansc7177182010-02-11 09:25:56 -08001481}
1482
Jason Evansdc391ad2016-05-04 12:14:36 -07001483/*
1484 * See prof_sample_threshold_update() comment for why the body of this function
1485 * is conditionally compiled.
1486 */
Jason Evans4f37ef62014-01-16 13:23:56 -08001487static void
Jason Evans602c8e02014-08-18 16:22:13 -07001488prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
Jason Evansc4c25922017-01-15 16:56:30 -08001489 const char *filename) {
Jason Evansdc391ad2016-05-04 12:14:36 -07001490#ifdef JEMALLOC_PROF
1491 /*
1492	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
1493 * differ slightly from what jeprof reports, because here we scale the
1494 * summary values, whereas jeprof scales each context individually and
1495 * reports the sums of the scaled values.
1496 */
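	/*
	 * Worked example with hypothetical numbers: for lg_prof_sample = 19
	 * the sample period is 2^19 = 524288 bytes.  If curbytes = 1048576
	 * and curobjs = 2, the mean sampled size is 524288, so ratio = 1.0,
	 * scale_factor = 1/(1 - e^-1) ~= 1.582, and the summary reports
	 * roughly 1.66 million bytes in ~3 objects.
	 */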
Jason Evans4f37ef62014-01-16 13:23:56 -08001497 if (cnt_all->curbytes != 0) {
Jason Evansdc391ad2016-05-04 12:14:36 -07001498 double sample_period = (double)((uint64_t)1 << lg_prof_sample);
1499 double ratio = (((double)cnt_all->curbytes) /
1500 (double)cnt_all->curobjs) / sample_period;
1501 double scale_factor = 1.0 / (1.0 - exp(-ratio));
1502 uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
1503 * scale_factor);
1504 uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
1505 scale_factor);
1506
1507 malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
1508 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
1509 curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1510 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
Jason Evans4f37ef62014-01-16 13:23:56 -08001511 malloc_printf(
Jason Evans70417202015-05-01 12:31:12 -07001512 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
Jason Evans4f37ef62014-01-16 13:23:56 -08001513 filename);
1514 }
Jason Evansdc391ad2016-05-04 12:14:36 -07001515#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001516}
1517
Jason Evansb2c0d632016-04-13 23:36:15 -07001518struct prof_gctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001519 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001520 bool propagate_err;
1521};
1522
Jason Evans602c8e02014-08-18 16:22:13 -07001523static prof_gctx_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001524prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
Jason Evans602c8e02014-08-18 16:22:13 -07001525 prof_gctx_t *ret;
Jason Evansb2c0d632016-04-13 23:36:15 -07001526 struct prof_gctx_dump_iter_arg_s *arg =
1527 (struct prof_gctx_dump_iter_arg_s *)opaque;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001528
Jason Evansc1e00ef2016-05-10 22:21:10 -07001529 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001530
Jason Evansc1e00ef2016-05-10 22:21:10 -07001531 if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
Jason Evansb2c0d632016-04-13 23:36:15 -07001532 gctxs)) {
Jason Evans20c31de2014-10-02 23:01:10 -07001533 ret = gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001534 goto label_return;
1535 }
Jason Evans3a81cbd2014-08-16 12:58:55 -07001536
Jason Evans602c8e02014-08-18 16:22:13 -07001537 ret = NULL;
1538label_return:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001539 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evansf4086432017-01-19 18:15:45 -08001540 return ret;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001541}
1542
Jason Evans1ff09532017-01-16 11:09:24 -08001543static void
1544prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
1545 struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
1546 struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
Jason Evansc4c25922017-01-15 16:56:30 -08001547 prof_gctx_tree_t *gctxs) {
Jason Evans6109fe02010-02-10 10:37:56 -08001548 size_t tabind;
Jason Evans075e77c2010-09-20 19:53:25 -07001549 union {
Jason Evans602c8e02014-08-18 16:22:13 -07001550 prof_gctx_t *p;
Jason Evans075e77c2010-09-20 19:53:25 -07001551 void *v;
Jason Evans602c8e02014-08-18 16:22:13 -07001552 } gctx;
Jason Evans6109fe02010-02-10 10:37:56 -08001553
Jason Evansc93ed812014-10-30 16:50:33 -07001554 prof_enter(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001555
Jason Evans602c8e02014-08-18 16:22:13 -07001556 /*
1557 * Put gctx's in limbo and clear their counters in preparation for
1558 * summing.
1559 */
Jason Evans1ff09532017-01-16 11:09:24 -08001560 gctx_tree_new(gctxs);
1561 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
1562 prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
1563 }
Jason Evans602c8e02014-08-18 16:22:13 -07001564
1565 /*
1566 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1567 * stats and merge them into the associated gctx's.
1568 */
Jason Evans1ff09532017-01-16 11:09:24 -08001569 prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
1570 memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001571 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07001572 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
Jason Evans1ff09532017-01-16 11:09:24 -08001573 (void *)prof_tdata_merge_iter_arg);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001574 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001575
1576 /* Merge tctx stats into gctx's. */
Jason Evans1ff09532017-01-16 11:09:24 -08001577 prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
1578 prof_gctx_merge_iter_arg->leak_ngctx = 0;
1579 gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
1580 (void *)prof_gctx_merge_iter_arg);
Jason Evans602c8e02014-08-18 16:22:13 -07001581
Jason Evansc93ed812014-10-30 16:50:33 -07001582 prof_leave(tsd, tdata);
Jason Evans1ff09532017-01-16 11:09:24 -08001583}
Jason Evans4f37ef62014-01-16 13:23:56 -08001584
Jason Evans1ff09532017-01-16 11:09:24 -08001585static bool
1586prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
1587 bool leakcheck, prof_tdata_t *tdata,
1588 struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
1589 struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
1590 struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
Jason Evansc4c25922017-01-15 16:56:30 -08001591 prof_gctx_tree_t *gctxs) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001592 /* Create dump file. */
Jason Evans1ff09532017-01-16 11:09:24 -08001593 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
1594 return true;
1595 }
Jason Evans6109fe02010-02-10 10:37:56 -08001596
1597 /* Dump profile header. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001598 if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
Jason Evans1ff09532017-01-16 11:09:24 -08001599 &prof_tdata_merge_iter_arg->cnt_all)) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001600 goto label_write_error;
Jason Evans1ff09532017-01-16 11:09:24 -08001601 }
Jason Evans6109fe02010-02-10 10:37:56 -08001602
Jason Evans602c8e02014-08-18 16:22:13 -07001603 /* Dump per gctx profile stats. */
Jason Evans1ff09532017-01-16 11:09:24 -08001604 prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
1605 prof_gctx_dump_iter_arg->propagate_err = propagate_err;
1606 if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
1607 (void *)prof_gctx_dump_iter_arg) != NULL) {
Jason Evans3a81cbd2014-08-16 12:58:55 -07001608 goto label_write_error;
Jason Evans1ff09532017-01-16 11:09:24 -08001609 }
Jason Evans6109fe02010-02-10 10:37:56 -08001610
Jason Evansc7177182010-02-11 09:25:56 -08001611 /* Dump /proc/<pid>/maps if possible. */
Jason Evans1ff09532017-01-16 11:09:24 -08001612 if (prof_dump_maps(propagate_err)) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001613 goto label_write_error;
Jason Evans1ff09532017-01-16 11:09:24 -08001614 }
Jason Evansc7177182010-02-11 09:25:56 -08001615
Jason Evans1ff09532017-01-16 11:09:24 -08001616 if (prof_dump_close(propagate_err)) {
1617 return true;
1618 }
Jason Evans6109fe02010-02-10 10:37:56 -08001619
Jason Evans1ff09532017-01-16 11:09:24 -08001620 return false;
1621label_write_error:
1622 prof_dump_close(propagate_err);
1623 return true;
1624}
1625
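/*
 * prof_dump() runs in three phases under prof_dump_mtx: prof_dump_prep()
 * merges per-thread tctx stats into the gctx tree, prof_dump_file() writes
 * the header, per-gctx records, and memory map, and prof_gctx_finish()
 * releases the gctx's that were placed in limbo during the first phase.
 */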
1626static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001627prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
1628 bool leakcheck) {
Jason Evans1ff09532017-01-16 11:09:24 -08001629 prof_tdata_t *tdata;
1630 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
1631 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1632 struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
1633 prof_gctx_tree_t gctxs;
1634 bool err;
1635
1636 cassert(config_prof);
1637
1638 tdata = prof_tdata_get(tsd, true);
1639 if (tdata == NULL) {
1640 return true;
1641 }
1642
1643 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
1644
1645 prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
1646 &prof_gctx_merge_iter_arg, &gctxs);
1647 err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
1648 &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
1649 &prof_gctx_dump_iter_arg, &gctxs);
Jason Evans20c31de2014-10-02 23:01:10 -07001650 prof_gctx_finish(tsd, &gctxs);
Jason Evans1ff09532017-01-16 11:09:24 -08001651
Jason Evansc1e00ef2016-05-10 22:21:10 -07001652 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4f37ef62014-01-16 13:23:56 -08001653
Jason Evans1ff09532017-01-16 11:09:24 -08001654 if (err) {
1655 return true;
1656 }
1657
Jason Evansb2c0d632016-04-13 23:36:15 -07001658 if (leakcheck) {
1659 prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
1660 prof_gctx_merge_iter_arg.leak_ngctx, filename);
1661 }
Jason Evans1ff09532017-01-16 11:09:24 -08001662 return false;
Jason Evans6109fe02010-02-10 10:37:56 -08001663}
1664
Jason Evans1ff09532017-01-16 11:09:24 -08001665#ifdef JEMALLOC_JET
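/*
 * Test-only helper: computes the merged totals the same way prof_dump()
 * would (via prof_dump_prep()), then tears the gctx tree back down without
 * writing a dump file.
 */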
1666void
1667prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
Jason Evansc4c25922017-01-15 16:56:30 -08001668 uint64_t *accumbytes) {
Jason Evans1ff09532017-01-16 11:09:24 -08001669 tsd_t *tsd;
1670 prof_tdata_t *tdata;
1671 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
1672 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1673 prof_gctx_tree_t gctxs;
1674
1675 tsd = tsd_fetch();
1676 tdata = prof_tdata_get(tsd, false);
1677 if (tdata == NULL) {
1678 if (curobjs != NULL) {
1679 *curobjs = 0;
1680 }
1681 if (curbytes != NULL) {
1682 *curbytes = 0;
1683 }
1684 if (accumobjs != NULL) {
1685 *accumobjs = 0;
1686 }
1687 if (accumbytes != NULL) {
1688 *accumbytes = 0;
1689 }
1690 return;
1691 }
1692
1693 prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
1694 &prof_gctx_merge_iter_arg, &gctxs);
1695 prof_gctx_finish(tsd, &gctxs);
1696
1697 if (curobjs != NULL) {
1698 *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
1699 }
1700 if (curbytes != NULL) {
1701 *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
1702 }
1703 if (accumobjs != NULL) {
1704 *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
1705 }
1706 if (accumbytes != NULL) {
1707 *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
1708 }
1709}
1710#endif
1711
Jason Evansc0cc5db2017-01-19 21:41:41 -08001712#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
1713#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
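/*
 * Generated filenames follow "<prefix>.<pid>.<seq>.<v><vseq>.heap", where
 * <v> is 'f' (final), 'i' (interval), 'm' (manual), or 'u' (gdump) and the
 * <vseq> component is omitted when vseq == VSEQ_INVALID; e.g. a
 * hypothetical "jeprof.1234.0.f.heap" for a final dump.
 */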
Jason Evans6109fe02010-02-10 10:37:56 -08001714static void
Jason Evansc4c25922017-01-15 16:56:30 -08001715prof_dump_filename(char *filename, char v, uint64_t vseq) {
Jason Evans7372b152012-02-10 20:22:09 -08001716 cassert(config_prof);
1717
Jason Evans4f37ef62014-01-16 13:23:56 -08001718 if (vseq != VSEQ_INVALID) {
Jason Evansd81e4bd2012-03-06 14:57:45 -08001719 /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1720 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001721 "%s.%d.%"FMTu64".%c%"FMTu64".heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001722 opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001723 } else {
1724 /* "<prefix>.<pid>.<seq>.<v>.heap" */
1725 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001726 "%s.%d.%"FMTu64".%c.heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001727 opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
Jason Evans6109fe02010-02-10 10:37:56 -08001728 }
Jason Evans52386b22012-04-22 16:00:11 -07001729 prof_dump_seq++;
Jason Evans6109fe02010-02-10 10:37:56 -08001730}
1731
1732static void
Jason Evansc4c25922017-01-15 16:56:30 -08001733prof_fdump(void) {
Jason Evans5460aa62014-09-22 21:09:23 -07001734 tsd_t *tsd;
Jason Evans6109fe02010-02-10 10:37:56 -08001735 char filename[DUMP_FILENAME_BUFSIZE];
1736
Jason Evans7372b152012-02-10 20:22:09 -08001737 cassert(config_prof);
Jason Evans57efa7b2014-10-08 17:57:19 -07001738 assert(opt_prof_final);
1739 assert(opt_prof_prefix[0] != '\0');
Jason Evans7372b152012-02-10 20:22:09 -08001740
Jason Evansc4c25922017-01-15 16:56:30 -08001741 if (!prof_booted) {
Jason Evans6109fe02010-02-10 10:37:56 -08001742 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001743 }
Jason Evans029d44c2014-10-04 11:12:53 -07001744 tsd = tsd_fetch();
Jason Evans6109fe02010-02-10 10:37:56 -08001745
Jason Evansc1e00ef2016-05-10 22:21:10 -07001746 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001747 prof_dump_filename(filename, 'f', VSEQ_INVALID);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001748 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001749 prof_dump(tsd, false, filename, opt_prof_leak);
Jason Evans6109fe02010-02-10 10:37:56 -08001750}
1751
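/*
 * Without 64-bit atomics (JEMALLOC_ATOMIC_U64 undefined) the accumulator
 * falls back to a mutex-protected counter; otherwise it is a relaxed
 * atomic, as selected by the #ifdef below.
 */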
Jason Evansfa2d64c2017-02-12 17:03:46 -08001752bool
1753prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
1754 cassert(config_prof);
1755
1756#ifndef JEMALLOC_ATOMIC_U64
1757 if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
1758 WITNESS_RANK_PROF_ACCUM)) {
1759 return true;
1760 }
Jason Evansfa2d64c2017-02-12 17:03:46 -08001761 prof_accum->accumbytes = 0;
David Goldblatt30d74db2017-04-04 18:08:58 -07001762#else
1763 atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
1764#endif
Jason Evansfa2d64c2017-02-12 17:03:46 -08001765 return false;
1766}
1767
Jason Evans6109fe02010-02-10 10:37:56 -08001768void
Jason Evansc4c25922017-01-15 16:56:30 -08001769prof_idump(tsdn_t *tsdn) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001770 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001771 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001772
Jason Evans7372b152012-02-10 20:22:09 -08001773 cassert(config_prof);
1774
Jason Evansc4c25922017-01-15 16:56:30 -08001775 if (!prof_booted || tsdn_null(tsdn)) {
Jason Evans6109fe02010-02-10 10:37:56 -08001776 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001777 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001778 tsd = tsdn_tsd(tsdn);
Jason Evans5460aa62014-09-22 21:09:23 -07001779 tdata = prof_tdata_get(tsd, false);
Jason Evansc4c25922017-01-15 16:56:30 -08001780 if (tdata == NULL) {
Jason Evans52386b22012-04-22 16:00:11 -07001781 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001782 }
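	/*
	 * If this thread is inside a prof_enter() critical section
	 * (tdata->enq), defer the interval dump so that it runs from
	 * prof_leave() instead of reentrantly here.
	 */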
Jason Evans602c8e02014-08-18 16:22:13 -07001783 if (tdata->enq) {
1784 tdata->enq_idump = true;
Jason Evansd34f9e72010-02-11 13:19:21 -08001785 return;
1786 }
Jason Evans6109fe02010-02-10 10:37:56 -08001787
Jason Evanse7339702010-10-23 18:37:06 -07001788 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001789 char filename[PATH_MAX + 1];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001790 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001791 prof_dump_filename(filename, 'i', prof_dump_iseq);
1792 prof_dump_iseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001793 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001794 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001795 }
Jason Evans6109fe02010-02-10 10:37:56 -08001796}
1797
Jason Evans22ca8552010-03-02 11:57:30 -08001798bool
Jason Evansc4c25922017-01-15 16:56:30 -08001799prof_mdump(tsd_t *tsd, const char *filename) {
Jason Evans22ca8552010-03-02 11:57:30 -08001800 char filename_buf[DUMP_FILENAME_BUFSIZE];
Jason Evans6109fe02010-02-10 10:37:56 -08001801
Jason Evans7372b152012-02-10 20:22:09 -08001802 cassert(config_prof);
1803
Jason Evansc4c25922017-01-15 16:56:30 -08001804 if (!opt_prof || !prof_booted) {
Jason Evansf4086432017-01-19 18:15:45 -08001805 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08001806 }
Jason Evans6109fe02010-02-10 10:37:56 -08001807
Jason Evans22ca8552010-03-02 11:57:30 -08001808 if (filename == NULL) {
1809 /* No filename specified, so automatically generate one. */
Jason Evansc4c25922017-01-15 16:56:30 -08001810 if (opt_prof_prefix[0] == '\0') {
Jason Evansf4086432017-01-19 18:15:45 -08001811 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08001812 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001813 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001814 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1815 prof_dump_mseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001816 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001817 filename = filename_buf;
1818 }
Jason Evansf4086432017-01-19 18:15:45 -08001819 return prof_dump(tsd, true, filename, false);
Jason Evans6109fe02010-02-10 10:37:56 -08001820}
1821
1822void
Jason Evansc4c25922017-01-15 16:56:30 -08001823prof_gdump(tsdn_t *tsdn) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001824 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001825 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001826
Jason Evans7372b152012-02-10 20:22:09 -08001827 cassert(config_prof);
1828
Jason Evansc4c25922017-01-15 16:56:30 -08001829 if (!prof_booted || tsdn_null(tsdn)) {
Jason Evans6109fe02010-02-10 10:37:56 -08001830 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001831 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001832 tsd = tsdn_tsd(tsdn);
Jason Evans5460aa62014-09-22 21:09:23 -07001833 tdata = prof_tdata_get(tsd, false);
Jason Evansc4c25922017-01-15 16:56:30 -08001834 if (tdata == NULL) {
Jason Evans52386b22012-04-22 16:00:11 -07001835 return;
Jason Evansc4c25922017-01-15 16:56:30 -08001836 }
Jason Evans602c8e02014-08-18 16:22:13 -07001837 if (tdata->enq) {
1838 tdata->enq_gdump = true;
Jason Evans6109fe02010-02-10 10:37:56 -08001839 return;
1840 }
Jason Evans6109fe02010-02-10 10:37:56 -08001841
Jason Evanse7339702010-10-23 18:37:06 -07001842 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001843 char filename[DUMP_FILENAME_BUFSIZE];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001844 malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001845 prof_dump_filename(filename, 'u', prof_dump_useq);
1846 prof_dump_useq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001847 malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001848 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001849 }
Jason Evans6109fe02010-02-10 10:37:56 -08001850}
1851
1852static void
Jason Evansc4c25922017-01-15 16:56:30 -08001853prof_bt_hash(const void *key, size_t r_hash[2]) {
Jason Evans6109fe02010-02-10 10:37:56 -08001854 prof_bt_t *bt = (prof_bt_t *)key;
1855
Jason Evans7372b152012-02-10 20:22:09 -08001856 cassert(config_prof);
Jason Evans6109fe02010-02-10 10:37:56 -08001857
Jason Evansae03bf62013-01-22 12:02:08 -08001858 hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
Jason Evans6109fe02010-02-10 10:37:56 -08001859}
1860
1861static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001862prof_bt_keycomp(const void *k1, const void *k2) {
Jason Evans6109fe02010-02-10 10:37:56 -08001863 const prof_bt_t *bt1 = (prof_bt_t *)k1;
1864 const prof_bt_t *bt2 = (prof_bt_t *)k2;
1865
Jason Evans7372b152012-02-10 20:22:09 -08001866 cassert(config_prof);
1867
Jason Evansc4c25922017-01-15 16:56:30 -08001868 if (bt1->len != bt2->len) {
Jason Evansf4086432017-01-19 18:15:45 -08001869 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08001870 }
Jason Evans6109fe02010-02-10 10:37:56 -08001871 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1872}
1873
Jason Evans602c8e02014-08-18 16:22:13 -07001874JEMALLOC_INLINE_C uint64_t
Jason Evansc4c25922017-01-15 16:56:30 -08001875prof_thr_uid_alloc(tsdn_t *tsdn) {
Jason Evans9d8f3d22014-09-11 18:06:30 -07001876 uint64_t thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001877
Jason Evansc1e00ef2016-05-10 22:21:10 -07001878 malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001879 thr_uid = next_thr_uid;
1880 next_thr_uid++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001881 malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001882
Jason Evansf4086432017-01-19 18:15:45 -08001883 return thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001884}
1885
1886static prof_tdata_t *
Jason Evansb54d1602016-10-20 23:59:12 -07001887prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
Jason Evansc4c25922017-01-15 16:56:30 -08001888 char *thread_name, bool active) {
Jason Evans602c8e02014-08-18 16:22:13 -07001889 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001890
Jason Evans7372b152012-02-10 20:22:09 -08001891 cassert(config_prof);
1892
Jason Evans4d6a1342010-10-20 19:05:59 -07001893 /* Initialize an empty cache for this thread. */
Jason Evansb54d1602016-10-20 23:59:12 -07001894 tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
Jason Evansc1e00ef2016-05-10 22:21:10 -07001895 size2index(sizeof(prof_tdata_t)), false, NULL, true,
1896 arena_get(TSDN_NULL, 0, true), true);
Jason Evansc4c25922017-01-15 16:56:30 -08001897 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08001898 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08001899 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001900
Jason Evans602c8e02014-08-18 16:22:13 -07001901 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1902 tdata->thr_uid = thr_uid;
Jason Evans20c31de2014-10-02 23:01:10 -07001903 tdata->thr_discrim = thr_discrim;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001904 tdata->thread_name = thread_name;
Jason Evans20c31de2014-10-02 23:01:10 -07001905 tdata->attached = true;
1906 tdata->expired = false;
Jason Evans04211e22015-03-16 15:11:06 -07001907 tdata->tctx_uid_next = 0;
Jason Evans602c8e02014-08-18 16:22:13 -07001908
Jason Evansb54d1602016-10-20 23:59:12 -07001909 if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
1910 prof_bt_keycomp)) {
Jason Evans51a2ec92017-03-17 02:45:12 -07001911 idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
Jason Evansf4086432017-01-19 18:15:45 -08001912 return NULL;
Jason Evans4d6a1342010-10-20 19:05:59 -07001913 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001914
Jason Evans602c8e02014-08-18 16:22:13 -07001915 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1916 prof_sample_threshold_update(tdata);
Jason Evans4d6a1342010-10-20 19:05:59 -07001917
Jason Evans602c8e02014-08-18 16:22:13 -07001918 tdata->enq = false;
1919 tdata->enq_idump = false;
1920 tdata->enq_gdump = false;
Jason Evans52386b22012-04-22 16:00:11 -07001921
Jason Evans602c8e02014-08-18 16:22:13 -07001922 tdata->dumping = false;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001923 tdata->active = active;
Jason Evans4d6a1342010-10-20 19:05:59 -07001924
Jason Evansb54d1602016-10-20 23:59:12 -07001925 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001926 tdata_tree_insert(&tdatas, tdata);
Jason Evansb54d1602016-10-20 23:59:12 -07001927 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001928
Jason Evansf4086432017-01-19 18:15:45 -08001929 return tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07001930}
1931
1932prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08001933prof_tdata_init(tsd_t *tsd) {
Jason Evansf4086432017-01-19 18:15:45 -08001934 return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
1935 NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
Jason Evans602c8e02014-08-18 16:22:13 -07001936}
1937
Jason Evans602c8e02014-08-18 16:22:13 -07001938static bool
Jason Evansc4c25922017-01-15 16:56:30 -08001939prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
1940 if (tdata->attached && !even_if_attached) {
Jason Evansf4086432017-01-19 18:15:45 -08001941 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08001942 }
1943 if (ckh_count(&tdata->bt2tctx) != 0) {
Jason Evansf4086432017-01-19 18:15:45 -08001944 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08001945 }
Jason Evansf4086432017-01-19 18:15:45 -08001946 return true;
Jason Evans602c8e02014-08-18 16:22:13 -07001947}
1948
Jason Evansb2c0d632016-04-13 23:36:15 -07001949static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001950prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
Jason Evansc4c25922017-01-15 16:56:30 -08001951 bool even_if_attached) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001952 malloc_mutex_assert_owner(tsdn, tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001953
Jason Evansf4086432017-01-19 18:15:45 -08001954 return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
Jason Evansb2c0d632016-04-13 23:36:15 -07001955}
1956
Jason Evans602c8e02014-08-18 16:22:13 -07001957static void
Jason Evansb54d1602016-10-20 23:59:12 -07001958prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
Jason Evansc4c25922017-01-15 16:56:30 -08001959 bool even_if_attached) {
Jason Evansb54d1602016-10-20 23:59:12 -07001960 malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001961
Jason Evans602c8e02014-08-18 16:22:13 -07001962 tdata_tree_remove(&tdatas, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001963
Jason Evansc1e00ef2016-05-10 22:21:10 -07001964 assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
Jason Evansb2c0d632016-04-13 23:36:15 -07001965
Jason Evansdb722722016-03-23 20:29:33 -07001966 if (tdata->thread_name != NULL) {
Jason Evans51a2ec92017-03-17 02:45:12 -07001967 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
Jason Evansdb722722016-03-23 20:29:33 -07001968 }
Jason Evansb54d1602016-10-20 23:59:12 -07001969 ckh_delete(tsd, &tdata->bt2tctx);
Jason Evans51a2ec92017-03-17 02:45:12 -07001970 idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001971}
1972
1973static void
Jason Evansc4c25922017-01-15 16:56:30 -08001974prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
Jason Evansb54d1602016-10-20 23:59:12 -07001975 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
1976 prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
1977 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans20c31de2014-10-02 23:01:10 -07001978}
1979
1980static void
Jason Evansc4c25922017-01-15 16:56:30 -08001981prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
Jason Evans602c8e02014-08-18 16:22:13 -07001982 bool destroy_tdata;
1983
Jason Evansc1e00ef2016-05-10 22:21:10 -07001984 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001985 if (tdata->attached) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001986 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
1987 true);
Jason Evansf04a0be2014-10-04 15:03:49 -07001988 /*
1989 * Only detach if !destroy_tdata, because detaching would allow
1990 * another thread to win the race to destroy tdata.
1991 */
Jason Evansc4c25922017-01-15 16:56:30 -08001992 if (!destroy_tdata) {
Jason Evansf04a0be2014-10-04 15:03:49 -07001993 tdata->attached = false;
Jason Evansc4c25922017-01-15 16:56:30 -08001994 }
Jason Evans029d44c2014-10-04 11:12:53 -07001995 tsd_prof_tdata_set(tsd, NULL);
Jason Evansc4c25922017-01-15 16:56:30 -08001996 } else {
Jason Evans602c8e02014-08-18 16:22:13 -07001997 destroy_tdata = false;
Jason Evansc4c25922017-01-15 16:56:30 -08001998 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001999 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
Jason Evansc4c25922017-01-15 16:56:30 -08002000 if (destroy_tdata) {
Jason Evansb54d1602016-10-20 23:59:12 -07002001 prof_tdata_destroy(tsd, tdata, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002002 }
Jason Evans602c8e02014-08-18 16:22:13 -07002003}
2004
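/*
 * Detaches the current tdata and creates a replacement with the same
 * thr_uid but an incremented thr_discrim, carrying over the thread name
 * and active flag.
 */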
Jason Evans20c31de2014-10-02 23:01:10 -07002005prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08002006prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
Jason Evans20c31de2014-10-02 23:01:10 -07002007 uint64_t thr_uid = tdata->thr_uid;
2008 uint64_t thr_discrim = tdata->thr_discrim + 1;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002009 char *thread_name = (tdata->thread_name != NULL) ?
Jason Evansc1e00ef2016-05-10 22:21:10 -07002010 prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002011 bool active = tdata->active;
Jason Evans602c8e02014-08-18 16:22:13 -07002012
Jason Evans20c31de2014-10-02 23:01:10 -07002013 prof_tdata_detach(tsd, tdata);
Jason Evansf4086432017-01-19 18:15:45 -08002014 return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
2015 active);
Jason Evans602c8e02014-08-18 16:22:13 -07002016}
2017
Jason Evans20c31de2014-10-02 23:01:10 -07002018static bool
Jason Evansc4c25922017-01-15 16:56:30 -08002019prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
Jason Evans20c31de2014-10-02 23:01:10 -07002020 bool destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07002021
Jason Evansc1e00ef2016-05-10 22:21:10 -07002022 malloc_mutex_lock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07002023 if (!tdata->expired) {
2024 tdata->expired = true;
2025 destroy_tdata = tdata->attached ? false :
Jason Evansc1e00ef2016-05-10 22:21:10 -07002026 prof_tdata_should_destroy(tsdn, tdata, false);
Jason Evansc4c25922017-01-15 16:56:30 -08002027 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07002028 destroy_tdata = false;
Jason Evansc4c25922017-01-15 16:56:30 -08002029 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002030 malloc_mutex_unlock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07002031
Jason Evansf4086432017-01-19 18:15:45 -08002032 return destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07002033}
2034
2035static prof_tdata_t *
Jason Evansc4c25922017-01-15 16:56:30 -08002036prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
2037 void *arg) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002038 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07002039
Jason Evansc1e00ef2016-05-10 22:21:10 -07002040 return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07002041}
2042
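/*
 * Resets the global sample rate to 2^lg_sample bytes and expires every
 * tdata, destroying those that are no longer attached, with both
 * prof_dump_mtx and tdatas_mtx held.
 */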
2043void
Jason Evansc4c25922017-01-15 16:56:30 -08002044prof_reset(tsd_t *tsd, size_t lg_sample) {
Jason Evans20c31de2014-10-02 23:01:10 -07002045 prof_tdata_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07002046
2047 assert(lg_sample < (sizeof(uint64_t) << 3));
2048
Jason Evansb54d1602016-10-20 23:59:12 -07002049 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
2050 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07002051
2052 lg_prof_sample = lg_sample;
Jason Evans20c31de2014-10-02 23:01:10 -07002053
2054 next = NULL;
2055 do {
2056 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
Jason Evansb54d1602016-10-20 23:59:12 -07002057 prof_tdata_reset_iter, (void *)tsd);
Jason Evans20c31de2014-10-02 23:01:10 -07002058 if (to_destroy != NULL) {
2059 next = tdata_tree_next(&tdatas, to_destroy);
Jason Evansb54d1602016-10-20 23:59:12 -07002060 prof_tdata_destroy_locked(tsd, to_destroy, false);
Jason Evansc4c25922017-01-15 16:56:30 -08002061 } else {
Jason Evans20c31de2014-10-02 23:01:10 -07002062 next = NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002063 }
Jason Evans20c31de2014-10-02 23:01:10 -07002064 } while (next != NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07002065
Jason Evansb54d1602016-10-20 23:59:12 -07002066 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
2067 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4d6a1342010-10-20 19:05:59 -07002068}
2069
Jason Evanscd9a1342012-03-21 18:33:03 -07002070void
Jason Evansc4c25922017-01-15 16:56:30 -08002071prof_tdata_cleanup(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002072 prof_tdata_t *tdata;
Jason Evans4d6a1342010-10-20 19:05:59 -07002073
Jason Evansc4c25922017-01-15 16:56:30 -08002074 if (!config_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002075 return;
Jason Evansc4c25922017-01-15 16:56:30 -08002076 }
Jason Evans7372b152012-02-10 20:22:09 -08002077
Jason Evans5460aa62014-09-22 21:09:23 -07002078 tdata = tsd_prof_tdata_get(tsd);
Jason Evansc4c25922017-01-15 16:56:30 -08002079 if (tdata != NULL) {
Jason Evans5460aa62014-09-22 21:09:23 -07002080 prof_tdata_detach(tsd, tdata);
Jason Evansc4c25922017-01-15 16:56:30 -08002081 }
Jason Evans6109fe02010-02-10 10:37:56 -08002082}
2083
Jason Evansfc12c0b2014-10-03 23:25:30 -07002084bool
Jason Evansc4c25922017-01-15 16:56:30 -08002085prof_active_get(tsdn_t *tsdn) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002086 bool prof_active_current;
2087
Jason Evansc1e00ef2016-05-10 22:21:10 -07002088 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002089 prof_active_current = prof_active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002090 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002091 return prof_active_current;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002092}
2093
2094bool
Jason Evansc4c25922017-01-15 16:56:30 -08002095prof_active_set(tsdn_t *tsdn, bool active) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002096 bool prof_active_old;
2097
Jason Evansc1e00ef2016-05-10 22:21:10 -07002098 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002099 prof_active_old = prof_active;
2100 prof_active = active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002101 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002102 return prof_active_old;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002103}
2104
Jason Evans602c8e02014-08-18 16:22:13 -07002105const char *
Jason Evansc4c25922017-01-15 16:56:30 -08002106prof_thread_name_get(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002107 prof_tdata_t *tdata;
2108
Jason Evans5460aa62014-09-22 21:09:23 -07002109 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002110 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002111 return "";
Jason Evansc4c25922017-01-15 16:56:30 -08002112 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002113 return (tdata->thread_name != NULL ? tdata->thread_name : "");
Jason Evans602c8e02014-08-18 16:22:13 -07002114}
2115
Jason Evansfc12c0b2014-10-03 23:25:30 -07002116static char *
Jason Evansc4c25922017-01-15 16:56:30 -08002117prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002118 char *ret;
2119 size_t size;
2120
Jason Evansc4c25922017-01-15 16:56:30 -08002121 if (thread_name == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002122 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002123 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002124
2125 size = strlen(thread_name) + 1;
Jason Evansc4c25922017-01-15 16:56:30 -08002126 if (size == 1) {
Jason Evansf4086432017-01-19 18:15:45 -08002127 return "";
Jason Evansc4c25922017-01-15 16:56:30 -08002128 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002129
Jason Evansc1e00ef2016-05-10 22:21:10 -07002130 ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
2131 arena_get(TSDN_NULL, 0, true), true);
Jason Evansc4c25922017-01-15 16:56:30 -08002132 if (ret == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002133 return NULL;
Jason Evansc4c25922017-01-15 16:56:30 -08002134 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002135 memcpy(ret, thread_name, size);
Jason Evansf4086432017-01-19 18:15:45 -08002136 return ret;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002137}
2138
2139int
Jason Evansc4c25922017-01-15 16:56:30 -08002140prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
Jason Evans602c8e02014-08-18 16:22:13 -07002141 prof_tdata_t *tdata;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002142 unsigned i;
Jason Evans602c8e02014-08-18 16:22:13 -07002143 char *s;
2144
Jason Evans5460aa62014-09-22 21:09:23 -07002145 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002146 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002147 return EAGAIN;
Jason Evansc4c25922017-01-15 16:56:30 -08002148 }
Jason Evans602c8e02014-08-18 16:22:13 -07002149
Jason Evansfc12c0b2014-10-03 23:25:30 -07002150 /* Validate input. */
Jason Evansc4c25922017-01-15 16:56:30 -08002151 if (thread_name == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002152 return EFAULT;
Jason Evansc4c25922017-01-15 16:56:30 -08002153 }
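	/*
	 * Only graphic or blank characters pass the check below, which keeps
	 * control characters and newlines out of the per-thread lines that
	 * prof_tdata_dump_iter() emits.
	 */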
Jason Evansfc12c0b2014-10-03 23:25:30 -07002154 for (i = 0; thread_name[i] != '\0'; i++) {
2155 char c = thread_name[i];
Jason Evansc4c25922017-01-15 16:56:30 -08002156 if (!isgraph(c) && !isblank(c)) {
Jason Evansf4086432017-01-19 18:15:45 -08002157 return EFAULT;
Jason Evansc4c25922017-01-15 16:56:30 -08002158 }
Jason Evansfc12c0b2014-10-03 23:25:30 -07002159 }
2160
Jason Evansc1e00ef2016-05-10 22:21:10 -07002161 s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
Jason Evansc4c25922017-01-15 16:56:30 -08002162 if (s == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002163 return EAGAIN;
Jason Evansc4c25922017-01-15 16:56:30 -08002164 }
Jason Evans602c8e02014-08-18 16:22:13 -07002165
Jason Evansfc12c0b2014-10-03 23:25:30 -07002166 if (tdata->thread_name != NULL) {
Jason Evans51a2ec92017-03-17 02:45:12 -07002167 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002168 tdata->thread_name = NULL;
2169 }
Jason Evansc4c25922017-01-15 16:56:30 -08002170 if (strlen(s) > 0) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002171 tdata->thread_name = s;
Jason Evansc4c25922017-01-15 16:56:30 -08002172 }
Jason Evansf4086432017-01-19 18:15:45 -08002173 return 0;
Jason Evans602c8e02014-08-18 16:22:13 -07002174}
2175
2176bool
Jason Evansc4c25922017-01-15 16:56:30 -08002177prof_thread_active_get(tsd_t *tsd) {
Jason Evans5460aa62014-09-22 21:09:23 -07002178 prof_tdata_t *tdata;
2179
Jason Evans5460aa62014-09-22 21:09:23 -07002180 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002181 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002182 return false;
Jason Evansc4c25922017-01-15 16:56:30 -08002183 }
Jason Evansf4086432017-01-19 18:15:45 -08002184 return tdata->active;
Jason Evans602c8e02014-08-18 16:22:13 -07002185}
2186
2187bool
Jason Evansc4c25922017-01-15 16:56:30 -08002188prof_thread_active_set(tsd_t *tsd, bool active) {
Jason Evans602c8e02014-08-18 16:22:13 -07002189 prof_tdata_t *tdata;
2190
Jason Evans5460aa62014-09-22 21:09:23 -07002191 tdata = prof_tdata_get(tsd, true);
Jason Evansc4c25922017-01-15 16:56:30 -08002192 if (tdata == NULL) {
Jason Evansf4086432017-01-19 18:15:45 -08002193 return true;
Jason Evansc4c25922017-01-15 16:56:30 -08002194 }
Jason Evans602c8e02014-08-18 16:22:13 -07002195 tdata->active = active;
Jason Evansf4086432017-01-19 18:15:45 -08002196 return false;
Jason Evans602c8e02014-08-18 16:22:13 -07002197}
2198
Jason Evansfc12c0b2014-10-03 23:25:30 -07002199bool
Jason Evansc4c25922017-01-15 16:56:30 -08002200prof_thread_active_init_get(tsdn_t *tsdn) {
Jason Evansfc12c0b2014-10-03 23:25:30 -07002201 bool active_init;
2202
Jason Evansc1e00ef2016-05-10 22:21:10 -07002203 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002204 active_init = prof_thread_active_init;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002205 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
Jason Evansf4086432017-01-19 18:15:45 -08002206 return active_init;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002207}
2208
bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
    bool active_init_old;

    malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
    active_init_old = prof_thread_active_init;
    prof_thread_active_init = active_init;
    malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
    return active_init_old;
}

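/* Read the current gdump setting, under prof_gdump_mtx. */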
bool
prof_gdump_get(tsdn_t *tsdn) {
    bool prof_gdump_current;

    malloc_mutex_lock(tsdn, &prof_gdump_mtx);
    prof_gdump_current = prof_gdump_val;
    malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
    return prof_gdump_current;
}

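/* Update the gdump setting and return the old value. */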
bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
    bool prof_gdump_old;

    malloc_mutex_lock(tsdn, &prof_gdump_mtx);
    prof_gdump_old = prof_gdump_val;
    prof_gdump_val = gdump;
    malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
    return prof_gdump_old;
}

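/*
 * First of three bootstrapping phases: install the default dump filename
 * prefix.  This presumably runs before option parsing, which may then
 * overwrite opt_prof_prefix.
 */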
void
prof_boot0(void) {
    cassert(config_prof);

    memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
        sizeof(PROF_PREFIX_DEFAULT));
}

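/*
 * Second bootstrapping phase: finalize opt_prof and derive prof_interval
 * from opt_lg_prof_interval.
 */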
void
prof_boot1(void) {
    cassert(config_prof);

    /*
     * opt_prof must be in its final state before any arenas are
     * initialized, so this function must be executed early.
     */

    if (opt_prof_leak && !opt_prof) {
        /*
         * Enable opt_prof, but in such a way that profiles are never
         * automatically dumped.
         */
        opt_prof = true;
        opt_prof_gdump = false;
    } else if (opt_prof) {
        if (opt_lg_prof_interval >= 0) {
            prof_interval = (((uint64_t)1U) <<
                opt_lg_prof_interval);
        }
    }
}

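/*
 * Final bootstrapping phase: create the bt2gctx hash and the tdata tree,
 * initialize all profiling mutexes (including the shared gctx/tdata lock
 * tables), and register the final dump with atexit() if requested.
 * Returns true on error.
 */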
bool
prof_boot2(tsd_t *tsd) {
    cassert(config_prof);

    if (opt_prof) {
        unsigned i;

        lg_prof_sample = opt_lg_prof_sample;

        prof_active = opt_prof_active;
        if (malloc_mutex_init(&prof_active_mtx, "prof_active",
            WITNESS_RANK_PROF_ACTIVE)) {
            return true;
        }

        prof_gdump_val = opt_prof_gdump;
        if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
            WITNESS_RANK_PROF_GDUMP)) {
            return true;
        }

        prof_thread_active_init = opt_prof_thread_active_init;
        if (malloc_mutex_init(&prof_thread_active_init_mtx,
            "prof_thread_active_init",
            WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) {
            return true;
        }

        if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
            prof_bt_keycomp)) {
            return true;
        }
        if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
            WITNESS_RANK_PROF_BT2GCTX)) {
            return true;
        }

        tdata_tree_new(&tdatas);
        if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
            WITNESS_RANK_PROF_TDATAS)) {
            return true;
        }

        next_thr_uid = 0;
        if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
            WITNESS_RANK_PROF_NEXT_THR_UID)) {
            return true;
        }

        if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
            WITNESS_RANK_PROF_DUMP_SEQ)) {
            return true;
        }
        if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
            WITNESS_RANK_PROF_DUMP)) {
            return true;
        }

        if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
            atexit(prof_fdump) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort) {
                abort();
            }
        }

        gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
            b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
            CACHELINE);
        if (gctx_locks == NULL) {
            return true;
        }
        for (i = 0; i < PROF_NCTX_LOCKS; i++) {
            if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
                WITNESS_RANK_PROF_GCTX)) {
                return true;
            }
        }

        tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
            b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
            CACHELINE);
        if (tdata_locks == NULL) {
            return true;
        }
        for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
            if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
                WITNESS_RANK_PROF_TDATA)) {
                return true;
            }
        }
    }

#ifdef JEMALLOC_PROF_LIBGCC
    /*
     * Cause the backtracing machinery to allocate its internal state
     * before enabling profiling.
     */
    _Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

    prof_booted = true;

    return false;
}

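/*
 * Fork support: acquire all prof mutexes before fork() so that the child
 * does not inherit a mutex locked by a thread that will not exist in the
 * child.  Acquisition order here must stay consistent with the lock order
 * used elsewhere.
 */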
void
prof_prefork0(tsdn_t *tsdn) {
    if (config_prof && opt_prof) {
        unsigned i;

        malloc_mutex_prefork(tsdn, &prof_dump_mtx);
        malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
        malloc_mutex_prefork(tsdn, &tdatas_mtx);
        for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
            malloc_mutex_prefork(tsdn, &tdata_locks[i]);
        }
        for (i = 0; i < PROF_NCTX_LOCKS; i++) {
            malloc_mutex_prefork(tsdn, &gctx_locks[i]);
        }
    }
}

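/* Second prefork phase: acquire the remaining prof mutexes. */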
void
prof_prefork1(tsdn_t *tsdn) {
    if (config_prof && opt_prof) {
        malloc_mutex_prefork(tsdn, &prof_active_mtx);
        malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
        malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
        malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
        malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
    }
}

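/* Release the prof mutexes in the parent, in reverse acquisition order. */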
void
prof_postfork_parent(tsdn_t *tsdn) {
    if (config_prof && opt_prof) {
        unsigned i;

        malloc_mutex_postfork_parent(tsdn,
            &prof_thread_active_init_mtx);
        malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
        malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
        malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
        malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
        for (i = 0; i < PROF_NCTX_LOCKS; i++) {
            malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
        }
        for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
            malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
        }
        malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
        malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
        malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
    }
}

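/*
 * Put the prof mutexes back into a usable state in the child; fork()
 * snapshots them in whatever state the parent held them.
 */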
void
prof_postfork_child(tsdn_t *tsdn) {
    if (config_prof && opt_prof) {
        unsigned i;

        malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
        malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
        malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
        malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
        malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
        for (i = 0; i < PROF_NCTX_LOCKS; i++) {
            malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
        }
        for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
            malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
        }
        malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
        malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
        malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
    }
}

/******************************************************************************/