#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool	opt_prof = false;
bool	opt_prof_active = true;
bool	opt_prof_thread_active_init = true;
size_t	opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t	opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool	opt_prof_gdump = false;
bool	opt_prof_final = false;
bool	opt_prof_leak = false;
bool	opt_prof_accum = false;
char	opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool	prof_active;
static malloc_mutex_t	prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool	prof_thread_active_init;
static malloc_mutex_t	prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool	prof_gdump_val;
static malloc_mutex_t	prof_gdump_mtx;

uint64_t	prof_interval = 0;

size_t	lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t	*gctx_locks;
static unsigned		cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t	*tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t		bt2gctx;
static malloc_mutex_t	bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t	tdatas;
static malloc_mutex_t		tdatas_mtx;

static uint64_t		next_thr_uid;
static malloc_mutex_t	next_thr_uid_mtx;

static malloc_mutex_t	prof_dump_seq_mtx;
static uint64_t		prof_dump_seq;
static uint64_t		prof_dump_iseq;
static uint64_t		prof_dump_mseq;
static uint64_t		prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t	prof_dump_mtx;
static char		prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t		prof_dump_buf_end;
static int		prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool		prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void	prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool	prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void	prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static char	*prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

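/*
 * The comparators below use the (a > b) - (a < b) idiom, which yields -1, 0,
 * or 1 without the wraparound that subtracting unsigned 64-bit keys could
 * introduce.  tctx's order by thread UID, then thread discriminator, then
 * tctx UID; gctx's order by backtrace contents, then backtrace length.
 */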
JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_thr_discrim = a->thr_discrim;
		uint64_t b_thr_discrim = b->thr_discrim;
		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
		    b_thr_discrim);
		if (ret == 0) {
			uint64_t a_tctx_uid = a->tctx_uid;
			uint64_t b_tctx_uid = b->tctx_uid;
			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
			    b_tctx_uid);
		}
	}
	return (ret);
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
{
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0)
		ret = (a_len > b_len) - (a_len < b_len);
	return (ret);
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return (ret);
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

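/*
 * Undo sampling bookkeeping for an allocation whose prof_tctx_t was prepared
 * by prof_lookup() but will not (or did not) end up attached to a sampled
 * object.
 */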
void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (updated) {
		/*
		 * Compute a new sample threshold.  This isn't very important in
		 * practice, because this function is rarely executed, so the
		 * potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL)
			prof_sample_threshold_update(tdata);
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
			prof_tctx_destroy(tsd, tctx);
		else
			malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
	}
}

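/*
 * Record a sampled allocation: associate ptr with tctx, then credit the
 * object and its usable size to tctx's counters under the owning tdata's
 * lock.
 */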
void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx)
{

	prof_tctx_set(tsdn, ptr, usize, tctx);

	malloc_mutex_lock(tsdn, tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{

	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
		prof_tctx_destroy(tsd, tctx);
	else
		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
}

void
bt_init(prof_bt_t *bt, void **vec)
{

	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

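/*
 * prof_enter()/prof_leave() bracket critical sections that hold bt2gctx_mtx.
 * Interval and gdump triggers that fire while the mutex is held are recorded
 * in tdata->enq_idump/enq_gdump, and the corresponding dumps are performed in
 * prof_leave() after the mutex has been dropped.
 */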
JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump)
			prof_idump(tsd_tsdn(tsd));
		if (gdump)
			prof_gdump(tsd_tsdn(tsd));
	}
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0)
		return;
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

	cassert(config_prof);

	return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL)
		return (_URC_END_OF_STACK);
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max)
		return (_URC_END_OF_STACK);

	return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt)
{
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
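	/*
	 * __builtin_frame_address() and __builtin_return_address() require
	 * constant arguments, so the stack walk cannot be a loop; it is
	 * unrolled below as one BT_FRAME(i) per candidate frame, each guarded
	 * by (i) < PROF_BT_MAX.
	 */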
#define BT_FRAME(i)							\
	if ((i) < PROF_BT_MAX) {					\
		void *p;						\
		if (__builtin_frame_address(i) == 0)			\
			return;						\
		p = __builtin_return_address(i);			\
		if (p == NULL)						\
			return;						\
		bt->vec[(i)] = p;					\
		bt->len = (i) + 1;					\
	} else								\
		return;

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{

	cassert(config_prof);
	not_reached();
}
#endif

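/*
 * gctx's and tdata's do not own their mutexes; the functions below assign
 * each structure one lock from a fixed pool (PROF_NCTX_LOCKS and
 * PROF_NTDATA_LOCKS stripes, respectively).
 */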
static malloc_mutex_t *
prof_gctx_mutex_choose(void)
{
	unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);

	return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid)
{

	return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
{
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
	    size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
	    true);
	if (gctx == NULL)
		return (NULL);
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return (gctx);
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata)
{

	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd_tsdn(tsd), &bt2gctx, &gctx->bt, NULL, NULL))
			not_reached();
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
{

	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	if (opt_prof_accum)
		return (false);
	if (tctx->cnts.curobjs != 0)
		return (false);
	if (tctx->prepared)
		return (false);
	return (true);
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx)
{

	if (opt_prof_accum)
		return (false);
	if (!tctx_tree_empty(&gctx->tctxs))
		return (false);
	if (gctx->nlimbo != 0)
		return (false);
	return (true);
}

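/*
 * Destroy a tctx whose counts have dropped to zero.  Destruction can cascade:
 * the owning tdata and gctx are also torn down here if this tctx was their
 * last remaining reference.
 */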
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
	prof_tdata_t *tdata = tctx->tdata;
	prof_gctx_t *gctx = tctx->gctx;
	bool destroy_tdata, destroy_tctx, destroy_gctx;

	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);

	ckh_remove(tsd_tsdn(tsd), &tdata->bt2tctx, &gctx->bt, NULL, NULL);
	destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped.  Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by the
			 * time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else
			destroy_gctx = false;
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished.  Change state such that the dumping thread will
		 * complete destruction during a late dump iteration phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
		    tdata);
	}

	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	if (destroy_tdata)
		prof_tdata_destroy(tsd_tsdn(tsd), tdata, false);

	if (destroy_tctx)
		idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
	union {
		prof_gctx_t *p;
		void *v;
	} gctx;
	union {
		prof_bt_t *p;
		void *v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before.  Insert it. */
		gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
		if (gctx.v == NULL) {
			prof_leave(tsd, tdata);
			return (true);
		}
		btkey.p = &gctx.p->bt;
		if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) {
			/* OOM. */
			prof_leave(tsd, tdata);
			idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
			return (true);
		}
		new_gctx = true;
	} else {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
		new_gctx = false;
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return (false);
}

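/*
 * Look up (or create) this thread's prof_tctx_t for a backtrace.  The
 * per-thread bt2tctx cache is checked first; only on a miss is the global
 * bt2gctx table consulted via prof_lookup_global(), which takes bt2gctx_mtx.
 */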
prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
	union {
		prof_tctx_t *p;
		void *v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (NULL);

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) /* Note double negative! */
		ret.p->prepared = true;
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (not_found) {
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt.  Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx))
			return (NULL);

		/* Link a prof_tctx_t into gctx for this thread. */
		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
		    size2index(sizeof(prof_tctx_t)), false, NULL, true,
		    arena_ichoose(tsd_tsdn(tsd), NULL), true);
		if (ret.p == NULL) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			return (NULL);
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		ret.p->thr_discrim = tdata->thr_discrim;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
		error = ckh_insert(tsd_tsdn(tsd), &tdata->bt2tctx, btkey,
		    ret.v);
		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
		if (error) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
			return (NULL);
		}
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	}

	return (ret.p);
}

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless heap
 * profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled.  Avoiding floating point code is
 * important on memory-constrained systems, but it also enables a workaround for
 * versions of glibc that don't properly save/restore floating point registers
 * during dynamic lazy symbol loading (which internally calls into whatever
 * malloc implementation happens to be integrated into the application).  Note
 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
 * memory moves, so jemalloc must be compiled with such optimizations disabled
 * (e.g. -mno-sse) in order for the workaround to be complete.
 */
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
#ifdef JEMALLOC_PROF
	uint64_t r;
	double u;

	if (!config_prof)
		return;

	if (lg_prof_sample == 0) {
		tdata->bytes_until_sample = 0;
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                             __        __
	 *                             |  log(u)  |                     1
	 * tdata->bytes_until_sample = | -------- |, where p = -------------------
	 *                             | log(1-p) |             lg_prof_sample
	 *                                                     2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
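	/*
	 * r is drawn from [0, 2^53) and scaled by 1/2^53
	 * (9007199254740992 == 2^53) to a double u uniform in [0, 1).  For
	 * example, with lg_prof_sample == 19 the resulting mean sampling
	 * interval is 2^19 bytes (512 KiB) of allocation activity.
	 */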
	r = prng_lg_range(&tdata->prng_state, 53);
	u = (double)r * (1.0/9007199254740992.0L);
	tdata->bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return (NULL);
}

size_t
prof_tdata_count(void)
{
	size_t tdata_count = 0;
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	malloc_mutex_lock(tsdn, &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);

	return (tdata_count);
}
#endif

#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (0);

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	return (bt_count);
}
#endif

#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
947 if (opt_abort)
948 abort();
Jason Evans4f37ef62014-01-16 13:23:56 -0800949 }
950
Jason Evans772163b2014-01-17 15:40:52 -0800951 return (fd);
Jason Evans4f37ef62014-01-16 13:23:56 -0800952}
Jason Evans772163b2014-01-17 15:40:52 -0800953#ifdef JEMALLOC_JET
954#undef prof_dump_open
955#define prof_dump_open JEMALLOC_N(prof_dump_open)
956prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
957#endif
Jason Evans4f37ef62014-01-16 13:23:56 -0800958
959static bool
960prof_dump_flush(bool propagate_err)
Jason Evans6109fe02010-02-10 10:37:56 -0800961{
Jason Evans22ca8552010-03-02 11:57:30 -0800962 bool ret = false;
Jason Evans6109fe02010-02-10 10:37:56 -0800963 ssize_t err;
964
Jason Evans7372b152012-02-10 20:22:09 -0800965 cassert(config_prof);
966
Jason Evans6109fe02010-02-10 10:37:56 -0800967 err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
968 if (err == -1) {
Jason Evans551ebc42014-10-03 10:16:09 -0700969 if (!propagate_err) {
Jason Evans698805c2010-03-03 17:45:38 -0800970 malloc_write("<jemalloc>: write() failed during heap "
971 "profile flush\n");
Jason Evans22ca8552010-03-02 11:57:30 -0800972 if (opt_abort)
973 abort();
974 }
975 ret = true;
Jason Evans6109fe02010-02-10 10:37:56 -0800976 }
977 prof_dump_buf_end = 0;
Jason Evans22ca8552010-03-02 11:57:30 -0800978
979 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -0800980}
981
Jason Evans22ca8552010-03-02 11:57:30 -0800982static bool
Jason Evans4f37ef62014-01-16 13:23:56 -0800983prof_dump_close(bool propagate_err)
984{
985 bool ret;
986
987 assert(prof_dump_fd != -1);
988 ret = prof_dump_flush(propagate_err);
989 close(prof_dump_fd);
990 prof_dump_fd = -1;
991
992 return (ret);
993}
994
995static bool
996prof_dump_write(bool propagate_err, const char *s)
Jason Evans6109fe02010-02-10 10:37:56 -0800997{
Jason Evansca8fffb2016-02-24 13:16:51 -0800998 size_t i, slen, n;
Jason Evans6109fe02010-02-10 10:37:56 -0800999
Jason Evans7372b152012-02-10 20:22:09 -08001000 cassert(config_prof);
1001
Jason Evans6109fe02010-02-10 10:37:56 -08001002 i = 0;
1003 slen = strlen(s);
1004 while (i < slen) {
1005 /* Flush the buffer if it is full. */
Jason Evanscd9a1342012-03-21 18:33:03 -07001006 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
Jason Evans4f37ef62014-01-16 13:23:56 -08001007 if (prof_dump_flush(propagate_err) && propagate_err)
Jason Evans22ca8552010-03-02 11:57:30 -08001008 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001009
Jason Evanscd9a1342012-03-21 18:33:03 -07001010 if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
Jason Evans6109fe02010-02-10 10:37:56 -08001011 /* Finish writing. */
1012 n = slen - i;
1013 } else {
1014 /* Write as much of s as will fit. */
Jason Evanscd9a1342012-03-21 18:33:03 -07001015 n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
Jason Evans6109fe02010-02-10 10:37:56 -08001016 }
1017 memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
1018 prof_dump_buf_end += n;
1019 i += n;
1020 }
Jason Evans22ca8552010-03-02 11:57:30 -08001021
1022 return (false);
Jason Evans6109fe02010-02-10 10:37:56 -08001023}
1024
Jason Evanse42c3092015-07-22 15:44:47 -07001025JEMALLOC_FORMAT_PRINTF(2, 3)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001026static bool
Jason Evans4f37ef62014-01-16 13:23:56 -08001027prof_dump_printf(bool propagate_err, const char *format, ...)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001028{
1029 bool ret;
1030 va_list ap;
Jason Evanscd9a1342012-03-21 18:33:03 -07001031 char buf[PROF_PRINTF_BUFSIZE];
Jason Evansd81e4bd2012-03-06 14:57:45 -08001032
1033 va_start(ap, format);
Jason Evans6da54182012-03-23 18:05:51 -07001034 malloc_vsnprintf(buf, sizeof(buf), format, ap);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001035 va_end(ap);
Jason Evans4f37ef62014-01-16 13:23:56 -08001036 ret = prof_dump_write(propagate_err, buf);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001037
1038 return (ret);
1039}
1040
Jason Evans602c8e02014-08-18 16:22:13 -07001041static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001042prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001043{
Jason Evans3a81cbd2014-08-16 12:58:55 -07001044
Jason Evansc1e00ef2016-05-10 22:21:10 -07001045 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001046
Jason Evansc1e00ef2016-05-10 22:21:10 -07001047 malloc_mutex_lock(tsdn, tctx->gctx->lock);
Jason Evans764b0002015-03-14 14:01:35 -07001048
1049 switch (tctx->state) {
1050 case prof_tctx_state_initializing:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001051 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001052 return;
Jason Evans764b0002015-03-14 14:01:35 -07001053 case prof_tctx_state_nominal:
1054 tctx->state = prof_tctx_state_dumping;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001055 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001056
Jason Evans764b0002015-03-14 14:01:35 -07001057 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
Jason Evans3a81cbd2014-08-16 12:58:55 -07001058
Jason Evans764b0002015-03-14 14:01:35 -07001059 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1060 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1061 if (opt_prof_accum) {
1062 tdata->cnt_summed.accumobjs +=
1063 tctx->dump_cnts.accumobjs;
1064 tdata->cnt_summed.accumbytes +=
1065 tctx->dump_cnts.accumbytes;
1066 }
1067 break;
1068 case prof_tctx_state_dumping:
1069 case prof_tctx_state_purgatory:
1070 not_reached();
Jason Evans602c8e02014-08-18 16:22:13 -07001071 }
1072}
1073
Jason Evans602c8e02014-08-18 16:22:13 -07001074static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001075prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
Jason Evans602c8e02014-08-18 16:22:13 -07001076{
1077
Jason Evansc1e00ef2016-05-10 22:21:10 -07001078 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001079
Jason Evans602c8e02014-08-18 16:22:13 -07001080 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1081 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1082 if (opt_prof_accum) {
1083 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1084 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1085 }
1086}
1087
Jason Evans602c8e02014-08-18 16:22:13 -07001088static prof_tctx_t *
1089prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1090{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001091 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evansb2c0d632016-04-13 23:36:15 -07001092
Jason Evansc1e00ef2016-05-10 22:21:10 -07001093 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001094
1095 switch (tctx->state) {
1096 case prof_tctx_state_nominal:
1097 /* New since dumping started; ignore. */
1098 break;
1099 case prof_tctx_state_dumping:
1100 case prof_tctx_state_purgatory:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001101 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
Jason Evans602c8e02014-08-18 16:22:13 -07001102 break;
1103 default:
1104 not_reached();
Jason Evans3a81cbd2014-08-16 12:58:55 -07001105 }
1106
1107 return (NULL);
1108}
1109
Jason Evansb2c0d632016-04-13 23:36:15 -07001110struct prof_tctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001111 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001112 bool propagate_err;
1113};
1114
Jason Evans602c8e02014-08-18 16:22:13 -07001115static prof_tctx_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07001116prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
Jason Evans602c8e02014-08-18 16:22:13 -07001117{
Jason Evansb2c0d632016-04-13 23:36:15 -07001118 struct prof_tctx_dump_iter_arg_s *arg =
1119 (struct prof_tctx_dump_iter_arg_s *)opaque;
1120
Jason Evansc1e00ef2016-05-10 22:21:10 -07001121 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001122
Jason Evansfb64ec22015-09-21 18:37:18 -07001123 switch (tctx->state) {
1124 case prof_tctx_state_initializing:
1125 case prof_tctx_state_nominal:
1126 /* Not captured by this dump. */
1127 break;
1128 case prof_tctx_state_dumping:
1129 case prof_tctx_state_purgatory:
Jason Evansb2c0d632016-04-13 23:36:15 -07001130 if (prof_dump_printf(arg->propagate_err,
Jason Evansfb64ec22015-09-21 18:37:18 -07001131 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1132 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1133 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
1134 tctx->dump_cnts.accumbytes))
1135 return (tctx);
1136 break;
1137 default:
1138 not_reached();
1139 }
Jason Evans602c8e02014-08-18 16:22:13 -07001140 return (NULL);
1141}
1142
Jason Evans602c8e02014-08-18 16:22:13 -07001143static prof_tctx_t *
1144prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1145{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001146 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001147 prof_tctx_t *ret;
1148
Jason Evansc1e00ef2016-05-10 22:21:10 -07001149 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001150
Jason Evans602c8e02014-08-18 16:22:13 -07001151 switch (tctx->state) {
1152 case prof_tctx_state_nominal:
1153 /* New since dumping started; ignore. */
1154 break;
1155 case prof_tctx_state_dumping:
1156 tctx->state = prof_tctx_state_nominal;
1157 break;
1158 case prof_tctx_state_purgatory:
Jason Evans20c31de2014-10-02 23:01:10 -07001159 ret = tctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001160 goto label_return;
1161 default:
1162 not_reached();
1163 }
1164
1165 ret = NULL;
1166label_return:
1167 return (ret);
1168}
1169
Jason Evans6109fe02010-02-10 10:37:56 -08001170static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001171prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
Jason Evans6109fe02010-02-10 10:37:56 -08001172{
Jason Evans6109fe02010-02-10 10:37:56 -08001173
Jason Evans7372b152012-02-10 20:22:09 -08001174 cassert(config_prof);
1175
Jason Evansc1e00ef2016-05-10 22:21:10 -07001176 malloc_mutex_lock(tsdn, gctx->lock);
Jason Evans6109fe02010-02-10 10:37:56 -08001177
Jason Evans4f37ef62014-01-16 13:23:56 -08001178 /*
Jason Evans602c8e02014-08-18 16:22:13 -07001179 * Increment nlimbo so that gctx won't go away before dump.
1180 * Additionally, link gctx into the dump list so that it is included in
Jason Evans4f37ef62014-01-16 13:23:56 -08001181 * prof_dump()'s second pass.
1182 */
Jason Evans602c8e02014-08-18 16:22:13 -07001183 gctx->nlimbo++;
1184 gctx_tree_insert(gctxs, gctx);
Jason Evans4f37ef62014-01-16 13:23:56 -08001185
Jason Evans602c8e02014-08-18 16:22:13 -07001186 memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans6109fe02010-02-10 10:37:56 -08001187
Jason Evansc1e00ef2016-05-10 22:21:10 -07001188 malloc_mutex_unlock(tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001189}
Jason Evans9ce3bfd2010-10-02 22:39:59 -07001190
Jason Evansb2c0d632016-04-13 23:36:15 -07001191struct prof_gctx_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001192 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001193 size_t leak_ngctx;
1194};
Jason Evans6109fe02010-02-10 10:37:56 -08001195
Jason Evansb2c0d632016-04-13 23:36:15 -07001196static prof_gctx_t *
1197prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
1198{
1199 struct prof_gctx_merge_iter_arg_s *arg =
1200 (struct prof_gctx_merge_iter_arg_s *)opaque;
1201
Jason Evansc1e00ef2016-05-10 22:21:10 -07001202 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001203 tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001204 (void *)arg->tsdn);
Jason Evans602c8e02014-08-18 16:22:13 -07001205 if (gctx->cnt_summed.curobjs != 0)
Jason Evansb2c0d632016-04-13 23:36:15 -07001206 arg->leak_ngctx++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001207 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001208
1209 return (NULL);
1210}
1211
Jason Evans20c31de2014-10-02 23:01:10 -07001212static void
1213prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
Jason Evans602c8e02014-08-18 16:22:13 -07001214{
Jason Evans5460aa62014-09-22 21:09:23 -07001215 prof_tdata_t *tdata = prof_tdata_get(tsd, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001216 prof_gctx_t *gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001217
Jason Evans20c31de2014-10-02 23:01:10 -07001218 /*
1219 * Standard tree iteration won't work here, because as soon as we
1220 * decrement gctx->nlimbo and unlock gctx, another thread can
1221 * concurrently destroy it, which will corrupt the tree. Therefore,
1222 * tear down the tree one node at a time during iteration.
1223 */
1224 while ((gctx = gctx_tree_first(gctxs)) != NULL) {
1225 gctx_tree_remove(gctxs, gctx);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001226 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001227 {
1228 prof_tctx_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001229
Jason Evans20c31de2014-10-02 23:01:10 -07001230 next = NULL;
1231 do {
1232 prof_tctx_t *to_destroy =
1233 tctx_tree_iter(&gctx->tctxs, next,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001234 prof_tctx_finish_iter,
1235 (void *)tsd_tsdn(tsd));
Jason Evans20c31de2014-10-02 23:01:10 -07001236 if (to_destroy != NULL) {
1237 next = tctx_tree_next(&gctx->tctxs,
1238 to_destroy);
1239 tctx_tree_remove(&gctx->tctxs,
1240 to_destroy);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001241 idalloctm(tsd_tsdn(tsd), to_destroy,
1242 NULL, true, true);
Jason Evans20c31de2014-10-02 23:01:10 -07001243 } else
1244 next = NULL;
1245 } while (next != NULL);
1246 }
1247 gctx->nlimbo--;
1248 if (prof_gctx_should_destroy(gctx)) {
1249 gctx->nlimbo++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001250 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evansc93ed812014-10-30 16:50:33 -07001251 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
Jason Evans20c31de2014-10-02 23:01:10 -07001252 } else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001253 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001254 }
Jason Evans602c8e02014-08-18 16:22:13 -07001255}
1256
Jason Evansb2c0d632016-04-13 23:36:15 -07001257struct prof_tdata_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001258 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001259 prof_cnt_t cnt_all;
1260};
Jason Evans602c8e02014-08-18 16:22:13 -07001261
Jason Evansb2c0d632016-04-13 23:36:15 -07001262static prof_tdata_t *
1263prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
1264 void *opaque)
1265{
1266 struct prof_tdata_merge_iter_arg_s *arg =
1267 (struct prof_tdata_merge_iter_arg_s *)opaque;
1268
Jason Evansc1e00ef2016-05-10 22:21:10 -07001269 malloc_mutex_lock(arg->tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001270 if (!tdata->expired) {
Jason Evans602c8e02014-08-18 16:22:13 -07001271 size_t tabind;
1272 union {
1273 prof_tctx_t *p;
1274 void *v;
1275 } tctx;
1276
1277 tdata->dumping = true;
1278 memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans551ebc42014-10-03 10:16:09 -07001279 for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
1280 &tctx.v);)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001281 prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001282
Jason Evansb2c0d632016-04-13 23:36:15 -07001283 arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
1284 arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001285 if (opt_prof_accum) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001286 arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
1287 arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001288 }
1289 } else
1290 tdata->dumping = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001291 malloc_mutex_unlock(arg->tsdn, tdata->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001292
1293 return (NULL);
1294}
1295
1296static prof_tdata_t *
1297prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1298{
1299 bool propagate_err = *(bool *)arg;
1300
Jason Evans551ebc42014-10-03 10:16:09 -07001301 if (!tdata->dumping)
Jason Evans602c8e02014-08-18 16:22:13 -07001302 return (NULL);
1303
1304 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001305 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001306 tdata->thr_uid, tdata->cnt_summed.curobjs,
1307 tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
1308 tdata->cnt_summed.accumbytes,
1309 (tdata->thread_name != NULL) ? " " : "",
1310 (tdata->thread_name != NULL) ? tdata->thread_name : ""))
1311 return (tdata);
1312 return (NULL);
Jason Evans6109fe02010-02-10 10:37:56 -08001313}
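/*
 * A per-thread line emitted above might look like (hypothetical values,
 * thread named "worker"):
 *
 *   t1: 3: 12288 [0: 0] worker
 *
 * i.e. thread uid, then curobjs: curbytes [accumobjs: accumbytes], then the
 * optional thread name.
 */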
1314
Jason Evans20c31de2014-10-02 23:01:10 -07001315#ifdef JEMALLOC_JET
1316#undef prof_dump_header
1317#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
1318#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001319static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001320prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
Jason Evansa881cd22010-10-02 15:18:50 -07001321{
Jason Evans602c8e02014-08-18 16:22:13 -07001322 bool ret;
Jason Evansa881cd22010-10-02 15:18:50 -07001323
Jason Evans602c8e02014-08-18 16:22:13 -07001324 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001325 "heap_v2/%"FMTu64"\n"
1326 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001327 ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
1328 cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
1329 return (true);
Jason Evans4f37ef62014-01-16 13:23:56 -08001330
Jason Evansc1e00ef2016-05-10 22:21:10 -07001331 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001332 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1333 (void *)&propagate_err) != NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001334 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001335 return (ret);
Jason Evansa881cd22010-10-02 15:18:50 -07001336}
Jason Evans20c31de2014-10-02 23:01:10 -07001337#ifdef JEMALLOC_JET
1338#undef prof_dump_header
1339#define prof_dump_header JEMALLOC_N(prof_dump_header)
1340prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
1341#endif
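/*
 * With lg_prof_sample == 19 and hypothetical totals, prof_dump_header()
 * writes:
 *
 *   heap_v2/524288
 *    t*: 3: 12288 [0: 0]
 *
 * followed by one " t<thr_uid>: ..." line per dumping thread, produced by
 * prof_tdata_dump_iter() above.
 */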
Jason Evansa881cd22010-10-02 15:18:50 -07001342
Jason Evans22ca8552010-03-02 11:57:30 -08001343static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001344prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
Jason Evansb2c0d632016-04-13 23:36:15 -07001345 const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
Jason Evans6109fe02010-02-10 10:37:56 -08001346{
Jason Evans4f37ef62014-01-16 13:23:56 -08001347 bool ret;
Jason Evans6109fe02010-02-10 10:37:56 -08001348 unsigned i;
Jason Evansb2c0d632016-04-13 23:36:15 -07001349 struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
Jason Evans6109fe02010-02-10 10:37:56 -08001350
Jason Evans7372b152012-02-10 20:22:09 -08001351 cassert(config_prof);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001352 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001353
Jason Evans602c8e02014-08-18 16:22:13 -07001354 /* Avoid dumping gctx's that have no useful data. */
Jason Evans551ebc42014-10-03 10:16:09 -07001355 if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
Jason Evans602c8e02014-08-18 16:22:13 -07001356 (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1357 assert(gctx->cnt_summed.curobjs == 0);
1358 assert(gctx->cnt_summed.curbytes == 0);
1359 assert(gctx->cnt_summed.accumobjs == 0);
1360 assert(gctx->cnt_summed.accumbytes == 0);
Jason Evans4f37ef62014-01-16 13:23:56 -08001361 ret = false;
1362 goto label_return;
Jason Evansa881cd22010-10-02 15:18:50 -07001363 }
1364
Jason Evans602c8e02014-08-18 16:22:13 -07001365 if (prof_dump_printf(propagate_err, "@")) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001366 ret = true;
1367 goto label_return;
Jason Evans6109fe02010-02-10 10:37:56 -08001368 }
Jason Evans4f37ef62014-01-16 13:23:56 -08001369 for (i = 0; i < bt->len; i++) {
Jason Evans5fae7dc2015-07-23 13:56:25 -07001370 if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
Jason Evans4f37ef62014-01-16 13:23:56 -08001371 (uintptr_t)bt->vec[i])) {
1372 ret = true;
1373 goto label_return;
1374 }
1375 }
Jason Evans22ca8552010-03-02 11:57:30 -08001376
Jason Evans602c8e02014-08-18 16:22:13 -07001377 if (prof_dump_printf(propagate_err,
1378 "\n"
Jason Evans5fae7dc2015-07-23 13:56:25 -07001379 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001380 gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1381 gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1382 ret = true;
1383 goto label_return;
1384 }
1385
Jason Evansc1e00ef2016-05-10 22:21:10 -07001386 prof_tctx_dump_iter_arg.tsdn = tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001387 prof_tctx_dump_iter_arg.propagate_err = propagate_err;
Jason Evans602c8e02014-08-18 16:22:13 -07001388 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
Jason Evansb2c0d632016-04-13 23:36:15 -07001389 (void *)&prof_tctx_dump_iter_arg) != NULL) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001390 ret = true;
1391 goto label_return;
1392 }
1393
Jason Evans772163b2014-01-17 15:40:52 -08001394 ret = false;
Jason Evans4f37ef62014-01-16 13:23:56 -08001395label_return:
Jason Evans4f37ef62014-01-16 13:23:56 -08001396 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -08001397}
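/*
 * A gctx record written above therefore has the shape (addresses and counts
 * hypothetical):
 *
 *   @ 0x7f2c4e3d5b10 0x4008f3
 *    t*: 2: 8192 [0: 0]
 *
 * followed by one per-thread line for each tctx in the tree, emitted by
 * prof_tctx_dump_iter().
 */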
1398
Jason Evans788d29d2016-02-20 23:46:14 -08001399#ifndef _WIN32
Jason Evanse42c3092015-07-22 15:44:47 -07001400JEMALLOC_FORMAT_PRINTF(1, 2)
Jason Evans8e33c212015-05-01 09:03:20 -07001401static int
1402prof_open_maps(const char *format, ...)
1403{
1404 int mfd;
1405 va_list ap;
1406 char filename[PATH_MAX + 1];
1407
1408 va_start(ap, format);
1409 malloc_vsnprintf(filename, sizeof(filename), format, ap);
1410 va_end(ap);
1411 mfd = open(filename, O_RDONLY);
1412
1413 return (mfd);
1414}
Jason Evans788d29d2016-02-20 23:46:14 -08001415#endif
1416
1417static int
1418prof_getpid(void)
1419{
1420
1421#ifdef _WIN32
1422 return (GetCurrentProcessId());
1423#else
1424 return (getpid());
1425#endif
1426}
Jason Evans8e33c212015-05-01 09:03:20 -07001427
Jason Evans22ca8552010-03-02 11:57:30 -08001428static bool
1429prof_dump_maps(bool propagate_err)
Jason Evansc7177182010-02-11 09:25:56 -08001430{
Jason Evans93f39f82013-10-21 15:07:40 -07001431 bool ret;
Jason Evansc7177182010-02-11 09:25:56 -08001432 int mfd;
Jason Evansc7177182010-02-11 09:25:56 -08001433
Jason Evans7372b152012-02-10 20:22:09 -08001434 cassert(config_prof);
Harald Weppnerc2da2592014-03-18 00:00:14 -07001435#ifdef __FreeBSD__
Jason Evans8e33c212015-05-01 09:03:20 -07001436 mfd = prof_open_maps("/proc/curproc/map");
rustyx7f283982016-01-30 14:51:16 +01001437#elif defined(_WIN32)
1438 mfd = -1; /* Not implemented. */
Harald Weppnerc2da2592014-03-18 00:00:14 -07001439#else
Jason Evans8e33c212015-05-01 09:03:20 -07001440 {
Jason Evans788d29d2016-02-20 23:46:14 -08001441 int pid = prof_getpid();
Jason Evans8e33c212015-05-01 09:03:20 -07001442
1443 mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
1444 if (mfd == -1)
1445 mfd = prof_open_maps("/proc/%d/maps", pid);
1446 }
Harald Weppnerc2da2592014-03-18 00:00:14 -07001447#endif
Jason Evansc7177182010-02-11 09:25:56 -08001448 if (mfd != -1) {
1449 ssize_t nread;
1450
Jason Evans4f37ef62014-01-16 13:23:56 -08001451 if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
Jason Evans93f39f82013-10-21 15:07:40 -07001452 propagate_err) {
1453 ret = true;
1454 goto label_return;
1455 }
Jason Evansc7177182010-02-11 09:25:56 -08001456 nread = 0;
1457 do {
1458 prof_dump_buf_end += nread;
Jason Evanscd9a1342012-03-21 18:33:03 -07001459 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
Jason Evansc7177182010-02-11 09:25:56 -08001460 /* Make space in prof_dump_buf before read(). */
Jason Evans4f37ef62014-01-16 13:23:56 -08001461 if (prof_dump_flush(propagate_err) &&
Jason Evans93f39f82013-10-21 15:07:40 -07001462 propagate_err) {
1463 ret = true;
1464 goto label_return;
1465 }
Jason Evansc7177182010-02-11 09:25:56 -08001466 }
1467 nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
Jason Evanscd9a1342012-03-21 18:33:03 -07001468 PROF_DUMP_BUFSIZE - prof_dump_buf_end);
Jason Evansc7177182010-02-11 09:25:56 -08001469 } while (nread > 0);
Jason Evans93f39f82013-10-21 15:07:40 -07001470 } else {
1471 ret = true;
1472 goto label_return;
1473 }
Jason Evans22ca8552010-03-02 11:57:30 -08001474
Jason Evans93f39f82013-10-21 15:07:40 -07001475 ret = false;
1476label_return:
1477 if (mfd != -1)
1478 close(mfd);
1479 return (ret);
Jason Evansc7177182010-02-11 09:25:56 -08001480}
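/*
 * The "MAPPED_LIBRARIES:" section appended above mirrors /proc/<pid>/maps;
 * jeprof uses it to resolve the raw backtrace addresses in the gctx records
 * to libraries and symbols.
 */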
1481
Jason Evansdc391ad2016-05-04 12:14:36 -07001482/*
1483 * See prof_sample_threshold_update() comment for why the body of this function
1484 * is conditionally compiled.
1485 */
Jason Evans4f37ef62014-01-16 13:23:56 -08001486static void
Jason Evans602c8e02014-08-18 16:22:13 -07001487prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
Jason Evans4f37ef62014-01-16 13:23:56 -08001488 const char *filename)
1489{
1490
Jason Evansdc391ad2016-05-04 12:14:36 -07001491#ifdef JEMALLOC_PROF
1492 /*
1493 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
1494 * differ slightly from what jeprof reports, because here we scale the
1495 * summary values, whereas jeprof scales each context individually and
1496 * reports the sums of the scaled values.
1497 */
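	/*
	 * Illustrative numbers (not from a real run): assuming the default
	 * lg_prof_sample of 19 (512 KiB average sample interval) and an
	 * average object size of 4 KiB, ratio = 4096/524288 ~= 0.0078 and
	 * scale_factor = 1/(1 - exp(-0.0078)) ~= 128.5, so each sampled
	 * object stands in for roughly 128 live objects in the summary.
	 */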
Jason Evans4f37ef62014-01-16 13:23:56 -08001498 if (cnt_all->curbytes != 0) {
Jason Evansdc391ad2016-05-04 12:14:36 -07001499 double sample_period = (double)((uint64_t)1 << lg_prof_sample);
1500 double ratio = (((double)cnt_all->curbytes) /
1501 (double)cnt_all->curobjs) / sample_period;
1502 double scale_factor = 1.0 / (1.0 - exp(-ratio));
1503 uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
1504 * scale_factor);
1505 uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
1506 scale_factor);
1507
1508 malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
1509 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
1510 curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1511 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
Jason Evans4f37ef62014-01-16 13:23:56 -08001512 malloc_printf(
Jason Evans70417202015-05-01 12:31:12 -07001513 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
Jason Evans4f37ef62014-01-16 13:23:56 -08001514 filename);
1515 }
Jason Evansdc391ad2016-05-04 12:14:36 -07001516#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001517}
1518
Jason Evansb2c0d632016-04-13 23:36:15 -07001519struct prof_gctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001520 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001521 bool propagate_err;
1522};
1523
Jason Evans602c8e02014-08-18 16:22:13 -07001524static prof_gctx_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07001525prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001526{
Jason Evans602c8e02014-08-18 16:22:13 -07001527 prof_gctx_t *ret;
Jason Evansb2c0d632016-04-13 23:36:15 -07001528 struct prof_gctx_dump_iter_arg_s *arg =
1529 (struct prof_gctx_dump_iter_arg_s *)opaque;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001530
Jason Evansc1e00ef2016-05-10 22:21:10 -07001531 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001532
Jason Evansc1e00ef2016-05-10 22:21:10 -07001533 if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
Jason Evansb2c0d632016-04-13 23:36:15 -07001534 gctxs)) {
Jason Evans20c31de2014-10-02 23:01:10 -07001535 ret = gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001536 goto label_return;
1537 }
Jason Evans3a81cbd2014-08-16 12:58:55 -07001538
Jason Evans602c8e02014-08-18 16:22:13 -07001539 ret = NULL;
1540label_return:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001541 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001542 return (ret);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001543}
1544
Jason Evans22ca8552010-03-02 11:57:30 -08001545static bool
Jason Evans5460aa62014-09-22 21:09:23 -07001546prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
Jason Evans6109fe02010-02-10 10:37:56 -08001547{
Jason Evans602c8e02014-08-18 16:22:13 -07001548 prof_tdata_t *tdata;
Jason Evansb2c0d632016-04-13 23:36:15 -07001549 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
Jason Evans6109fe02010-02-10 10:37:56 -08001550 size_t tabind;
Jason Evans075e77c2010-09-20 19:53:25 -07001551 union {
Jason Evans602c8e02014-08-18 16:22:13 -07001552 prof_gctx_t *p;
Jason Evans075e77c2010-09-20 19:53:25 -07001553 void *v;
Jason Evans602c8e02014-08-18 16:22:13 -07001554 } gctx;
Jason Evansb2c0d632016-04-13 23:36:15 -07001555 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1556 struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001557 prof_gctx_tree_t gctxs;
Jason Evans6109fe02010-02-10 10:37:56 -08001558
Jason Evans7372b152012-02-10 20:22:09 -08001559 cassert(config_prof);
1560
Jason Evans20c31de2014-10-02 23:01:10 -07001561 tdata = prof_tdata_get(tsd, true);
Jason Evans5460aa62014-09-22 21:09:23 -07001562 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001563 return (true);
Jason Evans4f37ef62014-01-16 13:23:56 -08001564
Jason Evansc1e00ef2016-05-10 22:21:10 -07001565 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evansc93ed812014-10-30 16:50:33 -07001566 prof_enter(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001567
Jason Evans602c8e02014-08-18 16:22:13 -07001568 /*
1569 * Put gctx's in limbo and clear their counters in preparation for
1570 * summing.
1571 */
1572 gctx_tree_new(&gctxs);
Jason Evans551ebc42014-10-03 10:16:09 -07001573 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001574 prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
Jason Evans602c8e02014-08-18 16:22:13 -07001575
1576 /*
1577 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1578 * stats and merge them into the associated gctx's.
1579 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001580 prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
Jason Evansb2c0d632016-04-13 23:36:15 -07001581 memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001582 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07001583 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
1584 (void *)&prof_tdata_merge_iter_arg);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001585 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001586
1587 /* Merge tctx stats into gctx's. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001588 prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
Jason Evansb2c0d632016-04-13 23:36:15 -07001589 prof_gctx_merge_iter_arg.leak_ngctx = 0;
1590 gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
1591 (void *)&prof_gctx_merge_iter_arg);
Jason Evans602c8e02014-08-18 16:22:13 -07001592
Jason Evansc93ed812014-10-30 16:50:33 -07001593 prof_leave(tsd, tdata);
Jason Evans4f37ef62014-01-16 13:23:56 -08001594
1595 /* Create dump file. */
Jason Evans772163b2014-01-17 15:40:52 -08001596 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
Jason Evans4f37ef62014-01-16 13:23:56 -08001597 goto label_open_close_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001598
1599 /* Dump profile header. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001600 if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
Jason Evansb2c0d632016-04-13 23:36:15 -07001601 &prof_tdata_merge_iter_arg.cnt_all))
Jason Evans4f37ef62014-01-16 13:23:56 -08001602 goto label_write_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001603
Jason Evans602c8e02014-08-18 16:22:13 -07001604 /* Dump per gctx profile stats. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001605 prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
Jason Evansb2c0d632016-04-13 23:36:15 -07001606 prof_gctx_dump_iter_arg.propagate_err = propagate_err;
Jason Evans602c8e02014-08-18 16:22:13 -07001607 if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
Jason Evansb2c0d632016-04-13 23:36:15 -07001608 (void *)&prof_gctx_dump_iter_arg) != NULL)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001609 goto label_write_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001610
Jason Evansc7177182010-02-11 09:25:56 -08001611 /* Dump /proc/<pid>/maps if possible. */
Jason Evans22ca8552010-03-02 11:57:30 -08001612 if (prof_dump_maps(propagate_err))
Jason Evans4f37ef62014-01-16 13:23:56 -08001613 goto label_write_error;
Jason Evansc7177182010-02-11 09:25:56 -08001614
Jason Evans4f37ef62014-01-16 13:23:56 -08001615 if (prof_dump_close(propagate_err))
1616 goto label_open_close_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001617
Jason Evans20c31de2014-10-02 23:01:10 -07001618 prof_gctx_finish(tsd, &gctxs);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001619 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4f37ef62014-01-16 13:23:56 -08001620
Jason Evansb2c0d632016-04-13 23:36:15 -07001621 if (leakcheck) {
1622 prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
1623 prof_gctx_merge_iter_arg.leak_ngctx, filename);
1624 }
Jason Evans22ca8552010-03-02 11:57:30 -08001625 return (false);
Jason Evans4f37ef62014-01-16 13:23:56 -08001626label_write_error:
1627 prof_dump_close(propagate_err);
1628label_open_close_error:
Jason Evans20c31de2014-10-02 23:01:10 -07001629 prof_gctx_finish(tsd, &gctxs);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001630 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001631 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001632}
1633
Jason Evansd81e4bd2012-03-06 14:57:45 -08001634#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
Jason Evans4f37ef62014-01-16 13:23:56 -08001635#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
Jason Evans6109fe02010-02-10 10:37:56 -08001636static void
Chris Peterson3e310b32014-05-28 19:04:06 -07001637prof_dump_filename(char *filename, char v, uint64_t vseq)
Jason Evans6109fe02010-02-10 10:37:56 -08001638{
Jason Evans6109fe02010-02-10 10:37:56 -08001639
Jason Evans7372b152012-02-10 20:22:09 -08001640 cassert(config_prof);
1641
Jason Evans4f37ef62014-01-16 13:23:56 -08001642 if (vseq != VSEQ_INVALID) {
Jason Evansd81e4bd2012-03-06 14:57:45 -08001643 /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1644 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001645 "%s.%d.%"FMTu64".%c%"FMTu64".heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001646 opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001647 } else {
1648 /* "<prefix>.<pid>.<seq>.<v>.heap" */
1649 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001650 "%s.%d.%"FMTu64".%c.heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001651 opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
Jason Evans6109fe02010-02-10 10:37:56 -08001652 }
Jason Evans52386b22012-04-22 16:00:11 -07001653 prof_dump_seq++;
Jason Evans6109fe02010-02-10 10:37:56 -08001654}
1655
1656static void
1657prof_fdump(void)
1658{
Jason Evans5460aa62014-09-22 21:09:23 -07001659 tsd_t *tsd;
Jason Evans6109fe02010-02-10 10:37:56 -08001660 char filename[DUMP_FILENAME_BUFSIZE];
1661
Jason Evans7372b152012-02-10 20:22:09 -08001662 cassert(config_prof);
Jason Evans57efa7b2014-10-08 17:57:19 -07001663 assert(opt_prof_final);
1664 assert(opt_prof_prefix[0] != '\0');
Jason Evans7372b152012-02-10 20:22:09 -08001665
Jason Evans551ebc42014-10-03 10:16:09 -07001666 if (!prof_booted)
Jason Evans6109fe02010-02-10 10:37:56 -08001667 return;
Jason Evans029d44c2014-10-04 11:12:53 -07001668 tsd = tsd_fetch();
Jason Evans6109fe02010-02-10 10:37:56 -08001669
Jason Evansc1e00ef2016-05-10 22:21:10 -07001670 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001671 prof_dump_filename(filename, 'f', VSEQ_INVALID);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001672 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001673 prof_dump(tsd, false, filename, opt_prof_leak);
Jason Evans6109fe02010-02-10 10:37:56 -08001674}
1675
1676void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001677prof_idump(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08001678{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001679 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001680 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001681
Jason Evans7372b152012-02-10 20:22:09 -08001682 cassert(config_prof);
1683
Jason Evansc1e00ef2016-05-10 22:21:10 -07001684 if (!prof_booted || tsdn_null(tsdn))
Jason Evans6109fe02010-02-10 10:37:56 -08001685 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001686 tsd = tsdn_tsd(tsdn);
Jason Evans5460aa62014-09-22 21:09:23 -07001687 tdata = prof_tdata_get(tsd, false);
1688 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001689 return;
Jason Evans602c8e02014-08-18 16:22:13 -07001690 if (tdata->enq) {
1691 tdata->enq_idump = true;
Jason Evansd34f9e72010-02-11 13:19:21 -08001692 return;
1693 }
Jason Evans6109fe02010-02-10 10:37:56 -08001694
Jason Evanse7339702010-10-23 18:37:06 -07001695 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001696 char filename[PATH_MAX + 1];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001697 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001698 prof_dump_filename(filename, 'i', prof_dump_iseq);
1699 prof_dump_iseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001700 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001701 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001702 }
Jason Evans6109fe02010-02-10 10:37:56 -08001703}
1704
Jason Evans22ca8552010-03-02 11:57:30 -08001705bool
Jason Evansb2c0d632016-04-13 23:36:15 -07001706prof_mdump(tsd_t *tsd, const char *filename)
Jason Evans6109fe02010-02-10 10:37:56 -08001707{
Jason Evans22ca8552010-03-02 11:57:30 -08001708 char filename_buf[DUMP_FILENAME_BUFSIZE];
Jason Evans6109fe02010-02-10 10:37:56 -08001709
Jason Evans7372b152012-02-10 20:22:09 -08001710 cassert(config_prof);
1711
Jason Evans551ebc42014-10-03 10:16:09 -07001712 if (!opt_prof || !prof_booted)
Jason Evans22ca8552010-03-02 11:57:30 -08001713 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001714
Jason Evans22ca8552010-03-02 11:57:30 -08001715 if (filename == NULL) {
1716 /* No filename specified, so automatically generate one. */
Jason Evanse7339702010-10-23 18:37:06 -07001717 if (opt_prof_prefix[0] == '\0')
1718 return (true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001719 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001720 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1721 prof_dump_mseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001722 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001723 filename = filename_buf;
1724 }
Jason Evans5460aa62014-09-22 21:09:23 -07001725 return (prof_dump(tsd, true, filename, false));
Jason Evans6109fe02010-02-10 10:37:56 -08001726}
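/*
 * prof_mdump() is the backend of the "prof.dump" mallctl. A hypothetical
 * caller requesting an on-demand dump to an explicit file:
 *
 *   const char *fname = "/tmp/app.heap";
 *   mallctl("prof.dump", NULL, NULL, &fname, sizeof(const char *));
 *
 * If the ctl passes filename == NULL instead, the auto-generated
 * "m"-sequenced name above is used.
 */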
1727
1728void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001729prof_gdump(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08001730{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001731 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001732 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001733
Jason Evans7372b152012-02-10 20:22:09 -08001734 cassert(config_prof);
1735
Jason Evansc1e00ef2016-05-10 22:21:10 -07001736 if (!prof_booted || tsdn_null(tsdn))
Jason Evans6109fe02010-02-10 10:37:56 -08001737 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001738 tsd = tsdn_tsd(tsdn);
Jason Evans5460aa62014-09-22 21:09:23 -07001739 tdata = prof_tdata_get(tsd, false);
1740 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001741 return;
Jason Evans602c8e02014-08-18 16:22:13 -07001742 if (tdata->enq) {
1743 tdata->enq_gdump = true;
Jason Evans6109fe02010-02-10 10:37:56 -08001744 return;
1745 }
Jason Evans6109fe02010-02-10 10:37:56 -08001746
Jason Evanse7339702010-10-23 18:37:06 -07001747 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001748 char filename[DUMP_FILENAME_BUFSIZE];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001749 malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001750 prof_dump_filename(filename, 'u', prof_dump_useq);
1751 prof_dump_useq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001752 malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001753 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001754 }
Jason Evans6109fe02010-02-10 10:37:56 -08001755}
1756
1757static void
Jason Evansae03bf62013-01-22 12:02:08 -08001758prof_bt_hash(const void *key, size_t r_hash[2])
Jason Evans6109fe02010-02-10 10:37:56 -08001759{
Jason Evans6109fe02010-02-10 10:37:56 -08001760 prof_bt_t *bt = (prof_bt_t *)key;
1761
Jason Evans7372b152012-02-10 20:22:09 -08001762 cassert(config_prof);
Jason Evans6109fe02010-02-10 10:37:56 -08001763
Jason Evansae03bf62013-01-22 12:02:08 -08001764 hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
Jason Evans6109fe02010-02-10 10:37:56 -08001765}
1766
1767static bool
1768prof_bt_keycomp(const void *k1, const void *k2)
1769{
1770 const prof_bt_t *bt1 = (prof_bt_t *)k1;
1771 const prof_bt_t *bt2 = (prof_bt_t *)k2;
1772
Jason Evans7372b152012-02-10 20:22:09 -08001773 cassert(config_prof);
1774
Jason Evans6109fe02010-02-10 10:37:56 -08001775 if (bt1->len != bt2->len)
1776 return (false);
1777 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1778}
1779
Jason Evans602c8e02014-08-18 16:22:13 -07001780JEMALLOC_INLINE_C uint64_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001781prof_thr_uid_alloc(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08001782{
Jason Evans9d8f3d22014-09-11 18:06:30 -07001783 uint64_t thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001784
Jason Evansc1e00ef2016-05-10 22:21:10 -07001785 malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001786 thr_uid = next_thr_uid;
1787 next_thr_uid++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001788 malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001789
1790 return (thr_uid);
Jason Evans602c8e02014-08-18 16:22:13 -07001791}
1792
1793static prof_tdata_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001794prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim,
Jason Evansfc12c0b2014-10-03 23:25:30 -07001795 char *thread_name, bool active)
Jason Evans602c8e02014-08-18 16:22:13 -07001796{
1797 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001798
Jason Evans7372b152012-02-10 20:22:09 -08001799 cassert(config_prof);
1800
Jason Evans4d6a1342010-10-20 19:05:59 -07001801 /* Initialize an empty cache for this thread. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001802 tdata = (prof_tdata_t *)iallocztm(tsdn, sizeof(prof_tdata_t),
1803 size2index(sizeof(prof_tdata_t)), false, NULL, true,
1804 arena_get(TSDN_NULL, 0, true), true);
Jason Evans602c8e02014-08-18 16:22:13 -07001805 if (tdata == NULL)
Jason Evans4d6a1342010-10-20 19:05:59 -07001806 return (NULL);
1807
Jason Evans602c8e02014-08-18 16:22:13 -07001808 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1809 tdata->thr_uid = thr_uid;
Jason Evans20c31de2014-10-02 23:01:10 -07001810 tdata->thr_discrim = thr_discrim;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001811 tdata->thread_name = thread_name;
Jason Evans20c31de2014-10-02 23:01:10 -07001812 tdata->attached = true;
1813 tdata->expired = false;
Jason Evans04211e22015-03-16 15:11:06 -07001814 tdata->tctx_uid_next = 0;
Jason Evans602c8e02014-08-18 16:22:13 -07001815
Jason Evansc1e00ef2016-05-10 22:21:10 -07001816 if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS,
Jason Evans4d6a1342010-10-20 19:05:59 -07001817 prof_bt_hash, prof_bt_keycomp)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001818 idalloctm(tsdn, tdata, NULL, true, true);
Jason Evans4d6a1342010-10-20 19:05:59 -07001819 return (NULL);
1820 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001821
Jason Evans602c8e02014-08-18 16:22:13 -07001822 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1823 prof_sample_threshold_update(tdata);
Jason Evans4d6a1342010-10-20 19:05:59 -07001824
Jason Evans602c8e02014-08-18 16:22:13 -07001825 tdata->enq = false;
1826 tdata->enq_idump = false;
1827 tdata->enq_gdump = false;
Jason Evans52386b22012-04-22 16:00:11 -07001828
Jason Evans602c8e02014-08-18 16:22:13 -07001829 tdata->dumping = false;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001830 tdata->active = active;
Jason Evans4d6a1342010-10-20 19:05:59 -07001831
Jason Evansc1e00ef2016-05-10 22:21:10 -07001832 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001833 tdata_tree_insert(&tdatas, tdata);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001834 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001835
1836 return (tdata);
1837}
1838
1839prof_tdata_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001840prof_tdata_init(tsdn_t *tsdn)
Jason Evans602c8e02014-08-18 16:22:13 -07001841{
1842
Jason Evansc1e00ef2016-05-10 22:21:10 -07001843 return (prof_tdata_init_impl(tsdn, prof_thr_uid_alloc(tsdn), 0, NULL,
1844 prof_thread_active_init_get(tsdn)));
Jason Evans602c8e02014-08-18 16:22:13 -07001845}
1846
Jason Evans602c8e02014-08-18 16:22:13 -07001847static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001848prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001849{
1850
Jason Evansf04a0be2014-10-04 15:03:49 -07001851 if (tdata->attached && !even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001852 return (false);
1853 if (ckh_count(&tdata->bt2tctx) != 0)
1854 return (false);
1855 return (true);
1856}
1857
Jason Evansb2c0d632016-04-13 23:36:15 -07001858static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001859prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
Jason Evansb2c0d632016-04-13 23:36:15 -07001860 bool even_if_attached)
1861{
1862
Jason Evansc1e00ef2016-05-10 22:21:10 -07001863 malloc_mutex_assert_owner(tsdn, tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001864
Jason Evansc1e00ef2016-05-10 22:21:10 -07001865 return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
Jason Evansb2c0d632016-04-13 23:36:15 -07001866}
1867
Jason Evans602c8e02014-08-18 16:22:13 -07001868static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001869prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata,
Jason Evansf04a0be2014-10-04 15:03:49 -07001870 bool even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001871{
1872
Jason Evansc1e00ef2016-05-10 22:21:10 -07001873 malloc_mutex_assert_owner(tsdn, &tdatas_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07001874
Jason Evansc1e00ef2016-05-10 22:21:10 -07001875 assert(tsdn_null(tsdn) || tsd_prof_tdata_get(tsdn_tsd(tsdn)) != tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001876
Jason Evans602c8e02014-08-18 16:22:13 -07001877 tdata_tree_remove(&tdatas, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001878
Jason Evansc1e00ef2016-05-10 22:21:10 -07001879 assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
Jason Evansb2c0d632016-04-13 23:36:15 -07001880
Jason Evans602c8e02014-08-18 16:22:13 -07001881 if (tdata->thread_name != NULL)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001882 idalloctm(tsdn, tdata->thread_name, NULL, true, true);
1883 ckh_delete(tsdn, &tdata->bt2tctx);
1884 idalloctm(tsdn, tdata, NULL, true, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001885}
1886
1887static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001888prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached)
Jason Evans20c31de2014-10-02 23:01:10 -07001889{
1890
Jason Evansc1e00ef2016-05-10 22:21:10 -07001891 malloc_mutex_lock(tsdn, &tdatas_mtx);
1892 prof_tdata_destroy_locked(tsdn, tdata, even_if_attached);
1893 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evans20c31de2014-10-02 23:01:10 -07001894}
1895
1896static void
1897prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001898{
1899 bool destroy_tdata;
1900
Jason Evansc1e00ef2016-05-10 22:21:10 -07001901 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001902 if (tdata->attached) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001903 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
1904 true);
Jason Evansf04a0be2014-10-04 15:03:49 -07001905 /*
1906 * Only detach if !destroy_tdata, because detaching would allow
1907 * another thread to win the race to destroy tdata.
1908 */
1909 if (!destroy_tdata)
1910 tdata->attached = false;
Jason Evans029d44c2014-10-04 11:12:53 -07001911 tsd_prof_tdata_set(tsd, NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001912 } else
1913 destroy_tdata = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001914 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001915 if (destroy_tdata)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001916 prof_tdata_destroy(tsd_tsdn(tsd), tdata, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001917}
1918
Jason Evans20c31de2014-10-02 23:01:10 -07001919prof_tdata_t *
1920prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001921{
Jason Evans20c31de2014-10-02 23:01:10 -07001922 uint64_t thr_uid = tdata->thr_uid;
1923 uint64_t thr_discrim = tdata->thr_discrim + 1;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001924 char *thread_name = (tdata->thread_name != NULL) ?
Jason Evansc1e00ef2016-05-10 22:21:10 -07001925 prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001926 bool active = tdata->active;
Jason Evans602c8e02014-08-18 16:22:13 -07001927
Jason Evans20c31de2014-10-02 23:01:10 -07001928 prof_tdata_detach(tsd, tdata);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001929 return (prof_tdata_init_impl(tsd_tsdn(tsd), thr_uid, thr_discrim,
1930 thread_name, active));
Jason Evans602c8e02014-08-18 16:22:13 -07001931}
1932
Jason Evans20c31de2014-10-02 23:01:10 -07001933static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001934prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001935{
Jason Evans20c31de2014-10-02 23:01:10 -07001936 bool destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07001937
Jason Evansc1e00ef2016-05-10 22:21:10 -07001938 malloc_mutex_lock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001939 if (!tdata->expired) {
1940 tdata->expired = true;
1941 destroy_tdata = tdata->attached ? false :
Jason Evansc1e00ef2016-05-10 22:21:10 -07001942 prof_tdata_should_destroy(tsdn, tdata, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001943 } else
1944 destroy_tdata = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001945 malloc_mutex_unlock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001946
1947 return (destroy_tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001948}
1949
1950static prof_tdata_t *
1951prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1952{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001953 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001954
Jason Evansc1e00ef2016-05-10 22:21:10 -07001955 return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001956}
1957
1958void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001959prof_reset(tsdn_t *tsdn, size_t lg_sample)
Jason Evans602c8e02014-08-18 16:22:13 -07001960{
Jason Evans20c31de2014-10-02 23:01:10 -07001961 prof_tdata_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001962
1963 assert(lg_sample < (sizeof(uint64_t) << 3));
1964
Jason Evansc1e00ef2016-05-10 22:21:10 -07001965 malloc_mutex_lock(tsdn, &prof_dump_mtx);
1966 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001967
1968 lg_prof_sample = lg_sample;
Jason Evans20c31de2014-10-02 23:01:10 -07001969
1970 next = NULL;
1971 do {
1972 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001973 prof_tdata_reset_iter, (void *)tsdn);
Jason Evans20c31de2014-10-02 23:01:10 -07001974 if (to_destroy != NULL) {
1975 next = tdata_tree_next(&tdatas, to_destroy);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001976 prof_tdata_destroy_locked(tsdn, to_destroy, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001977 } else
1978 next = NULL;
1979 } while (next != NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001980
Jason Evansc1e00ef2016-05-10 22:21:10 -07001981 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1982 malloc_mutex_unlock(tsdn, &prof_dump_mtx);
Jason Evans4d6a1342010-10-20 19:05:59 -07001983}
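/*
 * prof_reset() backs the "prof.reset" mallctl. A hypothetical caller
 * discarding all accumulated profile data and switching to sampling roughly
 * once per MiB of allocation activity:
 *
 *   size_t lg_sample = 20;
 *   mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(size_t));
 */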
1984
Jason Evanscd9a1342012-03-21 18:33:03 -07001985void
Jason Evans5460aa62014-09-22 21:09:23 -07001986prof_tdata_cleanup(tsd_t *tsd)
Jason Evans4d6a1342010-10-20 19:05:59 -07001987{
Jason Evans5460aa62014-09-22 21:09:23 -07001988 prof_tdata_t *tdata;
Jason Evans4d6a1342010-10-20 19:05:59 -07001989
Jason Evans5460aa62014-09-22 21:09:23 -07001990 if (!config_prof)
1991 return;
Jason Evans7372b152012-02-10 20:22:09 -08001992
Jason Evans5460aa62014-09-22 21:09:23 -07001993 tdata = tsd_prof_tdata_get(tsd);
1994 if (tdata != NULL)
1995 prof_tdata_detach(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001996}
1997
Jason Evansfc12c0b2014-10-03 23:25:30 -07001998bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001999prof_active_get(tsdn_t *tsdn)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002000{
2001 bool prof_active_current;
2002
Jason Evansc1e00ef2016-05-10 22:21:10 -07002003 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002004 prof_active_current = prof_active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002005 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002006 return (prof_active_current);
2007}
2008
2009bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002010prof_active_set(tsdn_t *tsdn, bool active)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002011{
2012 bool prof_active_old;
2013
Jason Evansc1e00ef2016-05-10 22:21:10 -07002014 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002015 prof_active_old = prof_active;
2016 prof_active = active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002017 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002018 return (prof_active_old);
2019}
2020
Jason Evans602c8e02014-08-18 16:22:13 -07002021const char *
Jason Evansb2c0d632016-04-13 23:36:15 -07002022prof_thread_name_get(tsd_t *tsd)
Jason Evans602c8e02014-08-18 16:22:13 -07002023{
Jason Evans5460aa62014-09-22 21:09:23 -07002024 prof_tdata_t *tdata;
2025
Jason Evans5460aa62014-09-22 21:09:23 -07002026 tdata = prof_tdata_get(tsd, true);
2027 if (tdata == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002028 return ("");
2029 return (tdata->thread_name != NULL ? tdata->thread_name : "");
Jason Evans602c8e02014-08-18 16:22:13 -07002030}
2031
Jason Evansfc12c0b2014-10-03 23:25:30 -07002032static char *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002033prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002034{
2035 char *ret;
2036 size_t size;
2037
2038 if (thread_name == NULL)
2039 return (NULL);
2040
2041 size = strlen(thread_name) + 1;
2042 if (size == 1)
2043 return ("");
2044
Jason Evansc1e00ef2016-05-10 22:21:10 -07002045 ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
2046 arena_get(TSDN_NULL, 0, true), true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002047 if (ret == NULL)
2048 return (NULL);
2049 memcpy(ret, thread_name, size);
2050 return (ret);
2051}
2052
2053int
Jason Evans5460aa62014-09-22 21:09:23 -07002054prof_thread_name_set(tsd_t *tsd, const char *thread_name)
Jason Evans602c8e02014-08-18 16:22:13 -07002055{
2056 prof_tdata_t *tdata;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002057 unsigned i;
Jason Evans602c8e02014-08-18 16:22:13 -07002058 char *s;
2059
Jason Evans5460aa62014-09-22 21:09:23 -07002060 tdata = prof_tdata_get(tsd, true);
2061 if (tdata == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002062 return (EAGAIN);
Jason Evans602c8e02014-08-18 16:22:13 -07002063
Jason Evansfc12c0b2014-10-03 23:25:30 -07002064 /* Validate input. */
2065 if (thread_name == NULL)
2066 return (EFAULT);
2067 for (i = 0; thread_name[i] != '\0'; i++) {
2068 char c = thread_name[i];
2069 if (!isgraph(c) && !isblank(c))
2070 return (EFAULT);
2071 }
2072
Jason Evansc1e00ef2016-05-10 22:21:10 -07002073 s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
Jason Evans602c8e02014-08-18 16:22:13 -07002074 if (s == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002075 return (EAGAIN);
Jason Evans602c8e02014-08-18 16:22:13 -07002076
Jason Evansfc12c0b2014-10-03 23:25:30 -07002077 if (tdata->thread_name != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002078 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002079 tdata->thread_name = NULL;
2080 }
2081 if (strlen(s) > 0)
2082 tdata->thread_name = s;
2083 return (0);
Jason Evans602c8e02014-08-18 16:22:13 -07002084}
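/*
 * prof_thread_name_set() is reached via the "thread.prof.name" mallctl. A
 * hypothetical caller labelling the current thread for subsequent dumps:
 *
 *   const char *name = "io-worker";
 *   mallctl("thread.prof.name", NULL, NULL, &name, sizeof(const char *));
 *
 * Only isgraph()/isblank() characters are accepted by the validation above.
 */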
2085
2086bool
Jason Evansb2c0d632016-04-13 23:36:15 -07002087prof_thread_active_get(tsd_t *tsd)
Jason Evans602c8e02014-08-18 16:22:13 -07002088{
Jason Evans5460aa62014-09-22 21:09:23 -07002089 prof_tdata_t *tdata;
2090
Jason Evans5460aa62014-09-22 21:09:23 -07002091 tdata = prof_tdata_get(tsd, true);
2092 if (tdata == NULL)
Jason Evans602c8e02014-08-18 16:22:13 -07002093 return (false);
2094 return (tdata->active);
2095}
2096
2097bool
Jason Evansb2c0d632016-04-13 23:36:15 -07002098prof_thread_active_set(tsd_t *tsd, bool active)
Jason Evans602c8e02014-08-18 16:22:13 -07002099{
2100 prof_tdata_t *tdata;
2101
Jason Evans5460aa62014-09-22 21:09:23 -07002102 tdata = prof_tdata_get(tsd, true);
2103 if (tdata == NULL)
Jason Evans602c8e02014-08-18 16:22:13 -07002104 return (true);
2105 tdata->active = active;
2106 return (false);
2107}
2108
Jason Evansfc12c0b2014-10-03 23:25:30 -07002109bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002110prof_thread_active_init_get(tsdn_t *tsdn)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002111{
2112 bool active_init;
2113
Jason Evansc1e00ef2016-05-10 22:21:10 -07002114 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002115 active_init = prof_thread_active_init;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002116 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002117 return (active_init);
2118}
2119
2120bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002121prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002122{
2123 bool active_init_old;
2124
Jason Evansc1e00ef2016-05-10 22:21:10 -07002125 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002126 active_init_old = prof_thread_active_init;
2127 prof_thread_active_init = active_init;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002128 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002129 return (active_init_old);
2130}
2131
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002132bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002133prof_gdump_get(tsdn_t *tsdn)
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002134{
2135 bool prof_gdump_current;
2136
Jason Evansc1e00ef2016-05-10 22:21:10 -07002137 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002138 prof_gdump_current = prof_gdump_val;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002139 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002140 return (prof_gdump_current);
2141}
2142
2143bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002144prof_gdump_set(tsdn_t *tsdn, bool gdump)
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002145{
2146 bool prof_gdump_old;
2147
Jason Evansc1e00ef2016-05-10 22:21:10 -07002148 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002149 prof_gdump_old = prof_gdump_val;
2150 prof_gdump_val = gdump;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002151 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002152 return (prof_gdump_old);
2153}
2154
Jason Evans6109fe02010-02-10 10:37:56 -08002155void
2156prof_boot0(void)
2157{
2158
Jason Evans7372b152012-02-10 20:22:09 -08002159 cassert(config_prof);
2160
Jason Evanse7339702010-10-23 18:37:06 -07002161 memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
2162 sizeof(PROF_PREFIX_DEFAULT));
2163}
2164
2165void
2166prof_boot1(void)
2167{
2168
Jason Evans7372b152012-02-10 20:22:09 -08002169 cassert(config_prof);
2170
Jason Evans6109fe02010-02-10 10:37:56 -08002171 /*
Jason Evans9b0cbf02014-04-11 14:24:51 -07002172 * opt_prof must be in its final state before any arenas are
2173 * initialized, so this function must be executed early.
Jason Evans6109fe02010-02-10 10:37:56 -08002174 */
2175
Jason Evans551ebc42014-10-03 10:16:09 -07002176 if (opt_prof_leak && !opt_prof) {
Jason Evans6109fe02010-02-10 10:37:56 -08002177 /*
2178 * Enable opt_prof, but in such a way that profiles are never
2179 * automatically dumped.
2180 */
2181 opt_prof = true;
Jason Evanse7339702010-10-23 18:37:06 -07002182 opt_prof_gdump = false;
Jason Evansa02fc082010-03-31 17:35:51 -07002183 } else if (opt_prof) {
2184 if (opt_lg_prof_interval >= 0) {
2185 prof_interval = (((uint64_t)1U) <<
2186 opt_lg_prof_interval);
Jason Evansa3b33862012-11-13 12:56:27 -08002187 }
Jason Evansa02fc082010-03-31 17:35:51 -07002188 }
Jason Evans6109fe02010-02-10 10:37:56 -08002189}
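/*
 * Example (illustrative): running with
 * MALLOC_CONF="prof:true,lg_prof_interval:30" sets prof_interval to 1 << 30,
 * so prof_idump() produces an interval ("i") dump roughly once per GiB of
 * allocation activity.
 */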
2190
2191bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002192prof_boot2(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08002193{
2194
Jason Evans7372b152012-02-10 20:22:09 -08002195 cassert(config_prof);
2196
Jason Evans6109fe02010-02-10 10:37:56 -08002197 if (opt_prof) {
Jason Evans6da54182012-03-23 18:05:51 -07002198 unsigned i;
2199
Jason Evans602c8e02014-08-18 16:22:13 -07002200 lg_prof_sample = opt_lg_prof_sample;
2201
Jason Evansfc12c0b2014-10-03 23:25:30 -07002202 prof_active = opt_prof_active;
Jason Evansb2c0d632016-04-13 23:36:15 -07002203 if (malloc_mutex_init(&prof_active_mtx, "prof_active",
2204 WITNESS_RANK_PROF_ACTIVE))
Jason Evansfc12c0b2014-10-03 23:25:30 -07002205 return (true);
2206
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002207 prof_gdump_val = opt_prof_gdump;
Jason Evansb2c0d632016-04-13 23:36:15 -07002208 if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
2209 WITNESS_RANK_PROF_GDUMP))
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002210 return (true);
2211
Jason Evansfc12c0b2014-10-03 23:25:30 -07002212 prof_thread_active_init = opt_prof_thread_active_init;
Jason Evansb2c0d632016-04-13 23:36:15 -07002213 if (malloc_mutex_init(&prof_thread_active_init_mtx,
2214 "prof_thread_active_init",
2215 WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
Jason Evansfc12c0b2014-10-03 23:25:30 -07002216 return (true);
2217
Jason Evansc1e00ef2016-05-10 22:21:10 -07002218 if (ckh_new(tsdn, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
Jason Evans6109fe02010-02-10 10:37:56 -08002219 prof_bt_keycomp))
2220 return (true);
Jason Evansb2c0d632016-04-13 23:36:15 -07002221 if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
2222 WITNESS_RANK_PROF_BT2GCTX))
Jason Evans6109fe02010-02-10 10:37:56 -08002223 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08002224
Jason Evans602c8e02014-08-18 16:22:13 -07002225 tdata_tree_new(&tdatas);
Jason Evansb2c0d632016-04-13 23:36:15 -07002226 if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
2227 WITNESS_RANK_PROF_TDATAS))
Jason Evans602c8e02014-08-18 16:22:13 -07002228 return (true);
2229
2230 next_thr_uid = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07002231 if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
2232 WITNESS_RANK_PROF_NEXT_THR_UID))
Jason Evans9d8f3d22014-09-11 18:06:30 -07002233 return (true);
Jason Evans602c8e02014-08-18 16:22:13 -07002234
Jason Evansb2c0d632016-04-13 23:36:15 -07002235 if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
2236 WITNESS_RANK_PROF_DUMP_SEQ))
Jason Evans6109fe02010-02-10 10:37:56 -08002237 return (true);
Jason Evansb2c0d632016-04-13 23:36:15 -07002238 if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
2239 WITNESS_RANK_PROF_DUMP))
Jason Evans4f37ef62014-01-16 13:23:56 -08002240 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08002241
Jason Evans57efa7b2014-10-08 17:57:19 -07002242 if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
2243 atexit(prof_fdump) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -08002244 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evans6109fe02010-02-10 10:37:56 -08002245 if (opt_abort)
2246 abort();
2247 }
Jason Evans6da54182012-03-23 18:05:51 -07002248
Jason Evansc1e00ef2016-05-10 22:21:10 -07002249 gctx_locks = (malloc_mutex_t *)base_alloc(tsdn, PROF_NCTX_LOCKS
2250 * sizeof(malloc_mutex_t));
Jason Evans602c8e02014-08-18 16:22:13 -07002251 if (gctx_locks == NULL)
Jason Evans6da54182012-03-23 18:05:51 -07002252 return (true);
2253 for (i = 0; i < PROF_NCTX_LOCKS; i++) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002254 if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
2255 WITNESS_RANK_PROF_GCTX))
Jason Evans602c8e02014-08-18 16:22:13 -07002256 return (true);
2257 }
2258
Jason Evansc1e00ef2016-05-10 22:21:10 -07002259 tdata_locks = (malloc_mutex_t *)base_alloc(tsdn,
Jason Evansb2c0d632016-04-13 23:36:15 -07002260 PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
Jason Evans602c8e02014-08-18 16:22:13 -07002261 if (tdata_locks == NULL)
2262 return (true);
2263 for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002264 if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
2265 WITNESS_RANK_PROF_TDATA))
Jason Evans6da54182012-03-23 18:05:51 -07002266 return (true);
2267 }
Jason Evans6109fe02010-02-10 10:37:56 -08002268 }
2269
Jason Evansb27805b2010-02-10 18:15:53 -08002270#ifdef JEMALLOC_PROF_LIBGCC
2271 /*
2272 * Cause the backtracing machinery to allocate its internal state
2273 * before enabling profiling.
2274 */
2275 _Unwind_Backtrace(prof_unwind_init_callback, NULL);
2276#endif
2277
Jason Evans6109fe02010-02-10 10:37:56 -08002278 prof_booted = true;
2279
2280 return (false);
2281}
2282
Jason Evans20f1fc92012-10-09 14:46:22 -07002283void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002284prof_prefork0(tsdn_t *tsdn)
Jason Evans20f1fc92012-10-09 14:46:22 -07002285{
2286
2287 if (opt_prof) {
2288 unsigned i;
2289
Jason Evansc1e00ef2016-05-10 22:21:10 -07002290 malloc_mutex_prefork(tsdn, &prof_dump_mtx);
2291 malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
2292 malloc_mutex_prefork(tsdn, &tdatas_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002293 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002294 malloc_mutex_prefork(tsdn, &tdata_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002295 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002296 malloc_mutex_prefork(tsdn, &gctx_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002297 }
2298}
2299
2300void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002301prof_prefork1(tsdn_t *tsdn)
Jason Evans174c0c32016-04-25 23:14:40 -07002302{
2303
2304 if (opt_prof) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002305 malloc_mutex_prefork(tsdn, &prof_active_mtx);
2306 malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
2307 malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
2308 malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
2309 malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002310 }
2311}
2312
2313void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002314prof_postfork_parent(tsdn_t *tsdn)
Jason Evans20f1fc92012-10-09 14:46:22 -07002315{
2316
2317 if (opt_prof) {
2318 unsigned i;
2319
Jason Evansc1e00ef2016-05-10 22:21:10 -07002320 malloc_mutex_postfork_parent(tsdn,
2321 &prof_thread_active_init_mtx);
2322 malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
2323 malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
2324 malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
2325 malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002326 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002327 malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002328 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002329 malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
2330 malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
2331 malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
2332 malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002333 }
2334}
2335
2336void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002337prof_postfork_child(tsdn_t *tsdn)
Jason Evans20f1fc92012-10-09 14:46:22 -07002338{
2339
2340 if (opt_prof) {
2341 unsigned i;
2342
Jason Evansc1e00ef2016-05-10 22:21:10 -07002343 malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
2344 malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
2345 malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
2346 malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
2347 malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002348 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002349 malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002350 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002351 malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
2352 malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
2353 malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
2354 malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002355 }
2356}
2357
Jason Evans6109fe02010-02-10 10:37:56 -08002358/******************************************************************************/