#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's. These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static unsigned cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's. No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
static malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

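/*
 * Order tctx's by owning thread (thr_uid, then thr_discrim), breaking
 * remaining ties with the per-thread tctx_uid so that the result is a total
 * order over all tctx's that share a gctx.
 */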
JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
    uint64_t a_thr_uid = a->thr_uid;
    uint64_t b_thr_uid = b->thr_uid;
    int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
    if (ret == 0) {
        uint64_t a_thr_discrim = a->thr_discrim;
        uint64_t b_thr_discrim = b->thr_discrim;
        ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
            b_thr_discrim);
        if (ret == 0) {
            uint64_t a_tctx_uid = a->tctx_uid;
            uint64_t b_tctx_uid = b->tctx_uid;
            ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
                b_tctx_uid);
        }
    }
    return (ret);
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

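/*
 * Order gctx's by raw backtrace contents (memcmp() over the shorter of the
 * two frame vectors), with backtrace length as the tie breaker.
 */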
JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
{
    unsigned a_len = a->bt.len;
    unsigned b_len = b->bt.len;
    unsigned comp_len = (a_len < b_len) ? a_len : b_len;
    int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
    if (ret == 0)
        ret = (a_len > b_len) - (a_len < b_len);
    return (ret);
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

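/* Order tdata's by thr_uid, breaking ties with thr_discrim. */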
JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
    int ret;
    uint64_t a_uid = a->thr_uid;
    uint64_t b_uid = b->thr_uid;

    ret = ((a_uid > b_uid) - (a_uid < b_uid));
    if (ret == 0) {
        uint64_t a_discrim = a->thr_discrim;
        uint64_t b_discrim = b->thr_discrim;

        ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
    }
    return (ret);
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (updated) {
        /*
         * Compute a new sample threshold. This isn't very important in
         * practice, because this function is rarely executed, so the
         * potential for sample bias is minimal except in contrived
         * programs.
         */
        tdata = prof_tdata_get(tsd, true);
        if (tdata != NULL)
            prof_sample_threshold_update(tdata);
    }

    if ((uintptr_t)tctx > (uintptr_t)1U) {
        malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
        tctx->prepared = false;
        if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
            prof_tctx_destroy(tsd, tctx);
        else
            malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
    }
}

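/*
 * Record a sampled allocation: associate tctx with the object, and credit
 * the object to tctx's counters.
 */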
void
prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize, prof_tctx_t *tctx)
{

    prof_tctx_set(tsdn, extent, ptr, usize, tctx);

    malloc_mutex_lock(tsdn, tctx->tdata->lock);
    tctx->cnts.curobjs++;
    tctx->cnts.curbytes += usize;
    if (opt_prof_accum) {
        tctx->cnts.accumobjs++;
        tctx->cnts.accumbytes += usize;
    }
    tctx->prepared = false;
    malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

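/*
 * Account for the deallocation of a sampled object, and destroy the
 * associated tctx if nothing else references it.
 */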
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{

    malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
    assert(tctx->cnts.curobjs > 0);
    assert(tctx->cnts.curbytes >= usize);
    tctx->cnts.curobjs--;
    tctx->cnts.curbytes -= usize;

    if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
        prof_tctx_destroy(tsd, tctx);
    else
        malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
}

void
bt_init(prof_bt_t *bt, void **vec)
{

    cassert(config_prof);

    bt->vec = vec;
    bt->len = 0;
}

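/*
 * prof_enter()/prof_leave() bracket critical sections that hold bt2gctx_mtx.
 * Interval/growth dumps triggered while the lock is held are deferred via
 * tdata->enq_{idump,gdump}, and performed by prof_leave() once the lock has
 * been dropped.
 */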
JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{

    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    if (tdata != NULL) {
        assert(!tdata->enq);
        tdata->enq = true;
    }

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{

    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    if (tdata != NULL) {
        bool idump, gdump;

        assert(tdata->enq);
        tdata->enq = false;
        idump = tdata->enq_idump;
        tdata->enq_idump = false;
        gdump = tdata->enq_gdump;
        tdata->enq_gdump = false;

        if (idump)
            prof_idump(tsd_tsdn(tsd));
        if (gdump)
            prof_gdump(tsd_tsdn(tsd));
    }
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
    int nframes;

    cassert(config_prof);
    assert(bt->len == 0);
    assert(bt->vec != NULL);

    nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
    if (nframes <= 0)
        return;
    bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

    cassert(config_prof);

    return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
    prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
    void *ip;

    cassert(config_prof);

    ip = (void *)_Unwind_GetIP(context);
    if (ip == NULL)
        return (_URC_END_OF_STACK);
    data->bt->vec[data->bt->len] = ip;
    data->bt->len++;
    if (data->bt->len == data->max)
        return (_URC_END_OF_STACK);

    return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt)
{
    prof_unwind_data_t data = {bt, PROF_BT_MAX};

    cassert(config_prof);

    _Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
#define BT_FRAME(i)                                                     \
    if ((i) < PROF_BT_MAX) {                                            \
        void *p;                                                        \
        if (__builtin_frame_address(i) == 0)                            \
            return;                                                     \
        p = __builtin_return_address(i);                                \
        if (p == NULL)                                                  \
            return;                                                     \
        bt->vec[(i)] = p;                                               \
        bt->len = (i) + 1;                                              \
    } else                                                              \
        return;

    cassert(config_prof);

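    /*
     * __builtin_frame_address() and __builtin_return_address() require
     * constant arguments, so the frame walk cannot be a runtime loop;
     * BT_FRAME is instead expanded once per potential frame, up to
     * PROF_BT_MAX (128) frames.
     */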
    BT_FRAME(0)
    BT_FRAME(1)
    BT_FRAME(2)
    BT_FRAME(3)
    BT_FRAME(4)
    BT_FRAME(5)
    BT_FRAME(6)
    BT_FRAME(7)
    BT_FRAME(8)
    BT_FRAME(9)

    BT_FRAME(10)
    BT_FRAME(11)
    BT_FRAME(12)
    BT_FRAME(13)
    BT_FRAME(14)
    BT_FRAME(15)
    BT_FRAME(16)
    BT_FRAME(17)
    BT_FRAME(18)
    BT_FRAME(19)

    BT_FRAME(20)
    BT_FRAME(21)
    BT_FRAME(22)
    BT_FRAME(23)
    BT_FRAME(24)
    BT_FRAME(25)
    BT_FRAME(26)
    BT_FRAME(27)
    BT_FRAME(28)
    BT_FRAME(29)

    BT_FRAME(30)
    BT_FRAME(31)
    BT_FRAME(32)
    BT_FRAME(33)
    BT_FRAME(34)
    BT_FRAME(35)
    BT_FRAME(36)
    BT_FRAME(37)
    BT_FRAME(38)
    BT_FRAME(39)

    BT_FRAME(40)
    BT_FRAME(41)
    BT_FRAME(42)
    BT_FRAME(43)
    BT_FRAME(44)
    BT_FRAME(45)
    BT_FRAME(46)
    BT_FRAME(47)
    BT_FRAME(48)
    BT_FRAME(49)

    BT_FRAME(50)
    BT_FRAME(51)
    BT_FRAME(52)
    BT_FRAME(53)
    BT_FRAME(54)
    BT_FRAME(55)
    BT_FRAME(56)
    BT_FRAME(57)
    BT_FRAME(58)
    BT_FRAME(59)

    BT_FRAME(60)
    BT_FRAME(61)
    BT_FRAME(62)
    BT_FRAME(63)
    BT_FRAME(64)
    BT_FRAME(65)
    BT_FRAME(66)
    BT_FRAME(67)
    BT_FRAME(68)
    BT_FRAME(69)

    BT_FRAME(70)
    BT_FRAME(71)
    BT_FRAME(72)
    BT_FRAME(73)
    BT_FRAME(74)
    BT_FRAME(75)
    BT_FRAME(76)
    BT_FRAME(77)
    BT_FRAME(78)
    BT_FRAME(79)

    BT_FRAME(80)
    BT_FRAME(81)
    BT_FRAME(82)
    BT_FRAME(83)
    BT_FRAME(84)
    BT_FRAME(85)
    BT_FRAME(86)
    BT_FRAME(87)
    BT_FRAME(88)
    BT_FRAME(89)

    BT_FRAME(90)
    BT_FRAME(91)
    BT_FRAME(92)
    BT_FRAME(93)
    BT_FRAME(94)
    BT_FRAME(95)
    BT_FRAME(96)
    BT_FRAME(97)
    BT_FRAME(98)
    BT_FRAME(99)

    BT_FRAME(100)
    BT_FRAME(101)
    BT_FRAME(102)
    BT_FRAME(103)
    BT_FRAME(104)
    BT_FRAME(105)
    BT_FRAME(106)
    BT_FRAME(107)
    BT_FRAME(108)
    BT_FRAME(109)

    BT_FRAME(110)
    BT_FRAME(111)
    BT_FRAME(112)
    BT_FRAME(113)
    BT_FRAME(114)
    BT_FRAME(115)
    BT_FRAME(116)
    BT_FRAME(117)
    BT_FRAME(118)
    BT_FRAME(119)

    BT_FRAME(120)
    BT_FRAME(121)
    BT_FRAME(122)
    BT_FRAME(123)
    BT_FRAME(124)
    BT_FRAME(125)
    BT_FRAME(126)
    BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{

    cassert(config_prof);
    not_reached();
}
#endif

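/*
 * Assign each new gctx one of the PROF_NCTX_LOCKS shared leaf mutexes in
 * round-robin fashion, via an atomic counter.
 */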
static malloc_mutex_t *
prof_gctx_mutex_choose(void)
{
    unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);

    return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid)
{

    return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
{
    /*
     * Create a single allocation that has space for vec of length bt->len.
     */
    size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
    prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
        size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
        true);
    if (gctx == NULL)
        return (NULL);
    gctx->lock = prof_gctx_mutex_choose();
    /*
     * Set nlimbo to 1, in order to avoid a race condition with
     * prof_tctx_destroy()/prof_gctx_try_destroy().
     */
    gctx->nlimbo = 1;
    tctx_tree_new(&gctx->tctxs);
    /* Duplicate bt. */
    memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
    gctx->bt.vec = gctx->vec;
    gctx->bt.len = bt->len;
    return (gctx);
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata)
{

    cassert(config_prof);

    /*
     * Check that gctx is still unused by any thread cache before destroying
     * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
     * condition with this function, as does prof_tctx_destroy() in order to
     * avoid a race between the main body of prof_tctx_destroy() and entry
     * into this function.
     */
    prof_enter(tsd, tdata_self);
    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    assert(gctx->nlimbo != 0);
    if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
        /* Remove gctx from bt2gctx. */
        if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
            not_reached();
        prof_leave(tsd, tdata_self);
        /* Destroy gctx. */
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx), gctx,
            NULL, true, true);
    } else {
        /*
         * Compensate for increment in prof_tctx_destroy() or
         * prof_lookup().
         */
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
        prof_leave(tsd, tdata_self);
    }
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
{

    malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

    if (opt_prof_accum)
        return (false);
    if (tctx->cnts.curobjs != 0)
        return (false);
    if (tctx->prepared)
        return (false);
    return (true);
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx)
{

    if (opt_prof_accum)
        return (false);
    if (!tctx_tree_empty(&gctx->tctxs))
        return (false);
    if (gctx->nlimbo != 0)
        return (false);
    return (true);
}

static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
    prof_tdata_t *tdata = tctx->tdata;
    prof_gctx_t *gctx = tctx->gctx;
    bool destroy_tdata, destroy_tctx, destroy_gctx;

    malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    assert(tctx->cnts.curobjs == 0);
    assert(tctx->cnts.curbytes == 0);
    assert(!opt_prof_accum);
    assert(tctx->cnts.accumobjs == 0);
    assert(tctx->cnts.accumbytes == 0);

    ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
    destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

    malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
    switch (tctx->state) {
    case prof_tctx_state_nominal:
        tctx_tree_remove(&gctx->tctxs, tctx);
        destroy_tctx = true;
        if (prof_gctx_should_destroy(gctx)) {
            /*
             * Increment gctx->nlimbo in order to keep another
             * thread from winning the race to destroy gctx while
             * this one has gctx->lock dropped. Without this, it
             * would be possible for another thread to:
             *
             * 1) Sample an allocation associated with gctx.
             * 2) Deallocate the sampled object.
             * 3) Successfully prof_gctx_try_destroy(gctx).
             *
             * The result would be that gctx no longer exists by the
             * time this thread accesses it in
             * prof_gctx_try_destroy().
             */
            gctx->nlimbo++;
            destroy_gctx = true;
        } else
            destroy_gctx = false;
        break;
    case prof_tctx_state_dumping:
        /*
         * A dumping thread needs tctx to remain valid until dumping
         * has finished. Change state such that the dumping thread will
         * complete destruction during a late dump iteration phase.
         */
        tctx->state = prof_tctx_state_purgatory;
        destroy_tctx = false;
        destroy_gctx = false;
        break;
    default:
        not_reached();
        destroy_tctx = false;
        destroy_gctx = false;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    if (destroy_gctx) {
        prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
            tdata);
    }

    malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

    if (destroy_tdata)
        prof_tdata_destroy(tsd, tdata, false);

    if (destroy_tctx)
        idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tctx), tctx,
            NULL, true, true);
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
    union {
        prof_gctx_t *p;
        void *v;
    } gctx;
    union {
        prof_bt_t *p;
        void *v;
    } btkey;
    bool new_gctx;

    prof_enter(tsd, tdata);
    if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
        /* bt has never been seen before. Insert it. */
        gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
        if (gctx.v == NULL) {
            prof_leave(tsd, tdata);
            return (true);
        }
        btkey.p = &gctx.p->bt;
        if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
            /* OOM. */
            prof_leave(tsd, tdata);
            idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx.v),
                gctx.v, NULL, true, true);
            return (true);
        }
        new_gctx = true;
    } else {
        /*
         * Increment nlimbo, in order to avoid a race condition with
         * prof_tctx_destroy()/prof_gctx_try_destroy().
         */
        malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
        gctx.p->nlimbo++;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
        new_gctx = false;
    }
    prof_leave(tsd, tdata);

    *p_btkey = btkey.v;
    *p_gctx = gctx.p;
    *p_new_gctx = new_gctx;
    return (false);
}

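/*
 * Find (or create) the tctx for bt from the calling thread's viewpoint.
 * The fast path is a hit in the thread-local bt2tctx hash; on a miss, find
 * or create the global gctx via prof_lookup_global(), then allocate a tctx
 * and link it into both the thread-local hash and the gctx's tctx tree.
 */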
prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
    union {
        prof_tctx_t *p;
        void *v;
    } ret;
    prof_tdata_t *tdata;
    bool not_found;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL)
        return (NULL);

    malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
    not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
    if (!not_found) /* Note double negative! */
        ret.p->prepared = true;
    malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
    if (not_found) {
        void *btkey;
        prof_gctx_t *gctx;
        bool new_gctx, error;

        /*
         * This thread's cache lacks bt. Look for it in the global
         * cache.
         */
        if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
            &new_gctx))
            return (NULL);

        /* Link a prof_tctx_t into gctx for this thread. */
        ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
            size2index(sizeof(prof_tctx_t)), false, NULL, true,
            arena_ichoose(tsd, NULL), true);
        if (ret.p == NULL) {
            if (new_gctx)
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            return (NULL);
        }
        ret.p->tdata = tdata;
        ret.p->thr_uid = tdata->thr_uid;
        ret.p->thr_discrim = tdata->thr_discrim;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        ret.p->gctx = gctx;
        ret.p->tctx_uid = tdata->tctx_uid_next++;
        ret.p->prepared = true;
        ret.p->state = prof_tctx_state_initializing;
        malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
        error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
        malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
        if (error) {
            if (new_gctx)
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v),
                ret.v, NULL, true, true);
            return (NULL);
        }
        malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
        ret.p->state = prof_tctx_state_nominal;
        tctx_tree_insert(&gctx->tctxs, ret.p);
        gctx->nlimbo--;
        malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
    }

    return (ret.p);
}

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless heap
 * profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled. Avoiding floating point code is
 * important on memory-constrained systems, but it also enables a workaround for
 * versions of glibc that don't properly save/restore floating point registers
 * during dynamic lazy symbol loading (which internally calls into whatever
 * malloc implementation happens to be integrated into the application). Note
 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
 * memory moves, so jemalloc must be compiled with such optimizations disabled
 * (e.g. -mno-sse) in order for the workaround to be complete.
 */
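/*
 * Example: with the stock LG_PROF_SAMPLE_DEFAULT of 19, the interval
 * computed below is geometrically distributed with mean 2^19 bytes, i.e.
 * on average one allocation is sampled per 512 KiB allocated.
 */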
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    if (!config_prof)
        return;

    if (lg_prof_sample == 0) {
        tdata->bytes_until_sample = 0;
        return;
    }

    /*
     * Compute sample interval as a geometrically distributed random
     * variable with mean (2^lg_prof_sample).
     *
     *                              __        __
     *                             |  log(u)  |                     1
     * tdata->bytes_until_sample = | -------- |, where p = ---------------
     *                             | log(1-p) |             lg_prof_sample
     *                                                     2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    r = prng_lg_range(&tdata->prng_state, 53, false);
    u = (double)r * (1.0/9007199254740992.0L);
    tdata->bytes_until_sample = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
        + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
    size_t *tdata_count = (size_t *)arg;

    (*tdata_count)++;

    return (NULL);
}

size_t
prof_tdata_count(void)
{
    size_t tdata_count = 0;
    tsdn_t *tsdn;

    tsdn = tsdn_fetch();
    malloc_mutex_lock(tsdn, &tdatas_mtx);
    tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
        (void *)&tdata_count);
    malloc_mutex_unlock(tsdn, &tdatas_mtx);

    return (tdata_count);
}
#endif

#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
    size_t bt_count;
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL)
        return (0);

    malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
    bt_count = ckh_count(&bt2gctx);
    malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

    return (bt_count);
}
#endif

#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
    int fd;

    fd = creat(filename, 0644);
    if (fd == -1 && !propagate_err) {
        malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
            filename);
        if (opt_abort)
            abort();
    }

    return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif

static bool
prof_dump_flush(bool propagate_err)
{
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (!propagate_err) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort)
                abort();
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return (ret);
}

static bool
prof_dump_close(bool propagate_err)
{
    bool ret;

    assert(prof_dump_fd != -1);
    ret = prof_dump_flush(propagate_err);
    close(prof_dump_fd);
    prof_dump_fd = -1;

    return (ret);
}

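/*
 * Append s to prof_dump_buf, flushing the buffer to prof_dump_fd whenever
 * it fills; all dump output funnels through this function.
 */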
998static bool
999prof_dump_write(bool propagate_err, const char *s)
Jason Evans6109fe02010-02-10 10:37:56 -08001000{
Jason Evansca8fffb2016-02-24 13:16:51 -08001001 size_t i, slen, n;
Jason Evans6109fe02010-02-10 10:37:56 -08001002
Jason Evans7372b152012-02-10 20:22:09 -08001003 cassert(config_prof);
1004
Jason Evans6109fe02010-02-10 10:37:56 -08001005 i = 0;
1006 slen = strlen(s);
1007 while (i < slen) {
1008 /* Flush the buffer if it is full. */
Jason Evanscd9a1342012-03-21 18:33:03 -07001009 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
Jason Evans4f37ef62014-01-16 13:23:56 -08001010 if (prof_dump_flush(propagate_err) && propagate_err)
Jason Evans22ca8552010-03-02 11:57:30 -08001011 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001012
Jason Evanscd9a1342012-03-21 18:33:03 -07001013 if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
Jason Evans6109fe02010-02-10 10:37:56 -08001014 /* Finish writing. */
1015 n = slen - i;
1016 } else {
1017 /* Write as much of s as will fit. */
Jason Evanscd9a1342012-03-21 18:33:03 -07001018 n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
Jason Evans6109fe02010-02-10 10:37:56 -08001019 }
1020 memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
1021 prof_dump_buf_end += n;
1022 i += n;
1023 }
Jason Evans22ca8552010-03-02 11:57:30 -08001024
1025 return (false);
Jason Evans6109fe02010-02-10 10:37:56 -08001026}
1027
Jason Evanse42c3092015-07-22 15:44:47 -07001028JEMALLOC_FORMAT_PRINTF(2, 3)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001029static bool
Jason Evans4f37ef62014-01-16 13:23:56 -08001030prof_dump_printf(bool propagate_err, const char *format, ...)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001031{
1032 bool ret;
1033 va_list ap;
Jason Evanscd9a1342012-03-21 18:33:03 -07001034 char buf[PROF_PRINTF_BUFSIZE];
Jason Evansd81e4bd2012-03-06 14:57:45 -08001035
1036 va_start(ap, format);
Jason Evans6da54182012-03-23 18:05:51 -07001037 malloc_vsnprintf(buf, sizeof(buf), format, ap);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001038 va_end(ap);
Jason Evans4f37ef62014-01-16 13:23:56 -08001039 ret = prof_dump_write(propagate_err, buf);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001040
1041 return (ret);
1042}
1043
Jason Evans602c8e02014-08-18 16:22:13 -07001044static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001045prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001046{
Jason Evans3a81cbd2014-08-16 12:58:55 -07001047
Jason Evansc1e00ef2016-05-10 22:21:10 -07001048 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001049
Jason Evansc1e00ef2016-05-10 22:21:10 -07001050 malloc_mutex_lock(tsdn, tctx->gctx->lock);
Jason Evans764b0002015-03-14 14:01:35 -07001051
1052 switch (tctx->state) {
1053 case prof_tctx_state_initializing:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001054 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001055 return;
Jason Evans764b0002015-03-14 14:01:35 -07001056 case prof_tctx_state_nominal:
1057 tctx->state = prof_tctx_state_dumping;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001058 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
Jason Evans6ef80d62014-09-24 22:14:21 -07001059
Jason Evans764b0002015-03-14 14:01:35 -07001060 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
Jason Evans3a81cbd2014-08-16 12:58:55 -07001061
Jason Evans764b0002015-03-14 14:01:35 -07001062 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1063 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1064 if (opt_prof_accum) {
1065 tdata->cnt_summed.accumobjs +=
1066 tctx->dump_cnts.accumobjs;
1067 tdata->cnt_summed.accumbytes +=
1068 tctx->dump_cnts.accumbytes;
1069 }
1070 break;
1071 case prof_tctx_state_dumping:
1072 case prof_tctx_state_purgatory:
1073 not_reached();
Jason Evans602c8e02014-08-18 16:22:13 -07001074 }
1075}
1076
Jason Evans602c8e02014-08-18 16:22:13 -07001077static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001078prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
Jason Evans602c8e02014-08-18 16:22:13 -07001079{
1080
Jason Evansc1e00ef2016-05-10 22:21:10 -07001081 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001082
Jason Evans602c8e02014-08-18 16:22:13 -07001083 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1084 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1085 if (opt_prof_accum) {
1086 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1087 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1088 }
1089}
1090
Jason Evans602c8e02014-08-18 16:22:13 -07001091static prof_tctx_t *
1092prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1093{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001094 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evansb2c0d632016-04-13 23:36:15 -07001095
Jason Evansc1e00ef2016-05-10 22:21:10 -07001096 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001097
1098 switch (tctx->state) {
1099 case prof_tctx_state_nominal:
1100 /* New since dumping started; ignore. */
1101 break;
1102 case prof_tctx_state_dumping:
1103 case prof_tctx_state_purgatory:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001104 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
Jason Evans602c8e02014-08-18 16:22:13 -07001105 break;
1106 default:
1107 not_reached();
Jason Evans3a81cbd2014-08-16 12:58:55 -07001108 }
1109
1110 return (NULL);
1111}
1112
Jason Evansb2c0d632016-04-13 23:36:15 -07001113struct prof_tctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001114 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001115 bool propagate_err;
1116};
1117
Jason Evans602c8e02014-08-18 16:22:13 -07001118static prof_tctx_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07001119prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
Jason Evans602c8e02014-08-18 16:22:13 -07001120{
Jason Evansb2c0d632016-04-13 23:36:15 -07001121 struct prof_tctx_dump_iter_arg_s *arg =
1122 (struct prof_tctx_dump_iter_arg_s *)opaque;
1123
Jason Evansc1e00ef2016-05-10 22:21:10 -07001124 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001125
Jason Evansfb64ec22015-09-21 18:37:18 -07001126 switch (tctx->state) {
1127 case prof_tctx_state_initializing:
1128 case prof_tctx_state_nominal:
1129 /* Not captured by this dump. */
1130 break;
1131 case prof_tctx_state_dumping:
1132 case prof_tctx_state_purgatory:
Jason Evansb2c0d632016-04-13 23:36:15 -07001133 if (prof_dump_printf(arg->propagate_err,
Jason Evansfb64ec22015-09-21 18:37:18 -07001134 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1135 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1136 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
1137 tctx->dump_cnts.accumbytes))
1138 return (tctx);
1139 break;
1140 default:
1141 not_reached();
1142 }
Jason Evans602c8e02014-08-18 16:22:13 -07001143 return (NULL);
1144}
1145
Jason Evans602c8e02014-08-18 16:22:13 -07001146static prof_tctx_t *
1147prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1148{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001149 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001150 prof_tctx_t *ret;
1151
Jason Evansc1e00ef2016-05-10 22:21:10 -07001152 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001153
Jason Evans602c8e02014-08-18 16:22:13 -07001154 switch (tctx->state) {
1155 case prof_tctx_state_nominal:
1156 /* New since dumping started; ignore. */
1157 break;
1158 case prof_tctx_state_dumping:
1159 tctx->state = prof_tctx_state_nominal;
1160 break;
1161 case prof_tctx_state_purgatory:
Jason Evans20c31de2014-10-02 23:01:10 -07001162 ret = tctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001163 goto label_return;
1164 default:
1165 not_reached();
1166 }
1167
1168 ret = NULL;
1169label_return:
1170 return (ret);
1171}
1172
Jason Evans6109fe02010-02-10 10:37:56 -08001173static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001174prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
Jason Evans6109fe02010-02-10 10:37:56 -08001175{
Jason Evans6109fe02010-02-10 10:37:56 -08001176
Jason Evans7372b152012-02-10 20:22:09 -08001177 cassert(config_prof);
1178
Jason Evansc1e00ef2016-05-10 22:21:10 -07001179 malloc_mutex_lock(tsdn, gctx->lock);
Jason Evans6109fe02010-02-10 10:37:56 -08001180
Jason Evans4f37ef62014-01-16 13:23:56 -08001181 /*
Jason Evans602c8e02014-08-18 16:22:13 -07001182 * Increment nlimbo so that gctx won't go away before dump.
1183 * Additionally, link gctx into the dump list so that it is included in
Jason Evans4f37ef62014-01-16 13:23:56 -08001184 * prof_dump()'s second pass.
1185 */
Jason Evans602c8e02014-08-18 16:22:13 -07001186 gctx->nlimbo++;
1187 gctx_tree_insert(gctxs, gctx);
Jason Evans4f37ef62014-01-16 13:23:56 -08001188
Jason Evans602c8e02014-08-18 16:22:13 -07001189 memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans6109fe02010-02-10 10:37:56 -08001190
Jason Evansc1e00ef2016-05-10 22:21:10 -07001191 malloc_mutex_unlock(tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001192}
Jason Evans9ce3bfd2010-10-02 22:39:59 -07001193
Jason Evansb2c0d632016-04-13 23:36:15 -07001194struct prof_gctx_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001195 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001196 size_t leak_ngctx;
1197};
Jason Evans6109fe02010-02-10 10:37:56 -08001198
Jason Evansb2c0d632016-04-13 23:36:15 -07001199static prof_gctx_t *
1200prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
1201{
1202 struct prof_gctx_merge_iter_arg_s *arg =
1203 (struct prof_gctx_merge_iter_arg_s *)opaque;
1204
Jason Evansc1e00ef2016-05-10 22:21:10 -07001205 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001206 tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001207 (void *)arg->tsdn);
Jason Evans602c8e02014-08-18 16:22:13 -07001208 if (gctx->cnt_summed.curobjs != 0)
Jason Evansb2c0d632016-04-13 23:36:15 -07001209 arg->leak_ngctx++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001210 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001211
1212 return (NULL);
1213}
1214
Jason Evans20c31de2014-10-02 23:01:10 -07001215static void
1216prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
Jason Evans602c8e02014-08-18 16:22:13 -07001217{
Jason Evans5460aa62014-09-22 21:09:23 -07001218 prof_tdata_t *tdata = prof_tdata_get(tsd, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001219 prof_gctx_t *gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001220
Jason Evans20c31de2014-10-02 23:01:10 -07001221 /*
1222 * Standard tree iteration won't work here, because as soon as we
1223 * decrement gctx->nlimbo and unlock gctx, another thread can
1224 * concurrently destroy it, which will corrupt the tree. Therefore,
1225 * tear down the tree one node at a time during iteration.
1226 */
1227 while ((gctx = gctx_tree_first(gctxs)) != NULL) {
1228 gctx_tree_remove(gctxs, gctx);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001229 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001230 {
1231 prof_tctx_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001232
Jason Evans20c31de2014-10-02 23:01:10 -07001233 next = NULL;
1234 do {
1235 prof_tctx_t *to_destroy =
1236 tctx_tree_iter(&gctx->tctxs, next,
Jason Evansc1e00ef2016-05-10 22:21:10 -07001237 prof_tctx_finish_iter,
1238 (void *)tsd_tsdn(tsd));
Jason Evans20c31de2014-10-02 23:01:10 -07001239 if (to_destroy != NULL) {
1240 next = tctx_tree_next(&gctx->tctxs,
1241 to_destroy);
1242 tctx_tree_remove(&gctx->tctxs,
1243 to_destroy);
Jason Evansdb722722016-03-23 20:29:33 -07001244 idalloctm(tsd_tsdn(tsd),
Jason Evans8c9be3e2016-04-16 00:36:11 -07001245 iealloc(tsd_tsdn(tsd), to_destroy),
1246 to_destroy, NULL, true, true);
Jason Evans20c31de2014-10-02 23:01:10 -07001247 } else
1248 next = NULL;
1249 } while (next != NULL);
1250 }
1251 gctx->nlimbo--;
1252 if (prof_gctx_should_destroy(gctx)) {
1253 gctx->nlimbo++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001254 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evansc93ed812014-10-30 16:50:33 -07001255 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
Jason Evans20c31de2014-10-02 23:01:10 -07001256 } else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001257 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001258 }
Jason Evans602c8e02014-08-18 16:22:13 -07001259}
1260
Jason Evansb2c0d632016-04-13 23:36:15 -07001261struct prof_tdata_merge_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001262 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001263 prof_cnt_t cnt_all;
1264};
Jason Evans602c8e02014-08-18 16:22:13 -07001265
Jason Evansb2c0d632016-04-13 23:36:15 -07001266static prof_tdata_t *
1267prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
1268 void *opaque)
1269{
1270 struct prof_tdata_merge_iter_arg_s *arg =
1271 (struct prof_tdata_merge_iter_arg_s *)opaque;
1272
Jason Evansc1e00ef2016-05-10 22:21:10 -07001273 malloc_mutex_lock(arg->tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001274 if (!tdata->expired) {
Jason Evans602c8e02014-08-18 16:22:13 -07001275 size_t tabind;
1276 union {
1277 prof_tctx_t *p;
1278 void *v;
1279 } tctx;
1280
1281 tdata->dumping = true;
1282 memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
Jason Evans551ebc42014-10-03 10:16:09 -07001283 for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
1284 &tctx.v);)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001285 prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001286
Jason Evansb2c0d632016-04-13 23:36:15 -07001287 arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
1288 arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001289 if (opt_prof_accum) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001290 arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
1291 arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
Jason Evans602c8e02014-08-18 16:22:13 -07001292 }
1293 } else
1294 tdata->dumping = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001295 malloc_mutex_unlock(arg->tsdn, tdata->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001296
1297 return (NULL);
1298}
1299
1300static prof_tdata_t *
1301prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1302{
1303 bool propagate_err = *(bool *)arg;
1304
Jason Evans551ebc42014-10-03 10:16:09 -07001305 if (!tdata->dumping)
Jason Evans602c8e02014-08-18 16:22:13 -07001306 return (NULL);
1307
1308 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001309 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001310 tdata->thr_uid, tdata->cnt_summed.curobjs,
1311 tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
1312 tdata->cnt_summed.accumbytes,
1313 (tdata->thread_name != NULL) ? " " : "",
1314 (tdata->thread_name != NULL) ? tdata->thread_name : ""))
1315 return (tdata);
1316 return (NULL);
Jason Evans6109fe02010-02-10 10:37:56 -08001317}
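/*
 * For reference, each per thread line emitted above has the form (values
 * hypothetical):
 *
 *	 t42: 12: 49152 [0: 0] worker
 *
 * i.e. thread uid 42 currently holds 12 sampled objects totaling 49152
 * bytes, cumulative counts are zero (opt_prof_accum disabled), and the
 * thread is named "worker".
 */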
1318
Jason Evans20c31de2014-10-02 23:01:10 -07001319#ifdef JEMALLOC_JET
1320#undef prof_dump_header
1321#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
1322#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001323static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001324prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
Jason Evansa881cd22010-10-02 15:18:50 -07001325{
Jason Evans602c8e02014-08-18 16:22:13 -07001326 bool ret;
Jason Evansa881cd22010-10-02 15:18:50 -07001327
Jason Evans602c8e02014-08-18 16:22:13 -07001328 if (prof_dump_printf(propagate_err,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001329 "heap_v2/%"FMTu64"\n"
1330 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001331 ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
1332 cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
1333 return (true);
Jason Evans4f37ef62014-01-16 13:23:56 -08001334
Jason Evansc1e00ef2016-05-10 22:21:10 -07001335 malloc_mutex_lock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001336 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1337 (void *)&propagate_err) != NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001338 malloc_mutex_unlock(tsdn, &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001339 return (ret);
Jason Evansa881cd22010-10-02 15:18:50 -07001340}
Jason Evans20c31de2014-10-02 23:01:10 -07001341#ifdef JEMALLOC_JET
1342#undef prof_dump_header
1343#define prof_dump_header JEMALLOC_N(prof_dump_header)
1344prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
1345#endif
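/*
 * The JEMALLOC_JET wrapping above publishes the real implementation under
 * an _impl name and routes callers through the prof_dump_header function
 * pointer, so unit tests can interpose.  A hedged sketch of test-side use
 * (hypothetical interceptor, following this hook pattern):
 *
 *	static bool
 *	prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
 *	    const prof_cnt_t *cnt_all)
 *	{
 *		... record cnt_all for later assertions ...
 *		return (false);
 *	}
 *	...
 *	prof_dump_header = prof_dump_header_intercept;
 */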
Jason Evansa881cd22010-10-02 15:18:50 -07001346
Jason Evans22ca8552010-03-02 11:57:30 -08001347static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001348prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
Jason Evansb2c0d632016-04-13 23:36:15 -07001349 const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
Jason Evans6109fe02010-02-10 10:37:56 -08001350{
Jason Evans4f37ef62014-01-16 13:23:56 -08001351 bool ret;
Jason Evans6109fe02010-02-10 10:37:56 -08001352 unsigned i;
Jason Evansb2c0d632016-04-13 23:36:15 -07001353 struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
Jason Evans6109fe02010-02-10 10:37:56 -08001354
Jason Evans7372b152012-02-10 20:22:09 -08001355 cassert(config_prof);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001356 malloc_mutex_assert_owner(tsdn, gctx->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001357
Jason Evans602c8e02014-08-18 16:22:13 -07001358	/* Avoid dumping gctx's that have no useful data. */
Jason Evans551ebc42014-10-03 10:16:09 -07001359 if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
Jason Evans602c8e02014-08-18 16:22:13 -07001360 (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1361 assert(gctx->cnt_summed.curobjs == 0);
1362 assert(gctx->cnt_summed.curbytes == 0);
1363 assert(gctx->cnt_summed.accumobjs == 0);
1364 assert(gctx->cnt_summed.accumbytes == 0);
Jason Evans4f37ef62014-01-16 13:23:56 -08001365 ret = false;
1366 goto label_return;
Jason Evansa881cd22010-10-02 15:18:50 -07001367 }
1368
Jason Evans602c8e02014-08-18 16:22:13 -07001369 if (prof_dump_printf(propagate_err, "@")) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001370 ret = true;
1371 goto label_return;
Jason Evans6109fe02010-02-10 10:37:56 -08001372 }
Jason Evans4f37ef62014-01-16 13:23:56 -08001373 for (i = 0; i < bt->len; i++) {
Jason Evans5fae7dc2015-07-23 13:56:25 -07001374 if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
Jason Evans4f37ef62014-01-16 13:23:56 -08001375 (uintptr_t)bt->vec[i])) {
1376 ret = true;
1377 goto label_return;
1378 }
1379 }
Jason Evans22ca8552010-03-02 11:57:30 -08001380
Jason Evans602c8e02014-08-18 16:22:13 -07001381 if (prof_dump_printf(propagate_err,
1382 "\n"
Jason Evans5fae7dc2015-07-23 13:56:25 -07001383 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
Jason Evans602c8e02014-08-18 16:22:13 -07001384 gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1385 gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1386 ret = true;
1387 goto label_return;
1388 }
1389
Jason Evansc1e00ef2016-05-10 22:21:10 -07001390 prof_tctx_dump_iter_arg.tsdn = tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001391 prof_tctx_dump_iter_arg.propagate_err = propagate_err;
Jason Evans602c8e02014-08-18 16:22:13 -07001392 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
Jason Evansb2c0d632016-04-13 23:36:15 -07001393 (void *)&prof_tctx_dump_iter_arg) != NULL) {
Jason Evans4f37ef62014-01-16 13:23:56 -08001394 ret = true;
1395 goto label_return;
1396 }
1397
Jason Evans772163b2014-01-17 15:40:52 -08001398 ret = false;
Jason Evans4f37ef62014-01-16 13:23:56 -08001399label_return:
Jason Evans4f37ef62014-01-16 13:23:56 -08001400 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -08001401}
1402
Jason Evans788d29d2016-02-20 23:46:14 -08001403#ifndef _WIN32
Jason Evanse42c3092015-07-22 15:44:47 -07001404JEMALLOC_FORMAT_PRINTF(1, 2)
Jason Evans8e33c212015-05-01 09:03:20 -07001405static int
1406prof_open_maps(const char *format, ...)
1407{
1408 int mfd;
1409 va_list ap;
1410 char filename[PATH_MAX + 1];
1411
1412 va_start(ap, format);
1413 malloc_vsnprintf(filename, sizeof(filename), format, ap);
1414 va_end(ap);
1415 mfd = open(filename, O_RDONLY);
1416
1417 return (mfd);
1418}
Jason Evans788d29d2016-02-20 23:46:14 -08001419#endif
1420
1421static int
1422prof_getpid(void)
1423{
1424
1425#ifdef _WIN32
1426 return (GetCurrentProcessId());
1427#else
1428 return (getpid());
1429#endif
1430}
Jason Evans8e33c212015-05-01 09:03:20 -07001431
Jason Evans22ca8552010-03-02 11:57:30 -08001432static bool
1433prof_dump_maps(bool propagate_err)
Jason Evansc7177182010-02-11 09:25:56 -08001434{
Jason Evans93f39f82013-10-21 15:07:40 -07001435 bool ret;
Jason Evansc7177182010-02-11 09:25:56 -08001436 int mfd;
Jason Evansc7177182010-02-11 09:25:56 -08001437
Jason Evans7372b152012-02-10 20:22:09 -08001438 cassert(config_prof);
Harald Weppnerc2da2592014-03-18 00:00:14 -07001439#ifdef __FreeBSD__
Jason Evans8e33c212015-05-01 09:03:20 -07001440 mfd = prof_open_maps("/proc/curproc/map");
rustyx7f283982016-01-30 14:51:16 +01001441#elif defined(_WIN32)
1442	mfd = -1; /* Not implemented. */
Harald Weppnerc2da2592014-03-18 00:00:14 -07001443#else
Jason Evans8e33c212015-05-01 09:03:20 -07001444 {
Jason Evans788d29d2016-02-20 23:46:14 -08001445 int pid = prof_getpid();
Jason Evans8e33c212015-05-01 09:03:20 -07001446
1447 mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
1448 if (mfd == -1)
1449 mfd = prof_open_maps("/proc/%d/maps", pid);
1450 }
Harald Weppnerc2da2592014-03-18 00:00:14 -07001451#endif
Jason Evansc7177182010-02-11 09:25:56 -08001452 if (mfd != -1) {
1453 ssize_t nread;
1454
Jason Evans4f37ef62014-01-16 13:23:56 -08001455 if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
Jason Evans93f39f82013-10-21 15:07:40 -07001456 propagate_err) {
1457 ret = true;
1458 goto label_return;
1459 }
Jason Evansc7177182010-02-11 09:25:56 -08001460 nread = 0;
1461 do {
1462 prof_dump_buf_end += nread;
Jason Evanscd9a1342012-03-21 18:33:03 -07001463 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
Jason Evansc7177182010-02-11 09:25:56 -08001464 /* Make space in prof_dump_buf before read(). */
Jason Evans4f37ef62014-01-16 13:23:56 -08001465 if (prof_dump_flush(propagate_err) &&
Jason Evans93f39f82013-10-21 15:07:40 -07001466 propagate_err) {
1467 ret = true;
1468 goto label_return;
1469 }
Jason Evansc7177182010-02-11 09:25:56 -08001470 }
1471 nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
Jason Evanscd9a1342012-03-21 18:33:03 -07001472 PROF_DUMP_BUFSIZE - prof_dump_buf_end);
Jason Evansc7177182010-02-11 09:25:56 -08001473 } while (nread > 0);
Jason Evans93f39f82013-10-21 15:07:40 -07001474 } else {
1475 ret = true;
1476 goto label_return;
1477 }
Jason Evans22ca8552010-03-02 11:57:30 -08001478
Jason Evans93f39f82013-10-21 15:07:40 -07001479 ret = false;
1480label_return:
1481 if (mfd != -1)
1482 close(mfd);
1483 return (ret);
Jason Evansc7177182010-02-11 09:25:56 -08001484}
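/*
 * Note that prof_dump_maps() streams arbitrarily large maps files through
 * the fixed-size prof_dump_buf: each read() fills whatever space remains
 * past prof_dump_buf_end, and the buffer is flushed only once it is full,
 * so no allocation is needed while dumping.
 */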
1485
Jason Evansdc391ad2016-05-04 12:14:36 -07001486/*
1487 * See prof_sample_threshold_update() comment for why the body of this function
1488 * is conditionally compiled.
1489 */
Jason Evans4f37ef62014-01-16 13:23:56 -08001490static void
Jason Evans602c8e02014-08-18 16:22:13 -07001491prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
Jason Evans4f37ef62014-01-16 13:23:56 -08001492 const char *filename)
1493{
1494
Jason Evansdc391ad2016-05-04 12:14:36 -07001495#ifdef JEMALLOC_PROF
1496 /*
1497	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
1498 * differ slightly from what jeprof reports, because here we scale the
1499 * summary values, whereas jeprof scales each context individually and
1500 * reports the sums of the scaled values.
1501 */
Jason Evans4f37ef62014-01-16 13:23:56 -08001502 if (cnt_all->curbytes != 0) {
Jason Evansdc391ad2016-05-04 12:14:36 -07001503 double sample_period = (double)((uint64_t)1 << lg_prof_sample);
1504 double ratio = (((double)cnt_all->curbytes) /
1505 (double)cnt_all->curobjs) / sample_period;
1506 double scale_factor = 1.0 / (1.0 - exp(-ratio));
1507 uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
1508 * scale_factor);
1509 uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
1510 scale_factor);
1511
1512 malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
1513 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
1514 curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1515 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
Jason Evans4f37ef62014-01-16 13:23:56 -08001516 malloc_printf(
Jason Evans70417202015-05-01 12:31:12 -07001517 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
Jason Evans4f37ef62014-01-16 13:23:56 -08001518 filename);
1519 }
Jason Evansdc391ad2016-05-04 12:14:36 -07001520#endif
Jason Evans4f37ef62014-01-16 13:23:56 -08001521}
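/*
 * Worked example of the scaling above, with hypothetical numbers: for
 * lg_prof_sample == 19, sample_period is 524288 bytes.  If the sampled
 * average object size is 4096 bytes, then ratio is 4096/524288 ~= 0.0078
 * and scale_factor is 1/(1 - exp(-0.0078)) ~= 128.5, i.e. each sampled
 * object stands in for roughly 128.5 actual objects of that size.
 */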
1522
Jason Evansb2c0d632016-04-13 23:36:15 -07001523struct prof_gctx_dump_iter_arg_s {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001524 tsdn_t *tsdn;
Jason Evansb2c0d632016-04-13 23:36:15 -07001525 bool propagate_err;
1526};
1527
Jason Evans602c8e02014-08-18 16:22:13 -07001528static prof_gctx_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07001529prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001530{
Jason Evans602c8e02014-08-18 16:22:13 -07001531 prof_gctx_t *ret;
Jason Evansb2c0d632016-04-13 23:36:15 -07001532 struct prof_gctx_dump_iter_arg_s *arg =
1533 (struct prof_gctx_dump_iter_arg_s *)opaque;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001534
Jason Evansc1e00ef2016-05-10 22:21:10 -07001535 malloc_mutex_lock(arg->tsdn, gctx->lock);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001536
Jason Evansc1e00ef2016-05-10 22:21:10 -07001537 if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
Jason Evansb2c0d632016-04-13 23:36:15 -07001538 gctxs)) {
Jason Evans20c31de2014-10-02 23:01:10 -07001539 ret = gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001540 goto label_return;
1541 }
Jason Evans3a81cbd2014-08-16 12:58:55 -07001542
Jason Evans602c8e02014-08-18 16:22:13 -07001543 ret = NULL;
1544label_return:
Jason Evansc1e00ef2016-05-10 22:21:10 -07001545 malloc_mutex_unlock(arg->tsdn, gctx->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001546 return (ret);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001547}
1548
Jason Evans22ca8552010-03-02 11:57:30 -08001549static bool
Jason Evans5460aa62014-09-22 21:09:23 -07001550prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
Jason Evans6109fe02010-02-10 10:37:56 -08001551{
Jason Evans602c8e02014-08-18 16:22:13 -07001552 prof_tdata_t *tdata;
Jason Evansb2c0d632016-04-13 23:36:15 -07001553 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
Jason Evans6109fe02010-02-10 10:37:56 -08001554 size_t tabind;
Jason Evans075e77c2010-09-20 19:53:25 -07001555 union {
Jason Evans602c8e02014-08-18 16:22:13 -07001556 prof_gctx_t *p;
Jason Evans075e77c2010-09-20 19:53:25 -07001557 void *v;
Jason Evans602c8e02014-08-18 16:22:13 -07001558 } gctx;
Jason Evansb2c0d632016-04-13 23:36:15 -07001559 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1560 struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001561 prof_gctx_tree_t gctxs;
Jason Evans6109fe02010-02-10 10:37:56 -08001562
Jason Evans7372b152012-02-10 20:22:09 -08001563 cassert(config_prof);
1564
Jason Evans20c31de2014-10-02 23:01:10 -07001565 tdata = prof_tdata_get(tsd, true);
Jason Evans5460aa62014-09-22 21:09:23 -07001566 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001567 return (true);
Jason Evans4f37ef62014-01-16 13:23:56 -08001568
Jason Evansc1e00ef2016-05-10 22:21:10 -07001569 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evansc93ed812014-10-30 16:50:33 -07001570 prof_enter(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001571
Jason Evans602c8e02014-08-18 16:22:13 -07001572 /*
1573 * Put gctx's in limbo and clear their counters in preparation for
1574 * summing.
1575 */
1576 gctx_tree_new(&gctxs);
Jason Evans551ebc42014-10-03 10:16:09 -07001577 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001578 prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
Jason Evans602c8e02014-08-18 16:22:13 -07001579
1580 /*
1581 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1582 * stats and merge them into the associated gctx's.
1583 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001584 prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
Jason Evansb2c0d632016-04-13 23:36:15 -07001585 memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001586 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07001587 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
1588 (void *)&prof_tdata_merge_iter_arg);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001589 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001590
1591 /* Merge tctx stats into gctx's. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001592 prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
Jason Evansb2c0d632016-04-13 23:36:15 -07001593 prof_gctx_merge_iter_arg.leak_ngctx = 0;
1594 gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
1595 (void *)&prof_gctx_merge_iter_arg);
Jason Evans602c8e02014-08-18 16:22:13 -07001596
Jason Evansc93ed812014-10-30 16:50:33 -07001597 prof_leave(tsd, tdata);
Jason Evans4f37ef62014-01-16 13:23:56 -08001598
1599 /* Create dump file. */
Jason Evans772163b2014-01-17 15:40:52 -08001600 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
Jason Evans4f37ef62014-01-16 13:23:56 -08001601 goto label_open_close_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001602
1603 /* Dump profile header. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001604 if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
Jason Evansb2c0d632016-04-13 23:36:15 -07001605 &prof_tdata_merge_iter_arg.cnt_all))
Jason Evans4f37ef62014-01-16 13:23:56 -08001606 goto label_write_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001607
Jason Evans602c8e02014-08-18 16:22:13 -07001608 /* Dump per gctx profile stats. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001609 prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
Jason Evansb2c0d632016-04-13 23:36:15 -07001610 prof_gctx_dump_iter_arg.propagate_err = propagate_err;
Jason Evans602c8e02014-08-18 16:22:13 -07001611 if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
Jason Evansb2c0d632016-04-13 23:36:15 -07001612 (void *)&prof_gctx_dump_iter_arg) != NULL)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001613 goto label_write_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001614
Jason Evansc7177182010-02-11 09:25:56 -08001615 /* Dump /proc/<pid>/maps if possible. */
Jason Evans22ca8552010-03-02 11:57:30 -08001616 if (prof_dump_maps(propagate_err))
Jason Evans4f37ef62014-01-16 13:23:56 -08001617 goto label_write_error;
Jason Evansc7177182010-02-11 09:25:56 -08001618
Jason Evans4f37ef62014-01-16 13:23:56 -08001619 if (prof_dump_close(propagate_err))
1620 goto label_open_close_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001621
Jason Evans20c31de2014-10-02 23:01:10 -07001622 prof_gctx_finish(tsd, &gctxs);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001623 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4f37ef62014-01-16 13:23:56 -08001624
Jason Evansb2c0d632016-04-13 23:36:15 -07001625 if (leakcheck) {
1626 prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
1627 prof_gctx_merge_iter_arg.leak_ngctx, filename);
1628 }
Jason Evans22ca8552010-03-02 11:57:30 -08001629 return (false);
Jason Evans4f37ef62014-01-16 13:23:56 -08001630label_write_error:
1631 prof_dump_close(propagate_err);
1632label_open_close_error:
Jason Evans20c31de2014-10-02 23:01:10 -07001633 prof_gctx_finish(tsd, &gctxs);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001634 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001635 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001636}
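/*
 * In outline, prof_dump() proceeds in four phases: (1) snapshot all gctx's
 * while holding bt2gctx_mtx via prof_enter(); (2) merge per thread tctx
 * counts into the gctx's; (3) write the header, one record per gctx, and
 * the maps section; (4) tear down the snapshot and optionally report
 * leaks.  bt2gctx_mtx is dropped before any file I/O begins; only
 * prof_dump_mtx (plus short-lived per gctx locks) is held while writing.
 */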
1637
Jason Evansd81e4bd2012-03-06 14:57:45 -08001638#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
Jason Evans4f37ef62014-01-16 13:23:56 -08001639#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
Jason Evans6109fe02010-02-10 10:37:56 -08001640static void
Chris Peterson3e310b32014-05-28 19:04:06 -07001641prof_dump_filename(char *filename, char v, uint64_t vseq)
Jason Evans6109fe02010-02-10 10:37:56 -08001642{
Jason Evans6109fe02010-02-10 10:37:56 -08001643
Jason Evans7372b152012-02-10 20:22:09 -08001644 cassert(config_prof);
1645
Jason Evans4f37ef62014-01-16 13:23:56 -08001646 if (vseq != VSEQ_INVALID) {
Jason Evansd81e4bd2012-03-06 14:57:45 -08001647 /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1648 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001649 "%s.%d.%"FMTu64".%c%"FMTu64".heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001650 opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001651 } else {
1652 /* "<prefix>.<pid>.<seq>.<v>.heap" */
1653 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001654 "%s.%d.%"FMTu64".%c.heap",
Jason Evans788d29d2016-02-20 23:46:14 -08001655 opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
Jason Evans6109fe02010-02-10 10:37:56 -08001656 }
Jason Evans52386b22012-04-22 16:00:11 -07001657 prof_dump_seq++;
Jason Evans6109fe02010-02-10 10:37:56 -08001658}
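/*
 * Example filenames (pid and sequence values hypothetical), assuming the
 * default "jeprof" prefix: "jeprof.1234.0.f.heap" for the final dump
 * (VSEQ_INVALID, so no v sequence), and "jeprof.1234.8.i3.heap" for an
 * interval dump.  The 'm' and 'u' variants for manual and gdump-triggered
 * dumps follow the same pattern.
 */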
1659
1660static void
1661prof_fdump(void)
1662{
Jason Evans5460aa62014-09-22 21:09:23 -07001663 tsd_t *tsd;
Jason Evans6109fe02010-02-10 10:37:56 -08001664 char filename[DUMP_FILENAME_BUFSIZE];
1665
Jason Evans7372b152012-02-10 20:22:09 -08001666 cassert(config_prof);
Jason Evans57efa7b2014-10-08 17:57:19 -07001667 assert(opt_prof_final);
1668 assert(opt_prof_prefix[0] != '\0');
Jason Evans7372b152012-02-10 20:22:09 -08001669
Jason Evans551ebc42014-10-03 10:16:09 -07001670 if (!prof_booted)
Jason Evans6109fe02010-02-10 10:37:56 -08001671 return;
Jason Evans029d44c2014-10-04 11:12:53 -07001672 tsd = tsd_fetch();
Jason Evans6109fe02010-02-10 10:37:56 -08001673
Jason Evansc1e00ef2016-05-10 22:21:10 -07001674 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001675 prof_dump_filename(filename, 'f', VSEQ_INVALID);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001676 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans57efa7b2014-10-08 17:57:19 -07001677 prof_dump(tsd, false, filename, opt_prof_leak);
Jason Evans6109fe02010-02-10 10:37:56 -08001678}
1679
1680void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001681prof_idump(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08001682{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001683 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001684 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001685
Jason Evans7372b152012-02-10 20:22:09 -08001686 cassert(config_prof);
1687
Jason Evansc1e00ef2016-05-10 22:21:10 -07001688 if (!prof_booted || tsdn_null(tsdn))
Jason Evans6109fe02010-02-10 10:37:56 -08001689 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001690 tsd = tsdn_tsd(tsdn);
Jason Evans5460aa62014-09-22 21:09:23 -07001691 tdata = prof_tdata_get(tsd, false);
1692 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001693 return;
Jason Evans602c8e02014-08-18 16:22:13 -07001694 if (tdata->enq) {
1695 tdata->enq_idump = true;
Jason Evansd34f9e72010-02-11 13:19:21 -08001696 return;
1697 }
Jason Evans6109fe02010-02-10 10:37:56 -08001698
Jason Evanse7339702010-10-23 18:37:06 -07001699 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001700 char filename[PATH_MAX + 1];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001701 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001702 prof_dump_filename(filename, 'i', prof_dump_iseq);
1703 prof_dump_iseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001704 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001705 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001706 }
Jason Evans6109fe02010-02-10 10:37:56 -08001707}
1708
Jason Evans22ca8552010-03-02 11:57:30 -08001709bool
Jason Evansb2c0d632016-04-13 23:36:15 -07001710prof_mdump(tsd_t *tsd, const char *filename)
Jason Evans6109fe02010-02-10 10:37:56 -08001711{
Jason Evans22ca8552010-03-02 11:57:30 -08001712 char filename_buf[DUMP_FILENAME_BUFSIZE];
Jason Evans6109fe02010-02-10 10:37:56 -08001713
Jason Evans7372b152012-02-10 20:22:09 -08001714 cassert(config_prof);
1715
Jason Evans551ebc42014-10-03 10:16:09 -07001716 if (!opt_prof || !prof_booted)
Jason Evans22ca8552010-03-02 11:57:30 -08001717 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001718
Jason Evans22ca8552010-03-02 11:57:30 -08001719 if (filename == NULL) {
1720 /* No filename specified, so automatically generate one. */
Jason Evanse7339702010-10-23 18:37:06 -07001721 if (opt_prof_prefix[0] == '\0')
1722 return (true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001723 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001724 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1725 prof_dump_mseq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001726 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001727 filename = filename_buf;
1728 }
Jason Evans5460aa62014-09-22 21:09:23 -07001729 return (prof_dump(tsd, true, filename, false));
Jason Evans6109fe02010-02-10 10:37:56 -08001730}
1731
1732void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001733prof_gdump(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08001734{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001735 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001736 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001737
Jason Evans7372b152012-02-10 20:22:09 -08001738 cassert(config_prof);
1739
Jason Evansc1e00ef2016-05-10 22:21:10 -07001740 if (!prof_booted || tsdn_null(tsdn))
Jason Evans6109fe02010-02-10 10:37:56 -08001741 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001742 tsd = tsdn_tsd(tsdn);
Jason Evans5460aa62014-09-22 21:09:23 -07001743 tdata = prof_tdata_get(tsd, false);
1744 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001745 return;
Jason Evans602c8e02014-08-18 16:22:13 -07001746 if (tdata->enq) {
1747 tdata->enq_gdump = true;
Jason Evans6109fe02010-02-10 10:37:56 -08001748 return;
1749 }
Jason Evans6109fe02010-02-10 10:37:56 -08001750
Jason Evanse7339702010-10-23 18:37:06 -07001751 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001752 char filename[DUMP_FILENAME_BUFSIZE];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001753 malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
Jason Evanse7339702010-10-23 18:37:06 -07001754 prof_dump_filename(filename, 'u', prof_dump_useq);
1755 prof_dump_useq++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001756 malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001757 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001758 }
Jason Evans6109fe02010-02-10 10:37:56 -08001759}
1760
1761static void
Jason Evansae03bf62013-01-22 12:02:08 -08001762prof_bt_hash(const void *key, size_t r_hash[2])
Jason Evans6109fe02010-02-10 10:37:56 -08001763{
Jason Evans6109fe02010-02-10 10:37:56 -08001764 prof_bt_t *bt = (prof_bt_t *)key;
1765
Jason Evans7372b152012-02-10 20:22:09 -08001766 cassert(config_prof);
Jason Evans6109fe02010-02-10 10:37:56 -08001767
Jason Evansae03bf62013-01-22 12:02:08 -08001768 hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
Jason Evans6109fe02010-02-10 10:37:56 -08001769}
1770
1771static bool
1772prof_bt_keycomp(const void *k1, const void *k2)
1773{
1774 const prof_bt_t *bt1 = (prof_bt_t *)k1;
1775 const prof_bt_t *bt2 = (prof_bt_t *)k2;
1776
Jason Evans7372b152012-02-10 20:22:09 -08001777 cassert(config_prof);
1778
Jason Evans6109fe02010-02-10 10:37:56 -08001779 if (bt1->len != bt2->len)
1780 return (false);
1781 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1782}
1783
Jason Evans602c8e02014-08-18 16:22:13 -07001784JEMALLOC_INLINE_C uint64_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001785prof_thr_uid_alloc(tsdn_t *tsdn)
Jason Evans6109fe02010-02-10 10:37:56 -08001786{
Jason Evans9d8f3d22014-09-11 18:06:30 -07001787 uint64_t thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001788
Jason Evansc1e00ef2016-05-10 22:21:10 -07001789 malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001790 thr_uid = next_thr_uid;
1791 next_thr_uid++;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001792 malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07001793
1794 return (thr_uid);
Jason Evans602c8e02014-08-18 16:22:13 -07001795}
1796
1797static prof_tdata_t *
Jason Evansb54d1602016-10-20 23:59:12 -07001798prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
Jason Evansfc12c0b2014-10-03 23:25:30 -07001799 char *thread_name, bool active)
Jason Evans602c8e02014-08-18 16:22:13 -07001800{
1801 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001802
Jason Evans7372b152012-02-10 20:22:09 -08001803 cassert(config_prof);
1804
Jason Evans4d6a1342010-10-20 19:05:59 -07001805 /* Initialize an empty cache for this thread. */
Jason Evansb54d1602016-10-20 23:59:12 -07001806 tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
Jason Evansc1e00ef2016-05-10 22:21:10 -07001807 size2index(sizeof(prof_tdata_t)), false, NULL, true,
1808 arena_get(TSDN_NULL, 0, true), true);
Jason Evans602c8e02014-08-18 16:22:13 -07001809 if (tdata == NULL)
Jason Evans4d6a1342010-10-20 19:05:59 -07001810 return (NULL);
1811
Jason Evans602c8e02014-08-18 16:22:13 -07001812 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1813 tdata->thr_uid = thr_uid;
Jason Evans20c31de2014-10-02 23:01:10 -07001814 tdata->thr_discrim = thr_discrim;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001815 tdata->thread_name = thread_name;
Jason Evans20c31de2014-10-02 23:01:10 -07001816 tdata->attached = true;
1817 tdata->expired = false;
Jason Evans04211e22015-03-16 15:11:06 -07001818 tdata->tctx_uid_next = 0;
Jason Evans602c8e02014-08-18 16:22:13 -07001819
Jason Evansb54d1602016-10-20 23:59:12 -07001820 if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
1821 prof_bt_keycomp)) {
1822 idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata), tdata,
1823 NULL, true, true);
Jason Evans4d6a1342010-10-20 19:05:59 -07001824 return (NULL);
1825 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001826
Jason Evans602c8e02014-08-18 16:22:13 -07001827 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1828 prof_sample_threshold_update(tdata);
Jason Evans4d6a1342010-10-20 19:05:59 -07001829
Jason Evans602c8e02014-08-18 16:22:13 -07001830 tdata->enq = false;
1831 tdata->enq_idump = false;
1832 tdata->enq_gdump = false;
Jason Evans52386b22012-04-22 16:00:11 -07001833
Jason Evans602c8e02014-08-18 16:22:13 -07001834 tdata->dumping = false;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001835 tdata->active = active;
Jason Evans4d6a1342010-10-20 19:05:59 -07001836
Jason Evansb54d1602016-10-20 23:59:12 -07001837 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001838 tdata_tree_insert(&tdatas, tdata);
Jason Evansb54d1602016-10-20 23:59:12 -07001839 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001840
1841 return (tdata);
1842}
1843
1844prof_tdata_t *
Jason Evansb54d1602016-10-20 23:59:12 -07001845prof_tdata_init(tsd_t *tsd)
Jason Evans602c8e02014-08-18 16:22:13 -07001846{
1847
Jason Evansb54d1602016-10-20 23:59:12 -07001848 return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
1849 NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
Jason Evans602c8e02014-08-18 16:22:13 -07001850}
1851
Jason Evans602c8e02014-08-18 16:22:13 -07001852static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001853prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001854{
1855
Jason Evansf04a0be2014-10-04 15:03:49 -07001856 if (tdata->attached && !even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001857 return (false);
1858 if (ckh_count(&tdata->bt2tctx) != 0)
1859 return (false);
1860 return (true);
1861}
1862
Jason Evansb2c0d632016-04-13 23:36:15 -07001863static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001864prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
Jason Evansb2c0d632016-04-13 23:36:15 -07001865 bool even_if_attached)
1866{
1867
Jason Evansc1e00ef2016-05-10 22:21:10 -07001868 malloc_mutex_assert_owner(tsdn, tdata->lock);
Jason Evansb2c0d632016-04-13 23:36:15 -07001869
Jason Evansc1e00ef2016-05-10 22:21:10 -07001870 return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
Jason Evansb2c0d632016-04-13 23:36:15 -07001871}
1872
Jason Evans602c8e02014-08-18 16:22:13 -07001873static void
Jason Evansb54d1602016-10-20 23:59:12 -07001874prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
Jason Evansf04a0be2014-10-04 15:03:49 -07001875 bool even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001876{
1877
Jason Evansb54d1602016-10-20 23:59:12 -07001878 malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001879
Jason Evans602c8e02014-08-18 16:22:13 -07001880 tdata_tree_remove(&tdatas, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001881
Jason Evansc1e00ef2016-05-10 22:21:10 -07001882 assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
Jason Evansb2c0d632016-04-13 23:36:15 -07001883
Jason Evansdb722722016-03-23 20:29:33 -07001884 if (tdata->thread_name != NULL) {
Jason Evansb54d1602016-10-20 23:59:12 -07001885 idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
1886 tdata->thread_name), tdata->thread_name, NULL, true, true);
Jason Evansdb722722016-03-23 20:29:33 -07001887 }
Jason Evansb54d1602016-10-20 23:59:12 -07001888 ckh_delete(tsd, &tdata->bt2tctx);
1889 idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata), tdata, NULL,
1890 true, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001891}
1892
1893static void
Jason Evansb54d1602016-10-20 23:59:12 -07001894prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
Jason Evans20c31de2014-10-02 23:01:10 -07001895{
1896
Jason Evansb54d1602016-10-20 23:59:12 -07001897 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
1898 prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
1899 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans20c31de2014-10-02 23:01:10 -07001900}
1901
1902static void
1903prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001904{
1905 bool destroy_tdata;
1906
Jason Evansc1e00ef2016-05-10 22:21:10 -07001907 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001908 if (tdata->attached) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001909 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
1910 true);
Jason Evansf04a0be2014-10-04 15:03:49 -07001911 /*
1912 * Only detach if !destroy_tdata, because detaching would allow
1913 * another thread to win the race to destroy tdata.
1914 */
1915 if (!destroy_tdata)
1916 tdata->attached = false;
Jason Evans029d44c2014-10-04 11:12:53 -07001917 tsd_prof_tdata_set(tsd, NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001918 } else
1919 destroy_tdata = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001920 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
Jason Evans602c8e02014-08-18 16:22:13 -07001921 if (destroy_tdata)
Jason Evansb54d1602016-10-20 23:59:12 -07001922 prof_tdata_destroy(tsd, tdata, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001923}
1924
Jason Evans20c31de2014-10-02 23:01:10 -07001925prof_tdata_t *
1926prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001927{
Jason Evans20c31de2014-10-02 23:01:10 -07001928 uint64_t thr_uid = tdata->thr_uid;
1929 uint64_t thr_discrim = tdata->thr_discrim + 1;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001930 char *thread_name = (tdata->thread_name != NULL) ?
Jason Evansc1e00ef2016-05-10 22:21:10 -07001931 prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001932 bool active = tdata->active;
Jason Evans602c8e02014-08-18 16:22:13 -07001933
Jason Evans20c31de2014-10-02 23:01:10 -07001934 prof_tdata_detach(tsd, tdata);
Jason Evansb54d1602016-10-20 23:59:12 -07001935 return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
1936 active));
Jason Evans602c8e02014-08-18 16:22:13 -07001937}
1938
Jason Evans20c31de2014-10-02 23:01:10 -07001939static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001940prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001941{
Jason Evans20c31de2014-10-02 23:01:10 -07001942 bool destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07001943
Jason Evansc1e00ef2016-05-10 22:21:10 -07001944 malloc_mutex_lock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001945 if (!tdata->expired) {
1946 tdata->expired = true;
1947 destroy_tdata = tdata->attached ? false :
Jason Evansc1e00ef2016-05-10 22:21:10 -07001948 prof_tdata_should_destroy(tsdn, tdata, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001949 } else
1950 destroy_tdata = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001951 malloc_mutex_unlock(tsdn, tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001952
1953 return (destroy_tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001954}
1955
1956static prof_tdata_t *
1957prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1958{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001959 tsdn_t *tsdn = (tsdn_t *)arg;
Jason Evans602c8e02014-08-18 16:22:13 -07001960
Jason Evansc1e00ef2016-05-10 22:21:10 -07001961 return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001962}
1963
1964void
Jason Evansb54d1602016-10-20 23:59:12 -07001965prof_reset(tsd_t *tsd, size_t lg_sample)
Jason Evans602c8e02014-08-18 16:22:13 -07001966{
Jason Evans20c31de2014-10-02 23:01:10 -07001967 prof_tdata_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001968
1969 assert(lg_sample < (sizeof(uint64_t) << 3));
1970
Jason Evansb54d1602016-10-20 23:59:12 -07001971 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
1972 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07001973
1974 lg_prof_sample = lg_sample;
Jason Evans20c31de2014-10-02 23:01:10 -07001975
1976 next = NULL;
1977 do {
1978 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
Jason Evansb54d1602016-10-20 23:59:12 -07001979 prof_tdata_reset_iter, (void *)tsd);
Jason Evans20c31de2014-10-02 23:01:10 -07001980 if (to_destroy != NULL) {
1981 next = tdata_tree_next(&tdatas, to_destroy);
Jason Evansb54d1602016-10-20 23:59:12 -07001982 prof_tdata_destroy_locked(tsd, to_destroy, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001983 } else
1984 next = NULL;
1985 } while (next != NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001986
Jason Evansb54d1602016-10-20 23:59:12 -07001987 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
1988 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
Jason Evans4d6a1342010-10-20 19:05:59 -07001989}
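/*
 * prof_reset() backs the "prof.reset" mallctl.  A hedged usage sketch from
 * application code, assuming jemalloc's public mallctl() interface:
 *
 *	size_t lg_sample = 19;
 *	mallctl("prof.reset", NULL, NULL, (void *)&lg_sample,
 *	    sizeof(lg_sample));
 *
 * This discards accumulated profile state and installs the new sample
 * rate.
 */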
1990
Jason Evanscd9a1342012-03-21 18:33:03 -07001991void
Jason Evans5460aa62014-09-22 21:09:23 -07001992prof_tdata_cleanup(tsd_t *tsd)
Jason Evans4d6a1342010-10-20 19:05:59 -07001993{
Jason Evans5460aa62014-09-22 21:09:23 -07001994 prof_tdata_t *tdata;
Jason Evans4d6a1342010-10-20 19:05:59 -07001995
Jason Evans5460aa62014-09-22 21:09:23 -07001996 if (!config_prof)
1997 return;
Jason Evans7372b152012-02-10 20:22:09 -08001998
Jason Evans5460aa62014-09-22 21:09:23 -07001999 tdata = tsd_prof_tdata_get(tsd);
2000 if (tdata != NULL)
2001 prof_tdata_detach(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08002002}
2003
Jason Evansfc12c0b2014-10-03 23:25:30 -07002004bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002005prof_active_get(tsdn_t *tsdn)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002006{
2007 bool prof_active_current;
2008
Jason Evansc1e00ef2016-05-10 22:21:10 -07002009 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002010 prof_active_current = prof_active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002011 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002012 return (prof_active_current);
2013}
2014
2015bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002016prof_active_set(tsdn_t *tsdn, bool active)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002017{
2018 bool prof_active_old;
2019
Jason Evansc1e00ef2016-05-10 22:21:10 -07002020 malloc_mutex_lock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002021 prof_active_old = prof_active;
2022 prof_active = active;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002023 malloc_mutex_unlock(tsdn, &prof_active_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002024 return (prof_active_old);
2025}
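/*
 * prof_active_{get,set}() back the "prof.active" mallctl.  Illustrative
 * runtime toggle from application code:
 *
 *	bool active = false;
 *	mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active));
 */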
2026
Jason Evans602c8e02014-08-18 16:22:13 -07002027const char *
Jason Evansb2c0d632016-04-13 23:36:15 -07002028prof_thread_name_get(tsd_t *tsd)
Jason Evans602c8e02014-08-18 16:22:13 -07002029{
Jason Evans5460aa62014-09-22 21:09:23 -07002030 prof_tdata_t *tdata;
2031
Jason Evans5460aa62014-09-22 21:09:23 -07002032 tdata = prof_tdata_get(tsd, true);
2033 if (tdata == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002034 return ("");
2035 return (tdata->thread_name != NULL ? tdata->thread_name : "");
Jason Evans602c8e02014-08-18 16:22:13 -07002036}
2037
Jason Evansfc12c0b2014-10-03 23:25:30 -07002038static char *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002039prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002040{
2041 char *ret;
2042 size_t size;
2043
2044 if (thread_name == NULL)
2045 return (NULL);
2046
2047 size = strlen(thread_name) + 1;
2048 if (size == 1)
2049 return ("");
2050
Jason Evansc1e00ef2016-05-10 22:21:10 -07002051 ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
2052 arena_get(TSDN_NULL, 0, true), true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002053 if (ret == NULL)
2054 return (NULL);
2055 memcpy(ret, thread_name, size);
2056 return (ret);
2057}
2058
2059int
Jason Evans5460aa62014-09-22 21:09:23 -07002060prof_thread_name_set(tsd_t *tsd, const char *thread_name)
Jason Evans602c8e02014-08-18 16:22:13 -07002061{
2062 prof_tdata_t *tdata;
Jason Evansfc12c0b2014-10-03 23:25:30 -07002063 unsigned i;
Jason Evans602c8e02014-08-18 16:22:13 -07002064 char *s;
2065
Jason Evans5460aa62014-09-22 21:09:23 -07002066 tdata = prof_tdata_get(tsd, true);
2067 if (tdata == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002068 return (EAGAIN);
Jason Evans602c8e02014-08-18 16:22:13 -07002069
Jason Evansfc12c0b2014-10-03 23:25:30 -07002070 /* Validate input. */
2071 if (thread_name == NULL)
2072 return (EFAULT);
2073 for (i = 0; thread_name[i] != '\0'; i++) {
2074 char c = thread_name[i];
2075 if (!isgraph(c) && !isblank(c))
2076 return (EFAULT);
2077 }
2078
Jason Evansc1e00ef2016-05-10 22:21:10 -07002079 s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
Jason Evans602c8e02014-08-18 16:22:13 -07002080 if (s == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002081 return (EAGAIN);
Jason Evans602c8e02014-08-18 16:22:13 -07002082
Jason Evansfc12c0b2014-10-03 23:25:30 -07002083 if (tdata->thread_name != NULL) {
Jason Evans8c9be3e2016-04-16 00:36:11 -07002084 idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
2085 tdata->thread_name), tdata->thread_name, NULL, true, true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002086 tdata->thread_name = NULL;
2087 }
2088 if (strlen(s) > 0)
2089 tdata->thread_name = s;
2090 return (0);
Jason Evans602c8e02014-08-18 16:22:13 -07002091}
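/*
 * Callers reach prof_thread_name_set() through the "thread.prof.name"
 * mallctl, e.g. (illustrative):
 *
 *	const char *name = "worker-3";
 *	mallctl("thread.prof.name", NULL, NULL, (void *)&name,
 *	    sizeof(name));
 *
 * The name is validated (graphic/blank characters only) and copied, so the
 * caller's buffer need not outlive the call.
 */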
2092
2093bool
Jason Evansb2c0d632016-04-13 23:36:15 -07002094prof_thread_active_get(tsd_t *tsd)
Jason Evans602c8e02014-08-18 16:22:13 -07002095{
Jason Evans5460aa62014-09-22 21:09:23 -07002096 prof_tdata_t *tdata;
2097
Jason Evans5460aa62014-09-22 21:09:23 -07002098 tdata = prof_tdata_get(tsd, true);
2099 if (tdata == NULL)
Jason Evans602c8e02014-08-18 16:22:13 -07002100 return (false);
2101 return (tdata->active);
2102}
2103
2104bool
Jason Evansb2c0d632016-04-13 23:36:15 -07002105prof_thread_active_set(tsd_t *tsd, bool active)
Jason Evans602c8e02014-08-18 16:22:13 -07002106{
2107 prof_tdata_t *tdata;
2108
Jason Evans5460aa62014-09-22 21:09:23 -07002109 tdata = prof_tdata_get(tsd, true);
2110 if (tdata == NULL)
Jason Evans602c8e02014-08-18 16:22:13 -07002111 return (true);
2112 tdata->active = active;
2113 return (false);
2114}
2115
Jason Evansfc12c0b2014-10-03 23:25:30 -07002116bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002117prof_thread_active_init_get(tsdn_t *tsdn)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002118{
2119 bool active_init;
2120
Jason Evansc1e00ef2016-05-10 22:21:10 -07002121 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002122 active_init = prof_thread_active_init;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002123 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002124 return (active_init);
2125}
2126
2127bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002128prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
Jason Evansfc12c0b2014-10-03 23:25:30 -07002129{
2130 bool active_init_old;
2131
Jason Evansc1e00ef2016-05-10 22:21:10 -07002132 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002133 active_init_old = prof_thread_active_init;
2134 prof_thread_active_init = active_init;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002135 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002136 return (active_init_old);
2137}
2138
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002139bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002140prof_gdump_get(tsdn_t *tsdn)
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002141{
2142 bool prof_gdump_current;
2143
Jason Evansc1e00ef2016-05-10 22:21:10 -07002144 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002145 prof_gdump_current = prof_gdump_val;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002146 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002147 return (prof_gdump_current);
2148}
2149
2150bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07002151prof_gdump_set(tsdn_t *tsdn, bool gdump)
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002152{
2153 bool prof_gdump_old;
2154
Jason Evansc1e00ef2016-05-10 22:21:10 -07002155 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002156 prof_gdump_old = prof_gdump_val;
2157 prof_gdump_val = gdump;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002158 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002159 return (prof_gdump_old);
2160}
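/*
 * prof_gdump_{get,set}() back the "prof.gdump" mallctl, which toggles
 * dumping whenever total virtual memory reaches a new high-water mark
 * (illustrative):
 *
 *	bool gdump = true;
 *	mallctl("prof.gdump", NULL, NULL, (void *)&gdump, sizeof(gdump));
 */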
2161
Jason Evans6109fe02010-02-10 10:37:56 -08002162void
2163prof_boot0(void)
2164{
2165
Jason Evans7372b152012-02-10 20:22:09 -08002166 cassert(config_prof);
2167
Jason Evanse7339702010-10-23 18:37:06 -07002168 memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
2169 sizeof(PROF_PREFIX_DEFAULT));
2170}
2171
2172void
2173prof_boot1(void)
2174{
2175
Jason Evans7372b152012-02-10 20:22:09 -08002176 cassert(config_prof);
2177
Jason Evans6109fe02010-02-10 10:37:56 -08002178 /*
Jason Evans9b0cbf02014-04-11 14:24:51 -07002179 * opt_prof must be in its final state before any arenas are
2180 * initialized, so this function must be executed early.
Jason Evans6109fe02010-02-10 10:37:56 -08002181 */
2182
Jason Evans551ebc42014-10-03 10:16:09 -07002183 if (opt_prof_leak && !opt_prof) {
Jason Evans6109fe02010-02-10 10:37:56 -08002184 /*
2185 * Enable opt_prof, but in such a way that profiles are never
2186 * automatically dumped.
2187 */
2188 opt_prof = true;
Jason Evanse7339702010-10-23 18:37:06 -07002189 opt_prof_gdump = false;
Jason Evansa02fc082010-03-31 17:35:51 -07002190 } else if (opt_prof) {
2191 if (opt_lg_prof_interval >= 0) {
2192 prof_interval = (((uint64_t)1U) <<
2193 opt_lg_prof_interval);
Jason Evansa3b33862012-11-13 12:56:27 -08002194 }
Jason Evansa02fc082010-03-31 17:35:51 -07002195 }
Jason Evans6109fe02010-02-10 10:37:56 -08002196}
2197
2198bool
Jason Evansb54d1602016-10-20 23:59:12 -07002199prof_boot2(tsd_t *tsd)
Jason Evans6109fe02010-02-10 10:37:56 -08002200{
2201
Jason Evans7372b152012-02-10 20:22:09 -08002202 cassert(config_prof);
2203
Jason Evans6109fe02010-02-10 10:37:56 -08002204 if (opt_prof) {
Jason Evans6da54182012-03-23 18:05:51 -07002205 unsigned i;
2206
Jason Evans602c8e02014-08-18 16:22:13 -07002207 lg_prof_sample = opt_lg_prof_sample;
2208
Jason Evansfc12c0b2014-10-03 23:25:30 -07002209 prof_active = opt_prof_active;
Jason Evansb2c0d632016-04-13 23:36:15 -07002210 if (malloc_mutex_init(&prof_active_mtx, "prof_active",
2211 WITNESS_RANK_PROF_ACTIVE))
Jason Evansfc12c0b2014-10-03 23:25:30 -07002212 return (true);
2213
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002214 prof_gdump_val = opt_prof_gdump;
Jason Evansb2c0d632016-04-13 23:36:15 -07002215 if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
2216 WITNESS_RANK_PROF_GDUMP))
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002217 return (true);
2218
Jason Evansfc12c0b2014-10-03 23:25:30 -07002219 prof_thread_active_init = opt_prof_thread_active_init;
Jason Evansb2c0d632016-04-13 23:36:15 -07002220 if (malloc_mutex_init(&prof_thread_active_init_mtx,
2221 "prof_thread_active_init",
2222 WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
Jason Evansfc12c0b2014-10-03 23:25:30 -07002223 return (true);
2224
Jason Evansb54d1602016-10-20 23:59:12 -07002225 if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
Jason Evans6109fe02010-02-10 10:37:56 -08002226 prof_bt_keycomp))
2227 return (true);
Jason Evansb2c0d632016-04-13 23:36:15 -07002228 if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
2229 WITNESS_RANK_PROF_BT2GCTX))
Jason Evans6109fe02010-02-10 10:37:56 -08002230 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08002231
Jason Evans602c8e02014-08-18 16:22:13 -07002232 tdata_tree_new(&tdatas);
Jason Evansb2c0d632016-04-13 23:36:15 -07002233 if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
2234 WITNESS_RANK_PROF_TDATAS))
Jason Evans602c8e02014-08-18 16:22:13 -07002235 return (true);
2236
2237 next_thr_uid = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07002238 if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
2239 WITNESS_RANK_PROF_NEXT_THR_UID))
Jason Evans9d8f3d22014-09-11 18:06:30 -07002240 return (true);
Jason Evans602c8e02014-08-18 16:22:13 -07002241
Jason Evansb2c0d632016-04-13 23:36:15 -07002242 if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
2243 WITNESS_RANK_PROF_DUMP_SEQ))
Jason Evans6109fe02010-02-10 10:37:56 -08002244 return (true);
Jason Evansb2c0d632016-04-13 23:36:15 -07002245 if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
2246 WITNESS_RANK_PROF_DUMP))
Jason Evans4f37ef62014-01-16 13:23:56 -08002247 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08002248
Jason Evans57efa7b2014-10-08 17:57:19 -07002249 if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
2250 atexit(prof_fdump) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -08002251 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evans6109fe02010-02-10 10:37:56 -08002252 if (opt_abort)
2253 abort();
2254 }
Jason Evans6da54182012-03-23 18:05:51 -07002255
Jason Evansb54d1602016-10-20 23:59:12 -07002256 gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
2257 PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
Jason Evans602c8e02014-08-18 16:22:13 -07002258 if (gctx_locks == NULL)
Jason Evans6da54182012-03-23 18:05:51 -07002259 return (true);
2260 for (i = 0; i < PROF_NCTX_LOCKS; i++) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002261 if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
2262 WITNESS_RANK_PROF_GCTX))
Jason Evans602c8e02014-08-18 16:22:13 -07002263 return (true);
2264 }
2265
Jason Evansb54d1602016-10-20 23:59:12 -07002266 tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
Jason Evansb2c0d632016-04-13 23:36:15 -07002267 PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
Jason Evans602c8e02014-08-18 16:22:13 -07002268 if (tdata_locks == NULL)
2269 return (true);
2270 for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002271 if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
2272 WITNESS_RANK_PROF_TDATA))
Jason Evans6da54182012-03-23 18:05:51 -07002273 return (true);
2274 }
Jason Evans6109fe02010-02-10 10:37:56 -08002275 }
2276
Jason Evansb27805b2010-02-10 18:15:53 -08002277#ifdef JEMALLOC_PROF_LIBGCC
2278 /*
2279 * Cause the backtracing machinery to allocate its internal state
2280 * before enabling profiling.
2281 */
2282 _Unwind_Backtrace(prof_unwind_init_callback, NULL);
2283#endif
2284
Jason Evans6109fe02010-02-10 10:37:56 -08002285 prof_booted = true;
2286
2287 return (false);
2288}
2289
Jason Evans20f1fc92012-10-09 14:46:22 -07002290void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002291prof_prefork0(tsdn_t *tsdn)
Jason Evans20f1fc92012-10-09 14:46:22 -07002292{
2293
2294 if (opt_prof) {
2295 unsigned i;
2296
Jason Evansc1e00ef2016-05-10 22:21:10 -07002297 malloc_mutex_prefork(tsdn, &prof_dump_mtx);
2298 malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
2299 malloc_mutex_prefork(tsdn, &tdatas_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002300 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002301 malloc_mutex_prefork(tsdn, &tdata_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002302 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002303 malloc_mutex_prefork(tsdn, &gctx_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002304 }
2305}
2306
2307void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002308prof_prefork1(tsdn_t *tsdn)
Jason Evans174c0c32016-04-25 23:14:40 -07002309{
2310
2311 if (opt_prof) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002312 malloc_mutex_prefork(tsdn, &prof_active_mtx);
2313 malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
2314 malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
2315 malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
2316 malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002317 }
2318}
2319
2320void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002321prof_postfork_parent(tsdn_t *tsdn)
Jason Evans20f1fc92012-10-09 14:46:22 -07002322{
2323
2324 if (opt_prof) {
2325 unsigned i;
2326
Jason Evansc1e00ef2016-05-10 22:21:10 -07002327 malloc_mutex_postfork_parent(tsdn,
2328 &prof_thread_active_init_mtx);
2329 malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
2330 malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
2331 malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
2332 malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002333 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002334 malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002335 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002336 malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
2337 malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
2338 malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
2339 malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002340 }
2341}
2342
2343void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002344prof_postfork_child(tsdn_t *tsdn)
Jason Evans20f1fc92012-10-09 14:46:22 -07002345{
2346
2347 if (opt_prof) {
2348 unsigned i;
2349
Jason Evansc1e00ef2016-05-10 22:21:10 -07002350 malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
2351 malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
2352 malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
2353 malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
2354 malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002355 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002356 malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
Jason Evans174c0c32016-04-25 23:14:40 -07002357 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002358 malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
2359 malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
2360 malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
2361 malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002362 }
2363}
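/*
 * Fork safety: prof_prefork0/1() acquire every profiler mutex, in witness
 * rank order, before fork(), and the postfork hooks release (parent) or
 * reinitialize (child) them afterward, so the child never inherits a
 * profiler lock held mid-operation by another thread.
 */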
2364
Jason Evans6109fe02010-02-10 10:37:56 -08002365/******************************************************************************/