#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_leak = false;
bool opt_prof_accum = true;
char opt_prof_prefix[PATH_MAX + 1];

uint64_t prof_interval;
bool prof_promote;

unsigned prof_bt_max;

#ifndef NO_TLS
__thread prof_tdata_t *prof_tdata_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
pthread_key_t prof_tdata_tsd;
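/*
 * Even when the __thread fast path above is available, prof_tdata_tsd is
 * still registered (in prof_boot2(), with prof_tdata_cleanup() as its
 * destructor) so that each thread's cached counters are merged back into the
 * global statistics when the thread exits.
 */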
37
Jason Evans6109fe02010-02-10 10:37:56 -080038/*
39 * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
Jason Evansa881cd22010-10-02 15:18:50 -070040 * structure that knows about all backtraces currently captured.
Jason Evans6109fe02010-02-10 10:37:56 -080041 */
42static ckh_t bt2ctx;
43static malloc_mutex_t bt2ctx_mtx;
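/*
 * Lookups are two-level: each thread caches (prof_bt_t *)-->(prof_thr_cnt_t *)
 * mappings in its own prof_tdata_t, so bt2ctx and bt2ctx_mtx are only touched
 * the first time a given thread samples a particular backtrace; see
 * prof_lookup().
 */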

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.  The buffer is implicitly protected by bt2ctx_mtx, since
 * it must be locked anyway during dumping.
 */
static char prof_dump_buf[PROF_DUMP_BUF_SIZE];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

static malloc_mutex_t enq_mtx;
static bool enq;
static bool enq_idump;
static bool enq_gdump;
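/*
 * enq is true while a dumping thread owns bt2ctx_mtx.  prof_idump() and
 * prof_gdump() calls that arrive in that window only set enq_idump/enq_gdump;
 * prof_leave() performs the deferred dumps after dropping the lock, which
 * keeps a dump from recursively trying to acquire bt2ctx_mtx.
 */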

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static prof_bt_t *bt_dup(prof_bt_t *bt);
static void bt_destroy(prof_bt_t *bt);
#ifdef JEMALLOC_PROF_LIBGCC
static _Unwind_Reason_Code prof_unwind_init_callback(
    struct _Unwind_Context *context, void *arg);
static _Unwind_Reason_Code prof_unwind_callback(
    struct _Unwind_Context *context, void *arg);
#endif
static bool prof_flush(bool propagate_err);
static bool prof_write(const char *s, bool propagate_err);
static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
    size_t *leak_nctx);
static void prof_ctx_destroy(prof_ctx_t *ctx);
static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
static bool prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt,
    bool propagate_err);
static bool prof_dump_maps(bool propagate_err);
static bool prof_dump(const char *filename, bool leakcheck,
    bool propagate_err);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
static bool prof_bt_keycomp(const void *k1, const void *k2);
static void prof_tdata_cleanup(void *arg);

/******************************************************************************/

void
bt_init(prof_bt_t *bt, void **vec)
{

    cassert(config_prof);

    bt->vec = vec;
    bt->len = 0;
}

static void
bt_destroy(prof_bt_t *bt)
{

    cassert(config_prof);

    idalloc(bt);
}

static prof_bt_t *
bt_dup(prof_bt_t *bt)
{
    prof_bt_t *ret;

    cassert(config_prof);

    /*
     * Create a single allocation that has space for vec immediately
     * following the prof_bt_t structure.  The backtraces that get
     * stored in the backtrace caches are copied from stack-allocated
     * temporary variables, so size is known at creation time.  Making this
     * a contiguous object improves cache locality.
     */
    ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
        (bt->len * sizeof(void *)));
    if (ret == NULL)
        return (NULL);
    ret->vec = (void **)((uintptr_t)ret +
        QUANTUM_CEILING(sizeof(prof_bt_t)));
    memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
    ret->len = bt->len;

    return (ret);
}

static inline void
prof_enter(void)
{

    cassert(config_prof);

    malloc_mutex_lock(&enq_mtx);
    enq = true;
    malloc_mutex_unlock(&enq_mtx);

    malloc_mutex_lock(&bt2ctx_mtx);
}

static inline void
prof_leave(void)
{
    bool idump, gdump;

    cassert(config_prof);

    malloc_mutex_unlock(&bt2ctx_mtx);

    malloc_mutex_lock(&enq_mtx);
    enq = false;
    idump = enq_idump;
    enq_idump = false;
    gdump = enq_gdump;
    enq_gdump = false;
    malloc_mutex_unlock(&enq_mtx);

    if (idump)
        prof_idump();
    if (gdump)
        prof_gdump();
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
    unw_context_t uc;
    unw_cursor_t cursor;
    unsigned i;
    int err;

    cassert(config_prof);
    assert(bt->len == 0);
    assert(bt->vec != NULL);
    assert(max <= (1U << opt_lg_prof_bt_max));

    unw_getcontext(&uc);
    unw_init_local(&cursor, &uc);

    /* Throw away (nignore+1) stack frames, if that many exist. */
    for (i = 0; i < nignore + 1; i++) {
        err = unw_step(&cursor);
        if (err <= 0)
            return;
    }

    /*
     * Iterate over stack frames until there are no more, or until no space
     * remains in bt.
     */
    for (i = 0; i < max; i++) {
        unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
        bt->len++;
        err = unw_step(&cursor);
        if (err <= 0)
            break;
    }
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

    cassert(config_prof);

    return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
    prof_unwind_data_t *data = (prof_unwind_data_t *)arg;

    cassert(config_prof);

    if (data->nignore > 0)
        data->nignore--;
    else {
        data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
        data->bt->len++;
        if (data->bt->len == data->max)
            return (_URC_END_OF_STACK);
    }

    return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
    prof_unwind_data_t data = {bt, nignore, max};

    cassert(config_prof);

    _Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
#define BT_FRAME(i)                                                     \
    if ((i) < nignore + max) {                                          \
        void *p;                                                        \
        if (__builtin_frame_address(i) == 0)                            \
            return;                                                     \
        p = __builtin_return_address(i);                                \
        if (p == NULL)                                                  \
            return;                                                     \
        if (i >= nignore) {                                             \
            bt->vec[(i) - nignore] = p;                                 \
            bt->len = (i) - nignore + 1;                                \
        }                                                               \
    } else                                                              \
        return;
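
    /*
     * __builtin_frame_address() and __builtin_return_address() require
     * compile-time constant arguments, so the stack cannot be walked in a
     * loop; instead BT_FRAME is expanded once per supported frame index
     * below.
     */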

    cassert(config_prof);
    assert(nignore <= 3);
    assert(max <= (1U << opt_lg_prof_bt_max));

    BT_FRAME(0)
    BT_FRAME(1)
    BT_FRAME(2)
    BT_FRAME(3)
    BT_FRAME(4)
    BT_FRAME(5)
    BT_FRAME(6)
    BT_FRAME(7)
    BT_FRAME(8)
    BT_FRAME(9)

    BT_FRAME(10)
    BT_FRAME(11)
    BT_FRAME(12)
    BT_FRAME(13)
    BT_FRAME(14)
    BT_FRAME(15)
    BT_FRAME(16)
    BT_FRAME(17)
    BT_FRAME(18)
    BT_FRAME(19)

    BT_FRAME(20)
    BT_FRAME(21)
    BT_FRAME(22)
    BT_FRAME(23)
    BT_FRAME(24)
    BT_FRAME(25)
    BT_FRAME(26)
    BT_FRAME(27)
    BT_FRAME(28)
    BT_FRAME(29)

    BT_FRAME(30)
    BT_FRAME(31)
    BT_FRAME(32)
    BT_FRAME(33)
    BT_FRAME(34)
    BT_FRAME(35)
    BT_FRAME(36)
    BT_FRAME(37)
    BT_FRAME(38)
    BT_FRAME(39)

    BT_FRAME(40)
    BT_FRAME(41)
    BT_FRAME(42)
    BT_FRAME(43)
    BT_FRAME(44)
    BT_FRAME(45)
    BT_FRAME(46)
    BT_FRAME(47)
    BT_FRAME(48)
    BT_FRAME(49)

    BT_FRAME(50)
    BT_FRAME(51)
    BT_FRAME(52)
    BT_FRAME(53)
    BT_FRAME(54)
    BT_FRAME(55)
    BT_FRAME(56)
    BT_FRAME(57)
    BT_FRAME(58)
    BT_FRAME(59)

    BT_FRAME(60)
    BT_FRAME(61)
    BT_FRAME(62)
    BT_FRAME(63)
    BT_FRAME(64)
    BT_FRAME(65)
    BT_FRAME(66)
    BT_FRAME(67)
    BT_FRAME(68)
    BT_FRAME(69)

    BT_FRAME(70)
    BT_FRAME(71)
    BT_FRAME(72)
    BT_FRAME(73)
    BT_FRAME(74)
    BT_FRAME(75)
    BT_FRAME(76)
    BT_FRAME(77)
    BT_FRAME(78)
    BT_FRAME(79)

    BT_FRAME(80)
    BT_FRAME(81)
    BT_FRAME(82)
    BT_FRAME(83)
    BT_FRAME(84)
    BT_FRAME(85)
    BT_FRAME(86)
    BT_FRAME(87)
    BT_FRAME(88)
    BT_FRAME(89)

    BT_FRAME(90)
    BT_FRAME(91)
    BT_FRAME(92)
    BT_FRAME(93)
    BT_FRAME(94)
    BT_FRAME(95)
    BT_FRAME(96)
    BT_FRAME(97)
    BT_FRAME(98)
    BT_FRAME(99)

    BT_FRAME(100)
    BT_FRAME(101)
    BT_FRAME(102)
    BT_FRAME(103)
    BT_FRAME(104)
    BT_FRAME(105)
    BT_FRAME(106)
    BT_FRAME(107)
    BT_FRAME(108)
    BT_FRAME(109)

    BT_FRAME(110)
    BT_FRAME(111)
    BT_FRAME(112)
    BT_FRAME(113)
    BT_FRAME(114)
    BT_FRAME(115)
    BT_FRAME(116)
    BT_FRAME(117)
    BT_FRAME(118)
    BT_FRAME(119)

    BT_FRAME(120)
    BT_FRAME(121)
    BT_FRAME(122)
    BT_FRAME(123)
    BT_FRAME(124)
    BT_FRAME(125)
    BT_FRAME(126)
    BT_FRAME(127)

    /* Extras to compensate for nignore. */
    BT_FRAME(128)
    BT_FRAME(129)
    BT_FRAME(130)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{

    cassert(config_prof);
    assert(false);
}
#endif

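/*
 * Return the prof_thr_cnt_t associated with bt for the calling thread,
 * creating it (and the global prof_ctx_t for bt, if necessary) on first use.
 * The thread-local bt2cnt cache is consulted first; only a miss there touches
 * the global bt2ctx table under prof_enter()/prof_leave().
 */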
prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
    union {
        prof_thr_cnt_t *p;
        void *v;
    } ret;
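    /*
     * ret (and btkey/ctx below) are unions so that &ret.v can be handed
     * directly to ckh_search()/ckh_insert(), which traffic in void * keys
     * and data, without casting through incompatible pointer types.
     */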
    prof_tdata_t *prof_tdata;

    cassert(config_prof);

    prof_tdata = PROF_TCACHE_GET();
    if (prof_tdata == NULL) {
        prof_tdata = prof_tdata_init();
        if (prof_tdata == NULL)
            return (NULL);
    }

    if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
        union {
            prof_bt_t *p;
            void *v;
        } btkey;
        union {
            prof_ctx_t *p;
            void *v;
        } ctx;
        bool new_ctx;

        /*
         * This thread's cache lacks bt.  Look for it in the global
         * cache.
         */
        prof_enter();
        if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
            /* bt has never been seen before.  Insert it. */
            ctx.v = imalloc(sizeof(prof_ctx_t));
            if (ctx.v == NULL) {
                prof_leave();
                return (NULL);
            }
            btkey.p = bt_dup(bt);
            if (btkey.v == NULL) {
                prof_leave();
                idalloc(ctx.v);
                return (NULL);
            }
            ctx.p->bt = btkey.p;
            if (malloc_mutex_init(&ctx.p->lock)) {
                prof_leave();
                idalloc(btkey.v);
                idalloc(ctx.v);
                return (NULL);
            }
            memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
            ql_new(&ctx.p->cnts_ql);
            if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
                /* OOM. */
                prof_leave();
                malloc_mutex_destroy(&ctx.p->lock);
                idalloc(btkey.v);
                idalloc(ctx.v);
                return (NULL);
            }
            /*
             * Artificially raise curobjs, in order to avoid a race
             * condition with prof_ctx_merge()/prof_ctx_destroy().
             *
             * No locking is necessary for ctx here because no other
             * threads have had the opportunity to fetch it from
             * bt2ctx yet.
             */
            ctx.p->cnt_merged.curobjs++;
            new_ctx = true;
        } else {
            /*
             * Artificially raise curobjs, in order to avoid a race
             * condition with prof_ctx_merge()/prof_ctx_destroy().
             */
            malloc_mutex_lock(&ctx.p->lock);
            ctx.p->cnt_merged.curobjs++;
            malloc_mutex_unlock(&ctx.p->lock);
            new_ctx = false;
        }
        prof_leave();

        /* Link a prof_thr_cnt_t into ctx for this thread. */
        if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
            assert(ckh_count(&prof_tdata->bt2cnt) > 0);
            /*
             * Flush the least recently used cnt in order to keep
             * bt2cnt from becoming too large.
             */
            ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
            assert(ret.v != NULL);
            if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
                NULL, NULL))
                assert(false);
            ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
            prof_ctx_merge(ret.p->ctx, ret.p);
            /* ret can now be re-used. */
        } else {
            assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
            /* Allocate and partially initialize a new cnt. */
            ret.v = imalloc(sizeof(prof_thr_cnt_t));
            if (ret.p == NULL) {
                if (new_ctx)
                    prof_ctx_destroy(ctx.p);
                return (NULL);
            }
            ql_elm_new(ret.p, cnts_link);
            ql_elm_new(ret.p, lru_link);
        }
        /* Finish initializing ret. */
        ret.p->ctx = ctx.p;
        ret.p->epoch = 0;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
            if (new_ctx)
                prof_ctx_destroy(ctx.p);
            idalloc(ret.v);
            return (NULL);
        }
        ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
        malloc_mutex_lock(&ctx.p->lock);
        ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
        ctx.p->cnt_merged.curobjs--;
        malloc_mutex_unlock(&ctx.p->lock);
    } else {
        /* Move ret to the front of the LRU. */
        ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
        ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
    }

    return (ret.p);
}

static bool
prof_flush(bool propagate_err)
{
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (propagate_err == false) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort)
                abort();
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return (ret);
}

static bool
prof_write(const char *s, bool propagate_err)
{
    unsigned i, slen, n;

    cassert(config_prof);

    i = 0;
    slen = strlen(s);
    while (i < slen) {
        /* Flush the buffer if it is full. */
        if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE)
            if (prof_flush(propagate_err) && propagate_err)
                return (true);

        /* Compare against the unwritten remainder of s, not all of s. */
        if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUF_SIZE) {
            /* Finish writing. */
            n = slen - i;
        } else {
            /* Write as much of s as will fit. */
            n = PROF_DUMP_BUF_SIZE - prof_dump_buf_end;
        }
        memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
        prof_dump_buf_end += n;
        i += n;
    }

    return (false);
}

static void
prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
{
    prof_thr_cnt_t *thr_cnt;
    prof_cnt_t tcnt;

    cassert(config_prof);

    malloc_mutex_lock(&ctx->lock);

    memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
    ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
        volatile unsigned *epoch = &thr_cnt->epoch;

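        /*
         * thr_cnt->cnts is updated by its owning thread without holding
         * ctx->lock; the owner increments epoch immediately before and
         * after each update, so epoch is odd while an update is in
         * flight.  A consistent snapshot is therefore obtained by
         * copying cnts and retrying until epoch is even and unchanged
         * across the copy, as the loop below does.
         */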
        while (true) {
            unsigned epoch0 = *epoch;

            /* Make sure epoch is even. */
            if (epoch0 & 1U)
                continue;

            memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));

            /* Terminate if epoch didn't change while reading. */
            if (*epoch == epoch0)
                break;
        }

        ctx->cnt_summed.curobjs += tcnt.curobjs;
        ctx->cnt_summed.curbytes += tcnt.curbytes;
        if (opt_prof_accum) {
            ctx->cnt_summed.accumobjs += tcnt.accumobjs;
            ctx->cnt_summed.accumbytes += tcnt.accumbytes;
        }
    }

    if (ctx->cnt_summed.curobjs != 0)
        (*leak_nctx)++;

    /* Add to cnt_all. */
    cnt_all->curobjs += ctx->cnt_summed.curobjs;
    cnt_all->curbytes += ctx->cnt_summed.curbytes;
    if (opt_prof_accum) {
        cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
        cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
    }

    malloc_mutex_unlock(&ctx->lock);
}

static void
prof_ctx_destroy(prof_ctx_t *ctx)
{

    cassert(config_prof);

    /*
     * Check that ctx is still unused by any thread cache before destroying
     * it.  prof_lookup() artificially raises ctx->cnt_merged.curobjs in
     * order to avoid a race condition with this function, as does
     * prof_ctx_merge() in order to avoid a race between the main body of
     * prof_ctx_merge() and entry into this function.
     */
    prof_enter();
    malloc_mutex_lock(&ctx->lock);
    if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) {
        assert(ctx->cnt_merged.curbytes == 0);
        assert(ctx->cnt_merged.accumobjs == 0);
        assert(ctx->cnt_merged.accumbytes == 0);
        /* Remove ctx from bt2ctx. */
        if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
            assert(false);
        prof_leave();
        /* Destroy ctx. */
        malloc_mutex_unlock(&ctx->lock);
        bt_destroy(ctx->bt);
        malloc_mutex_destroy(&ctx->lock);
        idalloc(ctx);
    } else {
        /*
         * Compensate for increment in prof_ctx_merge() or
         * prof_lookup().
         */
        ctx->cnt_merged.curobjs--;
        malloc_mutex_unlock(&ctx->lock);
        prof_leave();
    }
}

static void
prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
    bool destroy;

    cassert(config_prof);

    /* Merge cnt stats and detach from ctx. */
    malloc_mutex_lock(&ctx->lock);
    ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
    ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
    ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
    ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
    ql_remove(&ctx->cnts_ql, cnt, cnts_link);
    if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
        ctx->cnt_merged.curobjs == 0) {
        /*
         * Artificially raise ctx->cnt_merged.curobjs in order to keep
         * another thread from winning the race to destroy ctx while
         * this one has ctx->lock dropped.  Without this, it would be
         * possible for another thread to:
         *
         * 1) Sample an allocation associated with ctx.
         * 2) Deallocate the sampled object.
         * 3) Successfully prof_ctx_destroy(ctx).
         *
         * The result would be that ctx no longer exists by the time
         * this thread accesses it in prof_ctx_destroy().
         */
        ctx->cnt_merged.curobjs++;
        destroy = true;
    } else
        destroy = false;
    malloc_mutex_unlock(&ctx->lock);
    if (destroy)
        prof_ctx_destroy(ctx);
}

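/*
 * Write one "<curobjs>: <curbytes> [<accumobjs>: <accumbytes>] @ <pc>..."
 * record for ctx, e.g. (addresses are illustrative only):
 *
 *   7: 1792 [23: 5888] @ 0x4005a3 0x400b12 0x7f3a2c10e6b5
 *
 * which is the per-context line format that pprof expects beneath the
 * "heap profile:" header written by prof_dump().
 */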
static bool
prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
{
    char buf[UMAX2S_BUFSIZE];
    unsigned i;

    cassert(config_prof);

    if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
        assert(ctx->cnt_summed.curbytes == 0);
        assert(ctx->cnt_summed.accumobjs == 0);
        assert(ctx->cnt_summed.accumbytes == 0);
        return (false);
    }

    if (prof_write(u2s(ctx->cnt_summed.curobjs, 10, buf), propagate_err)
        || prof_write(": ", propagate_err)
        || prof_write(u2s(ctx->cnt_summed.curbytes, 10, buf),
        propagate_err)
        || prof_write(" [", propagate_err)
        || prof_write(u2s(ctx->cnt_summed.accumobjs, 10, buf),
        propagate_err)
        || prof_write(": ", propagate_err)
        || prof_write(u2s(ctx->cnt_summed.accumbytes, 10, buf),
        propagate_err)
        || prof_write("] @", propagate_err))
        return (true);

    for (i = 0; i < bt->len; i++) {
        if (prof_write(" 0x", propagate_err)
            || prof_write(u2s((uintptr_t)bt->vec[i], 16, buf),
            propagate_err))
            return (true);
    }

    if (prof_write("\n", propagate_err))
        return (true);

    return (false);
}

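/*
 * Append the contents of /proc/<pid>/maps to the dump, preceded by a
 * "MAPPED_LIBRARIES:" header; pprof uses this section to attribute the raw
 * return addresses in each backtrace to the libraries they fall within.
 */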
static bool
prof_dump_maps(bool propagate_err)
{
    int mfd;
    char buf[UMAX2S_BUFSIZE];
    char *s;
    unsigned i, slen;
    /* /proc/<pid>/maps\0 */
    char mpath[6 + UMAX2S_BUFSIZE
        + 5 + 1];

    cassert(config_prof);

    i = 0;

    s = "/proc/";
    slen = strlen(s);
    memcpy(&mpath[i], s, slen);
    i += slen;

    s = u2s(getpid(), 10, buf);
    slen = strlen(s);
    memcpy(&mpath[i], s, slen);
    i += slen;

    s = "/maps";
    slen = strlen(s);
    memcpy(&mpath[i], s, slen);
    i += slen;

    mpath[i] = '\0';

    mfd = open(mpath, O_RDONLY);
    if (mfd != -1) {
        ssize_t nread;

        if (prof_write("\nMAPPED_LIBRARIES:\n", propagate_err) &&
            propagate_err)
            return (true);
        nread = 0;
        do {
            prof_dump_buf_end += nread;
            if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE) {
                /* Make space in prof_dump_buf before read(). */
                if (prof_flush(propagate_err) && propagate_err)
                    return (true);
            }
            nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
                PROF_DUMP_BUF_SIZE - prof_dump_buf_end);
        } while (nread > 0);
        close(mfd);
    } else
        return (true);

    return (false);
}

static bool
prof_dump(const char *filename, bool leakcheck, bool propagate_err)
{
    prof_cnt_t cnt_all;
    size_t tabind;
    union {
        prof_bt_t *p;
        void *v;
    } bt;
    union {
        prof_ctx_t *p;
        void *v;
    } ctx;
    char buf[UMAX2S_BUFSIZE];
    size_t leak_nctx;

    cassert(config_prof);

    prof_enter();
    prof_dump_fd = creat(filename, 0644);
    if (prof_dump_fd == -1) {
        if (propagate_err == false) {
            malloc_write("<jemalloc>: creat(\"");
            malloc_write(filename);
            malloc_write("\", 0644) failed\n");
            if (opt_abort)
                abort();
        }
        goto ERROR;
    }

    /* Merge per thread profile stats, and sum them in cnt_all. */
    memset(&cnt_all, 0, sizeof(prof_cnt_t));
    leak_nctx = 0;
    for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
        prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);

    /* Dump profile header. */
    if (prof_write("heap profile: ", propagate_err)
        || prof_write(u2s(cnt_all.curobjs, 10, buf), propagate_err)
        || prof_write(": ", propagate_err)
        || prof_write(u2s(cnt_all.curbytes, 10, buf), propagate_err)
        || prof_write(" [", propagate_err)
        || prof_write(u2s(cnt_all.accumobjs, 10, buf), propagate_err)
        || prof_write(": ", propagate_err)
        || prof_write(u2s(cnt_all.accumbytes, 10, buf), propagate_err))
        goto ERROR;

    if (opt_lg_prof_sample == 0) {
        if (prof_write("] @ heapprofile\n", propagate_err))
            goto ERROR;
    } else {
        if (prof_write("] @ heap_v2/", propagate_err)
            || prof_write(u2s((uint64_t)1U << opt_lg_prof_sample, 10,
            buf), propagate_err)
            || prof_write("\n", propagate_err))
            goto ERROR;
    }

    /* Dump per ctx profile stats. */
    for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
        == false;) {
        if (prof_dump_ctx(ctx.p, bt.p, propagate_err))
            goto ERROR;
    }

    /* Dump /proc/<pid>/maps if possible. */
    if (prof_dump_maps(propagate_err))
        goto ERROR;

    if (prof_flush(propagate_err))
        goto ERROR;
    close(prof_dump_fd);
    prof_leave();

    if (leakcheck && cnt_all.curbytes != 0) {
        malloc_write("<jemalloc>: Leak summary: ");
        malloc_write(u2s(cnt_all.curbytes, 10, buf));
        malloc_write((cnt_all.curbytes != 1) ? " bytes, " : " byte, ");
        malloc_write(u2s(cnt_all.curobjs, 10, buf));
        malloc_write((cnt_all.curobjs != 1) ? " objects, " :
            " object, ");
        malloc_write(u2s(leak_nctx, 10, buf));
        malloc_write((leak_nctx != 1) ? " contexts\n" : " context\n");
        malloc_write("<jemalloc>: Run pprof on \"");
        malloc_write(filename);
        malloc_write("\" for leak detail\n");
    }

    return (false);
ERROR:
    prof_leave();
    return (true);
}

#define DUMP_FILENAME_BUFSIZE (PATH_MAX + UMAX2S_BUFSIZE                \
    + 1                                                                 \
    + UMAX2S_BUFSIZE                                                    \
    + 2                                                                 \
    + UMAX2S_BUFSIZE                                                    \
    + 5 + 1)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
    char buf[UMAX2S_BUFSIZE];
    char *s;
    unsigned i, slen;

    cassert(config_prof);

    /*
     * Construct a filename of the form:
     *
     *   <prefix>.<pid>.<seq>.v<vseq>.heap\0
     */
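    /*
     * For example, a process's first dump, if triggered by the interval
     * counter, might be named "jeprof.3478.0.i0.heap" (assuming pid 3478,
     * the default "jeprof" prefix from PROF_PREFIX_DEFAULT, and no
     * opt_prof_prefix override).
     */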

    i = 0;

    s = opt_prof_prefix;
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    s = ".";
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    s = u2s(getpid(), 10, buf);
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    s = ".";
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    s = u2s(prof_dump_seq, 10, buf);
    prof_dump_seq++;
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    s = ".";
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    filename[i] = v;
    i++;

    if (vseq != 0xffffffffffffffffLLU) {
        s = u2s(vseq, 10, buf);
        slen = strlen(s);
        memcpy(&filename[i], s, slen);
        i += slen;
    }

    s = ".heap";
    slen = strlen(s);
    memcpy(&filename[i], s, slen);
    i += slen;

    filename[i] = '\0';
}

static void
prof_fdump(void)
{
    char filename[DUMP_FILENAME_BUFSIZE];

    cassert(config_prof);

    if (prof_booted == false)
        return;

    if (opt_prof_prefix[0] != '\0') {
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename, 'f', 0xffffffffffffffffLLU);
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        prof_dump(filename, opt_prof_leak, false);
    }
}

void
prof_idump(void)
{
    char filename[DUMP_FILENAME_BUFSIZE];

    cassert(config_prof);

    if (prof_booted == false)
        return;
    malloc_mutex_lock(&enq_mtx);
    if (enq) {
        enq_idump = true;
        malloc_mutex_unlock(&enq_mtx);
        return;
    }
    malloc_mutex_unlock(&enq_mtx);

    if (opt_prof_prefix[0] != '\0') {
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename, 'i', prof_dump_iseq);
        prof_dump_iseq++;
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        prof_dump(filename, false, false);
    }
}

bool
prof_mdump(const char *filename)
{
    char filename_buf[DUMP_FILENAME_BUFSIZE];

    cassert(config_prof);

    if (opt_prof == false || prof_booted == false)
        return (true);

    if (filename == NULL) {
        /* No filename specified, so automatically generate one. */
        if (opt_prof_prefix[0] == '\0')
            return (true);
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
        prof_dump_mseq++;
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        filename = filename_buf;
    }
    return (prof_dump(filename, false, true));
}

void
prof_gdump(void)
{
    char filename[DUMP_FILENAME_BUFSIZE];

    cassert(config_prof);

    if (prof_booted == false)
        return;
    malloc_mutex_lock(&enq_mtx);
    if (enq) {
        enq_gdump = true;
        malloc_mutex_unlock(&enq_mtx);
        return;
    }
    malloc_mutex_unlock(&enq_mtx);

    if (opt_prof_prefix[0] != '\0') {
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename, 'u', prof_dump_useq);
        prof_dump_useq++;
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        prof_dump(filename, false, false);
    }
}

static void
prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
{
    size_t ret1, ret2;
    uint64_t h;
    prof_bt_t *bt = (prof_bt_t *)key;

    cassert(config_prof);
    assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
    assert(hash1 != NULL);
    assert(hash2 != NULL);

    h = hash(bt->vec, bt->len * sizeof(void *), 0x94122f335b332aeaLLU);
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
         * enough bits.
         */
        ret1 = h & ZU(0xffffffffU);
        ret2 = h >> 32;
    } else {
        ret1 = h;
        ret2 = hash(bt->vec, bt->len * sizeof(void *),
            0x8432a476666bbc13LLU);
    }

    *hash1 = ret1;
    *hash2 = ret2;
}

static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
    const prof_bt_t *bt1 = (prof_bt_t *)k1;
    const prof_bt_t *bt2 = (prof_bt_t *)k2;

    cassert(config_prof);

    if (bt1->len != bt2->len)
        return (false);
    return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

prof_tdata_t *
prof_tdata_init(void)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);

    /* Initialize an empty cache for this thread. */
    prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
    if (prof_tdata == NULL)
        return (NULL);

    if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
        prof_bt_hash, prof_bt_keycomp)) {
        idalloc(prof_tdata);
        return (NULL);
    }
    ql_new(&prof_tdata->lru_ql);

    prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max);
    if (prof_tdata->vec == NULL) {
        ckh_delete(&prof_tdata->bt2cnt);
        idalloc(prof_tdata);
        return (NULL);
    }

    prof_tdata->prn_state = 0;
    prof_tdata->threshold = 0;
    prof_tdata->accum = 0;

    PROF_TCACHE_SET(prof_tdata);

    return (prof_tdata);
}

static void
prof_tdata_cleanup(void *arg)
{
    prof_thr_cnt_t *cnt;
    prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;

    cassert(config_prof);

    /*
     * Delete the hash table.  All of its contents can still be iterated
     * over via the LRU.
     */
    ckh_delete(&prof_tdata->bt2cnt);

    /* Iteratively merge cnt's into the global stats and delete them. */
    while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
        ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
        prof_ctx_merge(cnt->ctx, cnt);
        idalloc(cnt);
    }

    idalloc(prof_tdata->vec);

    idalloc(prof_tdata);
    PROF_TCACHE_SET(NULL);
}

void
prof_boot0(void)
{

    cassert(config_prof);

    memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
        sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void)
{

    cassert(config_prof);

    /*
     * opt_prof and prof_promote must be in their final state before any
     * arenas are initialized, so this function must be executed early.
     */

    if (opt_prof_leak && opt_prof == false) {
        /*
         * Enable opt_prof, but in such a way that profiles are never
         * automatically dumped.
         */
        opt_prof = true;
        opt_prof_gdump = false;
        prof_interval = 0;
    } else if (opt_prof) {
        if (opt_lg_prof_interval >= 0) {
            prof_interval = (((uint64_t)1U) <<
                opt_lg_prof_interval);
        } else
            prof_interval = 0;
    }

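    /*
     * prof_promote indicates that sampled small allocations should be
     * promoted to page-sized allocations (handled in the arena/allocation
     * paths), so it is only enabled when the expected sample interval is
     * at least a page.
     */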
    prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT);
}

bool
prof_boot2(void)
{

    cassert(config_prof);

    if (opt_prof) {
        if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
            prof_bt_keycomp))
            return (true);
        if (malloc_mutex_init(&bt2ctx_mtx))
            return (true);
        if (pthread_key_create(&prof_tdata_tsd, prof_tdata_cleanup)
            != 0) {
            malloc_write(
                "<jemalloc>: Error in pthread_key_create()\n");
            abort();
        }

        prof_bt_max = (1U << opt_lg_prof_bt_max);
        if (malloc_mutex_init(&prof_dump_seq_mtx))
            return (true);

        if (malloc_mutex_init(&enq_mtx))
            return (true);
        enq = false;
        enq_idump = false;
        enq_gdump = false;

        if (atexit(prof_fdump) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

#ifdef JEMALLOC_PROF_LIBGCC
    /*
     * Cause the backtracing machinery to allocate its internal state
     * before enabling profiling.
     */
    _Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

    prof_booted = true;

    return (false);
}

/******************************************************************************/