#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)

bool	opt_prof = false;
bool	opt_prof_active = true;
size_t	opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t	opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool	opt_prof_gdump = false;
bool	opt_prof_final = true;
bool	opt_prof_leak = false;
bool	opt_prof_accum = false;
char	opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1
];

uint64_t	prof_interval = 0;
bool	prof_promote;

/*
 * Table of mutexes that are shared among ctx's.  These are leaf locks, so
 * there is no problem with using them for more than one ctx at the same time.
 * The primary motivation for this sharing, though, is that ctx's are
 * ephemeral, and destroying mutexes causes complications for systems that
 * allocate when creating/destroying mutexes.
 */
static malloc_mutex_t	*ctx_locks;
static unsigned	cum_ctxs; /* Atomic counter. */

/*
 * Global hash of (prof_bt_t *)-->(prof_ctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t	bt2ctx;
static malloc_mutex_t	bt2ctx_mtx;

static malloc_mutex_t	prof_dump_seq_mtx;
static uint64_t	prof_dump_seq;
static uint64_t	prof_dump_iseq;
static uint64_t	prof_dump_mseq;
static uint64_t	prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t	prof_dump_mtx;
static char	prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static unsigned	prof_dump_buf_end;
static int	prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool	prof_booted = false;

/******************************************************************************/

void
bt_init(prof_bt_t *bt, void **vec)
{

	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

static void
bt_destroy(prof_bt_t *bt)
{

	cassert(config_prof);

	idalloc(bt);
}

static prof_bt_t *
bt_dup(prof_bt_t *bt)
{
	prof_bt_t *ret;

	cassert(config_prof);

	/*
	 * Create a single allocation that has space for vec immediately
	 * following the prof_bt_t structure.  The backtraces that get
	 * stored in the backtrace caches are copied from stack-allocated
	 * temporary variables, so size is known at creation time.  Making this
	 * a contiguous object improves cache locality.
	 */
	ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
	    (bt->len * sizeof(void *)));
	if (ret == NULL)
		return (NULL);
	ret->vec = (void **)((uintptr_t)ret +
	    QUANTUM_CEILING(sizeof(prof_bt_t)));
	memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
	ret->len = bt->len;

	return (ret);
}
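/*
 * Illustrative layout of the object returned by bt_dup() (a sketch, not a
 * normative description): for a 3-frame backtrace the single allocation is
 *
 *     [ prof_bt_t, padded to QUANTUM_CEILING(sizeof(prof_bt_t)) ]
 *     [ vec[0] | vec[1] | vec[2] ]
 *
 * and ret->vec points immediately past the padded header, so the copied
 * frames travel with the structure that owns them.
 */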

static inline void
prof_enter(prof_tdata_t *prof_tdata)
{

	cassert(config_prof);

	assert(prof_tdata->enq == false);
	prof_tdata->enq = true;

	malloc_mutex_lock(&bt2ctx_mtx);
}

static inline void
prof_leave(prof_tdata_t *prof_tdata)
{
	bool idump, gdump;

	cassert(config_prof);

	malloc_mutex_unlock(&bt2ctx_mtx);

	assert(prof_tdata->enq);
	prof_tdata->enq = false;
	idump = prof_tdata->enq_idump;
	prof_tdata->enq_idump = false;
	gdump = prof_tdata->enq_gdump;
	prof_tdata->enq_gdump = false;

	if (idump)
		prof_idump();
	if (gdump)
		prof_gdump();
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
	unw_context_t uc;
	unw_cursor_t cursor;
	unsigned i;
	int err;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	unw_getcontext(&uc);
	unw_init_local(&cursor, &uc);

	/* Throw away (nignore+1) stack frames, if that many exist. */
	for (i = 0; i < nignore + 1; i++) {
		err = unw_step(&cursor);
		if (err <= 0)
			return;
	}

	/*
	 * Iterate over stack frames until there are no more, or until no space
	 * remains in bt.
	 */
	for (i = 0; i < PROF_BT_MAX; i++) {
		unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
		bt->len++;
		err = unw_step(&cursor);
		if (err <= 0)
			break;
	}
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

	cassert(config_prof);

	return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;

	cassert(config_prof);

	if (data->nignore > 0)
		data->nignore--;
	else {
		data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
		data->bt->len++;
		if (data->bt->len == data->max)
			return (_URC_END_OF_STACK);
	}

	return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
	prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
#define BT_FRAME(i)							\
	if ((i) < nignore + PROF_BT_MAX) {				\
		void *p;						\
		if (__builtin_frame_address(i) == 0)			\
			return;						\
		p = __builtin_return_address(i);			\
		if (p == NULL)						\
			return;						\
		if (i >= nignore) {					\
			bt->vec[(i) - nignore] = p;			\
			bt->len = (i) - nignore + 1;			\
		}							\
	} else								\
		return;

	cassert(config_prof);
	assert(nignore <= 3);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)

	/* Extras to compensate for nignore. */
	BT_FRAME(128)
	BT_FRAME(129)
	BT_FRAME(130)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{

	cassert(config_prof);
	not_reached();
}
#endif

static malloc_mutex_t *
prof_ctx_mutex_choose(void)
{
	unsigned nctxs = atomic_add_u(&cum_ctxs, 1);

	return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
}
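/*
 * Example of the striping above (a sketch assuming PROF_NCTX_LOCKS is 1024,
 * its usual definition): the 1st ctx created is assigned ctx_locks[0], the
 * 2nd ctx_locks[1], ..., and the 1025th wraps around to ctx_locks[0].  Two
 * ctx's sharing a lock costs only some contention; correctness does not
 * depend on the assignment being unique.
 */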

static void
prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
{

	ctx->bt = bt;
	ctx->lock = prof_ctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_ctx_merge()/prof_ctx_destroy().
	 */
	ctx->nlimbo = 1;
	ql_elm_new(ctx, dump_link);
	memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
	ql_new(&ctx->cnts_ql);
}

static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
	prof_tdata_t *prof_tdata;

	cassert(config_prof);

	/*
	 * Check that ctx is still unused by any thread cache before destroying
	 * it.  prof_lookup() increments ctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_ctx_merge() in order to
	 * avoid a race between the main body of prof_ctx_merge() and entry
	 * into this function.
	 */
	prof_tdata = prof_tdata_get(false);
	assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
	prof_enter(prof_tdata);
	malloc_mutex_lock(ctx->lock);
	if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
	    ctx->nlimbo == 1) {
		assert(ctx->cnt_merged.curbytes == 0);
		assert(ctx->cnt_merged.accumobjs == 0);
		assert(ctx->cnt_merged.accumbytes == 0);
		/* Remove ctx from bt2ctx. */
		if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
			not_reached();
		prof_leave(prof_tdata);
		/* Destroy ctx. */
		malloc_mutex_unlock(ctx->lock);
		bt_destroy(ctx->bt);
		idalloc(ctx);
	} else {
		/*
		 * Compensate for increment in prof_ctx_merge() or
		 * prof_lookup().
		 */
		ctx->nlimbo--;
		malloc_mutex_unlock(ctx->lock);
		prof_leave(prof_tdata);
	}
}

static void
prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
	bool destroy;

	cassert(config_prof);

	/* Merge cnt stats and detach from ctx. */
	malloc_mutex_lock(ctx->lock);
	ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
	ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
	ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
	ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
	ql_remove(&ctx->cnts_ql, cnt, cnts_link);
	if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
	    ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
		/*
		 * Increment ctx->nlimbo in order to keep another thread from
		 * winning the race to destroy ctx while this one has ctx->lock
		 * dropped.  Without this, it would be possible for another
		 * thread to:
		 *
		 * 1) Sample an allocation associated with ctx.
		 * 2) Deallocate the sampled object.
		 * 3) Successfully prof_ctx_destroy(ctx).
		 *
		 * The result would be that ctx no longer exists by the time
		 * this thread accesses it in prof_ctx_destroy().
		 */
		ctx->nlimbo++;
		destroy = true;
	} else
		destroy = false;
	malloc_mutex_unlock(ctx->lock);
	if (destroy)
		prof_ctx_destroy(ctx);
}

static bool
prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
    prof_ctx_t **p_ctx, bool *p_new_ctx)
{
	union {
		prof_ctx_t	*p;
		void		*v;
	} ctx;
	union {
		prof_bt_t	*p;
		void		*v;
	} btkey;
	bool new_ctx;

	prof_enter(prof_tdata);
	if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
		/* bt has never been seen before.  Insert it. */
		ctx.v = imalloc(sizeof(prof_ctx_t));
		if (ctx.v == NULL) {
			prof_leave(prof_tdata);
			return (true);
		}
		btkey.p = bt_dup(bt);
		if (btkey.v == NULL) {
			prof_leave(prof_tdata);
			idalloc(ctx.v);
			return (true);
		}
		prof_ctx_init(ctx.p, btkey.p);
		if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
			/* OOM. */
			prof_leave(prof_tdata);
			idalloc(btkey.v);
			idalloc(ctx.v);
			return (true);
		}
		new_ctx = true;
	} else {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_ctx_merge()/prof_ctx_destroy().
		 */
		malloc_mutex_lock(ctx.p->lock);
		ctx.p->nlimbo++;
		malloc_mutex_unlock(ctx.p->lock);
		new_ctx = false;
	}
	prof_leave(prof_tdata);

	*p_btkey = btkey.v;
	*p_ctx = ctx.p;
	*p_new_ctx = new_ctx;
	return (false);
}

prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
	union {
		prof_thr_cnt_t	*p;
		void		*v;
	} ret;
	prof_tdata_t *prof_tdata;

	cassert(config_prof);

	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return (NULL);

	if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
		void *btkey;
		prof_ctx_t *ctx;
		bool new_ctx;

		/*
		 * This thread's cache lacks bt.  Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
			return (NULL);

		/* Link a prof_thr_cnt_t into ctx for this thread. */
		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
			/*
			 * Flush the least recently used cnt in order to keep
			 * bt2cnt from becoming too large.
			 */
			ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
			assert(ret.v != NULL);
			if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
			    NULL, NULL))
				not_reached();
			ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
			prof_ctx_merge(ret.p->ctx, ret.p);
			/* ret can now be re-used. */
		} else {
			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
			/* Allocate and partially initialize a new cnt. */
			ret.v = imalloc(sizeof(prof_thr_cnt_t));
			if (ret.p == NULL) {
				if (new_ctx)
					prof_ctx_destroy(ctx);
				return (NULL);
			}
			ql_elm_new(ret.p, cnts_link);
			ql_elm_new(ret.p, lru_link);
		}
		/* Finish initializing ret. */
		ret.p->ctx = ctx;
		ret.p->epoch = 0;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
			if (new_ctx)
				prof_ctx_destroy(ctx);
			idalloc(ret.v);
			return (NULL);
		}
		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
		malloc_mutex_lock(ctx->lock);
		ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
		ctx->nlimbo--;
		malloc_mutex_unlock(ctx->lock);
	} else {
		/* Move ret to the front of the LRU. */
		ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
	}

	return (ret.p);
}

static bool
prof_dump_open(bool propagate_err, const char *filename)
{

	prof_dump_fd = creat(filename, 0644);
	if (prof_dump_fd == -1) {
		if (propagate_err == false) {
			malloc_printf(
			    "<jemalloc>: creat(\"%s\", 0644) failed\n",
			    filename);
			if (opt_abort)
				abort();
		}
		return (true);
	}

	return (false);
}

static bool
prof_dump_flush(bool propagate_err)
{
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (propagate_err == false) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort)
				abort();
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return (ret);
}

static bool
prof_dump_close(bool propagate_err)
{
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return (ret);
}

static bool
prof_dump_write(bool propagate_err, const char *s)
{
	unsigned i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
			if (prof_dump_flush(propagate_err) && propagate_err)
				return (true);

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return (false);
}

JEMALLOC_ATTR(format(printf, 2, 3))
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_dump_write(propagate_err, buf);

	return (ret);
}

static void
prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
    prof_ctx_list_t *ctx_ql)
{
	prof_thr_cnt_t *thr_cnt;
	prof_cnt_t tcnt;

	cassert(config_prof);

	malloc_mutex_lock(ctx->lock);

	/*
	 * Increment nlimbo so that ctx won't go away before dump.
	 * Additionally, link ctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	ctx->nlimbo++;
	ql_tail_insert(ctx_ql, ctx, dump_link);

	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
	ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
		volatile unsigned *epoch = &thr_cnt->epoch;

		while (true) {
			unsigned epoch0 = *epoch;

			/* Make sure epoch is even. */
			if (epoch0 & 1U)
				continue;

			memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));

			/* Terminate if epoch didn't change while reading. */
			if (*epoch == epoch0)
				break;
		}
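		/*
		 * Note (descriptive, not upstream commentary): the loop above
		 * is a seqlock-style consistent read.  The owning thread is
		 * expected to bracket each update of thr_cnt->cnts with two
		 * increments of thr_cnt->epoch, e.g.
		 *
		 *     thr_cnt->epoch++;           (odd: update in progress)
		 *     ... modify thr_cnt->cnts ...
		 *     thr_cnt->epoch++;           (even: update complete)
		 *
		 * so a snapshot copied while epoch is even and unchanged
		 * across the memcpy() cannot contain a torn update.
		 */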

		ctx->cnt_summed.curobjs += tcnt.curobjs;
		ctx->cnt_summed.curbytes += tcnt.curbytes;
		if (opt_prof_accum) {
			ctx->cnt_summed.accumobjs += tcnt.accumobjs;
			ctx->cnt_summed.accumbytes += tcnt.accumbytes;
		}
	}

	if (ctx->cnt_summed.curobjs != 0)
		(*leak_nctx)++;

	/* Add to cnt_all. */
	cnt_all->curobjs += ctx->cnt_summed.curobjs;
	cnt_all->curbytes += ctx->cnt_summed.curbytes;
	if (opt_prof_accum) {
		cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
		cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
	}

	malloc_mutex_unlock(ctx->lock);
}

static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{

	if (opt_lg_prof_sample == 0) {
		if (prof_dump_printf(propagate_err,
		    "heap profile: %"PRId64": %"PRId64
		    " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
		    cnt_all->curobjs, cnt_all->curbytes,
		    cnt_all->accumobjs, cnt_all->accumbytes))
			return (true);
	} else {
		if (prof_dump_printf(propagate_err,
		    "heap profile: %"PRId64": %"PRId64
		    " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
		    cnt_all->curobjs, cnt_all->curbytes,
		    cnt_all->accumobjs, cnt_all->accumbytes,
		    ((uint64_t)1U << opt_lg_prof_sample)))
			return (true);
	}

	return (false);
}
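/*
 * Example of the header emitted above (illustrative numbers only): assuming
 * the common default of opt_lg_prof_sample == 19, the "heap_v2" form is
 *
 *     heap profile: 42: 102400 [0: 0] @ heap_v2/524288
 *
 * i.e. 42 live sampled objects totaling 102400 bytes; the accumulated counts
 * remain zero unless opt_prof_accum is enabled.
 */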

static void
prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{

	ctx->nlimbo--;
	ql_remove(ctx_ql, ctx, dump_link);
}

static void
prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{

	malloc_mutex_lock(ctx->lock);
	prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
	malloc_mutex_unlock(ctx->lock);
}

static bool
prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
    prof_ctx_list_t *ctx_ql)
{
	bool ret;
	unsigned i;

	cassert(config_prof);

	/*
	 * Current statistics can sum to 0 as a result of unmerged per thread
	 * statistics.  Additionally, interval- and growth-triggered dumps can
	 * occur between the time a ctx is created and when its statistics are
	 * filled in.  Avoid dumping any ctx that is an artifact of either
	 * implementation detail.
	 */
	malloc_mutex_lock(ctx->lock);
	if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
		assert(ctx->cnt_summed.curobjs == 0);
		assert(ctx->cnt_summed.curbytes == 0);
		assert(ctx->cnt_summed.accumobjs == 0);
		assert(ctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
	    " [%"PRIu64": %"PRIu64"] @",
	    ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
	    ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_write(propagate_err, "\n")) {
		ret = true;
		goto label_return;
	}

label_return:
	prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
	malloc_mutex_unlock(ctx->lock);
	return (ret);
}

static bool
prof_dump_maps(bool propagate_err)
{
	bool ret;
	int mfd;
	char filename[PATH_MAX + 1];

	cassert(config_prof);

	malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
	    (int)getpid());
	mfd = open(filename, O_RDONLY);
	if (mfd != -1) {
		ssize_t nread;

		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err) {
			ret = true;
			goto label_return;
		}
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1)
		close(mfd);
	return (ret);
}

static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
    const char *filename)
{

	if (cnt_all->curbytes != 0) {
		malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
		    PRId64" object%s, %zu context%s\n",
		    cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
		    cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
		    leak_nctx, (leak_nctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
		    filename);
	}
}

static bool
prof_dump(bool propagate_err, const char *filename, bool leakcheck)
{
	prof_tdata_t *prof_tdata;
	prof_cnt_t cnt_all;
	size_t tabind;
	union {
		prof_ctx_t	*p;
		void		*v;
	} ctx;
	size_t leak_nctx;
	prof_ctx_list_t ctx_ql;

	cassert(config_prof);

	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return (true);

	malloc_mutex_lock(&prof_dump_mtx);

	/* Merge per thread profile stats, and sum them in cnt_all. */
	memset(&cnt_all, 0, sizeof(prof_cnt_t));
	leak_nctx = 0;
	ql_new(&ctx_ql);
	prof_enter(prof_tdata);
	for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
		prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
	prof_leave(prof_tdata);

	/* Create dump file. */
	if (prof_dump_open(propagate_err, filename))
		goto label_open_close_error;

	/* Dump profile header. */
	if (prof_dump_header(propagate_err, &cnt_all))
		goto label_write_error;

	/* Dump per ctx profile stats. */
	while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
		if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
			goto label_write_error;
	}

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err))
		goto label_write_error;

	if (prof_dump_close(propagate_err))
		goto label_open_close_error;

	malloc_mutex_unlock(&prof_dump_mtx);

	if (leakcheck)
		prof_leakcheck(&cnt_all, leak_nctx, filename);

	return (false);
label_write_error:
	prof_dump_close(propagate_err);
label_open_close_error:
	while ((ctx.p = ql_first(&ctx_ql)) != NULL)
		prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
	malloc_mutex_unlock(&prof_dump_mtx);
	return (true);
}

#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{

	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.<v><vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"PRIu64".%c%"PRId64".heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"PRIu64".%c.heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}
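/*
 * Example filenames produced above (hypothetical pid and sequence numbers,
 * assuming the default "jeprof" prefix):
 *
 *     jeprof.12345.0.f.heap     final dump (vseq == VSEQ_INVALID)
 *     jeprof.12345.7.i7.heap    interval-triggered dump
 *
 * The first number is the pid, the second the process-wide dump sequence
 * number; the letter identifies the trigger ('f' final, 'i' interval, 'm'
 * manual via prof_mdump(), 'u' growth via prof_gdump()) and is followed by a
 * per-trigger sequence number when one is supplied.
 */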

static void
prof_fdump(void)
{
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (prof_booted == false)
		return;

	if (opt_prof_final && opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'f', VSEQ_INVALID);
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, opt_prof_leak);
	}
}

void
prof_idump(void)
{
	prof_tdata_t *prof_tdata;
	char filename[PATH_MAX + 1];

	cassert(config_prof);

	if (prof_booted == false)
		return;
	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return;
	if (prof_tdata->enq) {
		prof_tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, false);
	}
}

bool
prof_mdump(const char *filename)
{
	char filename_buf[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (opt_prof == false || prof_booted == false)
		return (true);

	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		if (opt_prof_prefix[0] == '\0')
			return (true);
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return (prof_dump(true, filename, false));
}

void
prof_gdump(void)
{
	prof_tdata_t *prof_tdata;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (prof_booted == false)
		return;
	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return;
	if (prof_tdata->enq) {
		prof_tdata->enq_gdump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, false);
	}
}

static void
prof_bt_hash(const void *key, size_t r_hash[2])
{
	prof_bt_t *bt = (prof_bt_t *)key;

	cassert(config_prof);

	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
	const prof_bt_t *bt1 = (prof_bt_t *)k1;
	const prof_bt_t *bt2 = (prof_bt_t *)k2;

	cassert(config_prof);

	if (bt1->len != bt2->len)
		return (false);
	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

prof_tdata_t *
prof_tdata_init(void)
{
	prof_tdata_t *prof_tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
	if (prof_tdata == NULL)
		return (NULL);

	if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
	    prof_bt_hash, prof_bt_keycomp)) {
		idalloc(prof_tdata);
		return (NULL);
	}
	ql_new(&prof_tdata->lru_ql);

	prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
	if (prof_tdata->vec == NULL) {
		ckh_delete(&prof_tdata->bt2cnt);
		idalloc(prof_tdata);
		return (NULL);
	}

	prof_tdata->prng_state = 0;
	prof_tdata->threshold = 0;
	prof_tdata->accum = 0;

	prof_tdata->enq = false;
	prof_tdata->enq_idump = false;
	prof_tdata->enq_gdump = false;

	prof_tdata_tsd_set(&prof_tdata);

	return (prof_tdata);
}

void
prof_tdata_cleanup(void *arg)
{
	prof_thr_cnt_t *cnt;
	prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;

	cassert(config_prof);

	if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
		 * in order to receive another callback.
		 */
		prof_tdata = PROF_TDATA_STATE_PURGATORY;
		prof_tdata_tsd_set(&prof_tdata);
	} else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to PROF_TDATA_STATE_PURGATORY so that other destructors
		 * wouldn't cause re-creation of the prof_tdata.  This time, do
		 * nothing, so that the destructor will not be called again.
		 */
	} else if (prof_tdata != NULL) {
		/*
		 * Delete the hash table.  All of its contents can still be
		 * iterated over via the LRU.
		 */
		ckh_delete(&prof_tdata->bt2cnt);
		/*
		 * Iteratively merge cnt's into the global stats and delete
		 * them.
		 */
		while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
			ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
			prof_ctx_merge(cnt->ctx, cnt);
			idalloc(cnt);
		}
		idalloc(prof_tdata->vec);
		idalloc(prof_tdata);
		prof_tdata = PROF_TDATA_STATE_PURGATORY;
		prof_tdata_tsd_set(&prof_tdata);
	}
}

void
prof_boot0(void)
{

	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void)
{

	cassert(config_prof);

	/*
	 * opt_prof and prof_promote must be in their final state before any
	 * arenas are initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && opt_prof == false) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}

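	/*
	 * Interpretation (not upstream commentary): when the average sample
	 * interval exceeds the page size, sampled small allocations are
	 * assumed to be promoted to page-sized (large) allocations so that a
	 * prof_ctx_t pointer can be tracked for them via the chunk map rather
	 * than per-small-object bookkeeping.
	 */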
	prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}

bool
prof_boot2(void)
{

	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp))
			return (true);
		if (malloc_mutex_init(&bt2ctx_mtx))
			return (true);
		if (prof_tdata_tsd_boot()) {
			malloc_write(
			    "<jemalloc>: Error in pthread_key_create()\n");
			abort();
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx))
			return (true);
		if (malloc_mutex_init(&prof_dump_mtx))
			return (true);

		if (atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}

		ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
		    sizeof(malloc_mutex_t));
		if (ctx_locks == NULL)
			return (true);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&ctx_locks[i]))
				return (true);
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return (false);
}

void
prof_prefork(void)
{

	if (opt_prof) {
		unsigned i;

		malloc_mutex_prefork(&bt2ctx_mtx);
		malloc_mutex_prefork(&prof_dump_seq_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_prefork(&ctx_locks[i]);
	}
}

void
prof_postfork_parent(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_parent(&ctx_locks[i]);
		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(&bt2ctx_mtx);
	}
}

void
prof_postfork_child(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_child(&ctx_locks[i]);
		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
		malloc_mutex_postfork_child(&bt2ctx_mtx);
	}
}

/******************************************************************************/