2/*--------------------------------------------------------------------*/
3/*--- Massif: a heap profiling skin. ms_main.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
7 This file is part of Massif, a Valgrind skin for profiling memory
8 usage of programs.
9
   Copyright (C) 2003-2004 Nicholas Nethercote
      njn25@cam.ac.uk
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29*/
30
// Memory profiler.  Produces a graph, and gives detailed information about
// allocation contexts in terms of space.time values (ie. area under the
// graph).  Allocation context information is hierarchical, and can thus
34// be inspected step-wise to an appropriate depth. See comments on data
35// structures below for more info on how things work.
36
37#include "vg_skin.h"
38//#include "vg_profile.c"
39
40#include "valgrind.h" // For {MALLOC,FREE}LIKE_BLOCK
41
42/*------------------------------------------------------------*/
43/*--- Overview of operation ---*/
44/*------------------------------------------------------------*/
45
46// Heap blocks are tracked, and the amount of space allocated by various
47// contexts (ie. lines of code, more or less) is also tracked.
48// Periodically, a census is taken, and the amount of space used, at that
49// point, by the most significant (highly allocating) contexts is recorded.
// Censi start off frequent, but are scaled back as the program goes on,
// so that there is always a good number of them.  At the end, overall
// spacetimes for different contexts (of differing levels of precision) are
// calculated, the graph is printed, and the text giving spacetimes for the
// increasingly precise contexts is given.
55//
56// Measures the following:
57// - heap blocks
58// - heap admin bytes
59// - stack(s)
60// - code (code segments loaded at startup, and loaded with mmap)
61// - data (data segments loaded at startup, and loaded/created with mmap,
62// and brk()d segments)
63
64/*------------------------------------------------------------*/
65/*--- Main types ---*/
66/*------------------------------------------------------------*/
67
68// An XPt represents an "execution point", ie. a code address. Each XPt is
69// part of a tree of XPts (an "execution tree", or "XTree"). Each
70// top-to-bottom path through an XTree gives an execution context ("XCon"),
71// and is equivalent to a traditional Valgrind ExeContext.
72//
// The XPt at the top of an XTree (but below "alloc_xpt") is called a
// "top-XPt".  The XPts at the bottom of an XTree (leaf nodes) are
// "bottom-XPts".  The number of XCons in an XTree is equal to the number of
// bottom-XPts in that XTree.
77//
78// All XCons have the same top-XPt, "alloc_xpt", which represents all
79// allocation functions like malloc(). It's a bit of a fake XPt, though,
80// and is only used because it makes some of the code simpler.
81//
82// XTrees are bi-directional.
83//
84// > parent < Example: if child1() calls parent() and child2()
85// / | \ also calls parent(), and parent() calls malloc(),
86// | / \ | the XTree will look like this.
87// | v v |
88// child1 child2
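//
// A minimal sketch of the example above (code and sizes are illustrative
// only, not part of Massif):
//
//    void parent(void) { void* p = malloc(100); }
//    void child1(void) { parent(); }
//    void child2(void) { parent(); }
//
// After one call to each of child1() and child2(), the two XCons are
// [alloc_xpt, parent, child1] and [alloc_xpt, parent, child2].  The
// bottom-XPts (child1, child2) each have curr_space == 100, and the
// top-XPt (parent) has curr_space == 200, the sum of its children.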
89
90typedef struct _XPt XPt;
91
92struct _XPt {
93 Addr eip; // code address
94
95 // Bottom-XPts: space for the precise context.
96 // Other XPts: space of all the descendent bottom-XPts.
97 // Nb: this value goes up and down as the program executes.
98 UInt curr_space;
99
100 // An approximate space.time calculation used along the way for selecting
101 // which contexts to include at each census point.
102 // !!! top-XPTs only !!!
103 ULong spacetime;
104
105 // spacetime2 is an exact space.time calculation done at the end, and
106 // used in the results.
107 // Note that it is *doubled*, to avoid rounding errors.
108 // !!! not used for 'alloc_xpt' !!!
109 ULong spacetime2;
110
111 // n_children and max_children are integers; a very big program might
112 // have more than 65536 allocation points (Konqueror startup has 1800).
113 XPt* parent; // pointer to parent XPt
114 UInt n_children; // number of children
115 UInt max_children; // capacity of children array
116 XPt** children; // pointers to children XPts
117};
118
119// Each census snapshots the most significant XTrees, each XTree having a
120// top-XPt as its root. The 'curr_space' element for each XPt is recorded
121// in the snapshot. The snapshot contains all the XTree's XPts, not in a
122// tree structure, but flattened into an array. This flat snapshot is used
123// at the end for computing spacetime2 for each XPt.
124//
125// Graph resolution, x-axis: no point having more than about 200 census
126// x-points; you can't see them on the graph. Therefore:
127//
128// - do a census every 1 ms for first 200 --> 200, all (200 ms)
129// - halve (drop half of them) --> 100, every 2nd (200 ms)
130// - do a census every 2 ms for next 200 --> 200, every 2nd (400 ms)
131// - halve --> 100, every 4th (400 ms)
132// - do a census every 4 ms for next 400 --> 200, every 4th (800 ms)
133// - etc.
134//
135// This isn't exactly right, because we actually drop (N/2)-1 when halving,
136// but it shows the basic idea.
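//
// Concretely (as set up in the code below: ms_interval starts at 1 ms in
// SK_(post_clo_init), MAX_N_CENSI is 200, and halve_censi() doubles
// ms_interval): the census table first fills after roughly 200 ms, and
// after k halvings the surviving censi are roughly 2^k ms apart.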
137
138#define MAX_N_CENSI 200 // Keep it even, for simplicity
139
140// Graph resolution, y-axis: hp2ps only draws the 19 biggest (in space-time)
141// bands, rest get lumped into OTHERS. I only print the top N
142// (cumulative-so-far space-time) at each point. N should be a bit bigger
143// than 19 in case the cumulative space-time doesn't fit with the eventual
144// space-time computed by hp2ps (but it should be close if the samples are
145// evenly spread, since hp2ps does an approximate per-band space-time
146// calculation that just sums the totals; ie. it assumes all samples are
147// the same distance apart).
148
149#define MAX_SNAPSHOTS 32
150
151typedef
152 struct {
153 XPt* xpt;
154 UInt space;
155 }
156 XPtSnapshot;
157
// An XTree snapshot is stored as an array of XPt snapshots.
159typedef XPtSnapshot* XTreeSnapshot;
160
161typedef
162 struct {
163 Int ms_time; // Int: must allow -1
164 XTreeSnapshot xtree_snapshots[MAX_SNAPSHOTS+1]; // +1 for zero-termination
165 UInt others_space;
166 UInt heap_admin_space;
167 UInt stacks_space;
168 }
169 Census;
170
171// Metadata for heap blocks. Each one contains a pointer to a bottom-XPt,
172// which is a foothold into the XCon at which it was allocated. From
// HP_Chunks, XPt 'curr_space' fields are incremented (at allocation) and
174// decremented (at deallocation).
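//
// A sketch of the flow (the size is illustrative): on a malloc(40), an
// HP_Chunk with size == 40 and 'where' == the bottom-XPt of the current
// XCon is added to malloc_list, and update_XCon(where, 40) bumps
// curr_space all the way up the XCon; the matching free() calls
// update_XCon(where, -40) and removes the HP_Chunk.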
175//
176// Nb: first two fields must match core's VgHashNode.
177typedef
178 struct _HP_Chunk {
179 struct _HP_Chunk* next;
180 Addr data; // Ptr to actual block
181 UInt size; // Size requested
182 XPt* where; // Where allocated; bottom-XPt
183 }
184 HP_Chunk;
185
186/*------------------------------------------------------------*/
187/*--- Profiling events ---*/
188/*------------------------------------------------------------*/
189
190typedef
191 enum {
192 VgpGetXPt = VgpFini+1,
193 VgpGetXPtSearch,
194 VgpCensus,
195 VgpCensusHeap,
196 VgpCensusSnapshot,
197 VgpCensusTreeSize,
198 VgpUpdateXCon,
199 VgpCalcSpacetime2,
200 VgpPrintHp,
201 VgpPrintXPts,
202 }
203 VgpSkinCC;
204
205/*------------------------------------------------------------*/
206/*--- Statistics ---*/
207/*------------------------------------------------------------*/
208
209// Konqueror startup, to give an idea of the numbers involved with a biggish
210// program, with default depth:
211//
//   depth=3                  depth=40
//   - 310,000 allocations
//   - 300,000 frees
//   -  15,000 XPts           800,000 XPts
//   -   1,800 top-XPts
217
218static UInt n_xpts = 0;
219static UInt n_bot_xpts = 0;
220static UInt n_allocs = 0;
221static UInt n_zero_allocs = 0;
222static UInt n_frees = 0;
223static UInt n_children_reallocs = 0;
224static UInt n_snapshot_frees = 0;
225
226static UInt n_halvings = 0;
227static UInt n_real_censi = 0;
228static UInt n_fake_censi = 0;
229static UInt n_attempted_censi = 0;
230
231/*------------------------------------------------------------*/
232/*--- Globals ---*/
233/*------------------------------------------------------------*/
234
235#define FILENAME_LEN 256
236
237#define SPRINTF(zz_buf, fmt, args...) \
238 do { Int len = VG_(sprintf)(zz_buf, fmt, ## args); \
239 VG_(write)(fd, (void*)zz_buf, len); \
240 } while (0)
241
242#define BUF_LEN 1024 // general purpose
243static Char buf [BUF_LEN];
244static Char buf2[BUF_LEN];
245static Char buf3[BUF_LEN];
246
247static UInt sigstacks_space = 0; // Current signal stacks space sum
248
249static VgHashTable malloc_list = NULL; // HP_Chunks
250
251static UInt n_heap_blocks = 0;
252
253
254#define MAX_ALLOC_FNS 32 // includes the builtin ones
255
// First few filled in, rest should be zeroed.  Zero-terminated vector.
static UInt  n_alloc_fns = 11;
static Char* alloc_fns[MAX_ALLOC_FNS] = {
   "malloc",
   "operator new(unsigned)",
   "operator new[](unsigned)",
   "operator new(unsigned, std::nothrow_t const&)",
   "operator new[](unsigned, std::nothrow_t const&)",
   "__builtin_new",
   "__builtin_vec_new",
   "calloc",
   "realloc",
   "my_malloc",   // from vg_libpthread.c
   "memalign",
};
271
272
273/*------------------------------------------------------------*/
274/*--- Command line args ---*/
275/*------------------------------------------------------------*/
276
277#define MAX_DEPTH 50
278
279typedef
280 enum {
281 XText, XHTML,
282 }
283 XFormat;
284
static Bool    clo_heap       = True;
static UInt    clo_heap_admin = 8;
static Bool    clo_stacks     = True;
static UInt    clo_depth      = 3;
static XFormat clo_format     = XText;
290
291Bool SK_(process_cmd_line_option)(Char* arg)
292{
   VG_BOOL_CLO("--heap",        clo_heap)
   else VG_BOOL_CLO("--stacks", clo_stacks)

   else VG_NUM_CLO ("--heap-admin", clo_heap_admin)
   else VG_BNUM_CLO("--depth",      clo_depth, 1, MAX_DEPTH)

299 else if (VG_CLO_STREQN(11, arg, "--alloc-fn=")) {
300 alloc_fns[n_alloc_fns] = & arg[11];
301 n_alloc_fns++;
302 if (n_alloc_fns >= MAX_ALLOC_FNS) {
303 VG_(printf)("Too many alloc functions specified, sorry");
304 VG_(bad_option)(arg);
305 }
306 }
307
308 else if (VG_CLO_STREQ(arg, "--format=text"))
309 clo_format = XText;
310 else if (VG_CLO_STREQ(arg, "--format=html"))
311 clo_format = XHTML;
312
313 else
314 return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
317}
318
319void SK_(print_usage)(void)
320{
321 VG_(printf)(
322" --heap=no|yes profile heap blocks [yes]\n"
323" --heap-admin=<number> average admin bytes per heap block [8]\n"
324" --stacks=no|yes profile stack(s) [yes]\n"
325" --depth=<number> depth of contexts [3]\n"
326" --alloc-fn=<name> specify <fn> as an alloc function [empty]\n"
327" --format=text|html format of textual output [text]\n"
328 );
329 VG_(replacement_malloc_print_usage)();
330}
331
332void SK_(print_debug_usage)(void)
333{
334 VG_(replacement_malloc_print_debug_usage)();
335}
336
337/*------------------------------------------------------------*/
338/*--- Execution contexts ---*/
339/*------------------------------------------------------------*/
340
341// Fake XPt representing all allocation functions like malloc(). Acts as
342// parent node to all top-XPts.
343static XPt* alloc_xpt;
344
345// Cheap allocation for blocks that never need to be freed. Saves about 10%
346// for Konqueror startup with --depth=40.
347static void* perm_malloc(UInt n_bytes)
348{
349 static Addr hp = 0; // current heap pointer
350 static Addr hp_lim = 0; // maximum usable byte in current block
351
352 #define SUPERBLOCK_SIZE (1 << 20) // 1 MB
353
354 if (hp + n_bytes > hp_lim) {
355 hp = (Addr)VG_(get_memory_from_mmap)(SUPERBLOCK_SIZE, "perm_malloc");
356 hp_lim = hp + SUPERBLOCK_SIZE - 1;
357 }
358
359 hp += n_bytes;
360
361 return (void*)(hp - n_bytes);
362}
363
364
365
366static XPt* new_XPt(Addr eip, XPt* parent, Bool is_bottom)
367{
368 XPt* xpt = perm_malloc(sizeof(XPt));
369 xpt->eip = eip;
370
371 xpt->curr_space = 0;
372 xpt->spacetime = 0;
373 xpt->spacetime2 = 0;
374
375 xpt->parent = parent;

   // Check parent is not a bottom-XPt
   sk_assert(parent == NULL || 0 != parent->max_children);

380 xpt->n_children = 0;
381
382 // If a bottom-XPt, don't allocate space for children. This can be 50%
383 // or more, although it tends to drop as --depth increases (eg. 10% for
384 // konqueror with --depth=20).
385 if ( is_bottom ) {
386 xpt->max_children = 0;
387 xpt->children = NULL;
388 n_bot_xpts++;
389 } else {
390 xpt->max_children = 4;
391 xpt->children = VG_(malloc)( xpt->max_children * sizeof(XPt*) );
392 }
393
394 // Update statistics
395 n_xpts++;
396
397 return xpt;
398}
399
400static Bool is_alloc_fn(Addr eip)
401{
402 Int i;
403
404 if ( VG_(get_fnname)(eip, buf, BUF_LEN) ) {
405 for (i = 0; i < n_alloc_fns; i++) {
406 if (VG_STREQ(buf, alloc_fns[i]))
407 return True;
408 }
409 }
410 return False;
411}
412
413// Returns an XCon, from the bottom-XPt. Nb: the XPt returned must be a
414// bottom-XPt now and must always remain a bottom-XPt. We go to some effort
415// to ensure this in certain cases. See comments below.
416static XPt* get_XCon( ThreadId tid, Bool custom_malloc )
417{
418 // Static to minimise stack size. +1 for added 0xffffffff %eip.
419 static Addr eips[MAX_DEPTH + MAX_ALLOC_FNS + 1];
420
421 XPt* xpt = alloc_xpt;
422 UInt n_eips, L, A, B, nC;
423 UInt overestimate;
424 Bool reached_bottom;
425
426 VGP_PUSHCC(VgpGetXPt);
427
428 // Want at least clo_depth non-alloc-fn entries in the snapshot.
429 // However, because we have 1 or more (an unknown number, at this point)
430 // alloc-fns ignored, we overestimate the size needed for the stack
431 // snapshot. Then, if necessary, we repeatedly increase the size until
432 // it is enough.
433 overestimate = 2;
434 while (True) {
435 n_eips = VG_(stack_snapshot)( tid, eips, clo_depth + overestimate );
436
437 // Now we add a dummy "unknown" %eip at the end. This is only used if we
438 // run out of %eips before hitting clo_depth. It's done to ensure the
439 // XPt we return is (now and forever) a bottom-XPt. If the returned XPt
440 // wasn't a bottom-XPt (now or later) it would cause problems later (eg.
441 // the parent's spacetime wouldn't be equal to the total of the
      // children's spacetimes).
443 eips[ n_eips++ ] = 0xffffffff;
444
445 // Skip over alloc functions in eips[].
      for (L = 0; L < n_eips && is_alloc_fn(eips[L]); L++) { }
447
448 // Must be at least one alloc function, unless client used
449 // MALLOCLIKE_BLOCK
450 if (!custom_malloc) sk_assert(L > 0);
451
452 // Should be at least one non-alloc function. If not, try again.
453 if (L == n_eips) {
454 overestimate += 2;
455 if (overestimate > MAX_ALLOC_FNS)
456 VG_(skin_panic)("No stk snapshot big enough to find non-alloc fns");
457 } else {
458 break;
459 }
460 }
461 A = L;
462 B = n_eips - 1;
463 reached_bottom = False;
464
465 // By this point, the eips we care about are in eips[A]..eips[B]
466
467 // Now do the search/insertion of the XCon. 'L' is the loop counter,
468 // being the index into eips[].
469 while (True) {
470 // Look for %eip in xpt's children.
471 // XXX: linear search, ugh -- about 10% of time for konqueror startup
      // XXX: tried caching last result, only hit about 4% for konqueror
473 // Nb: this search hits about 98% of the time for konqueror
474 VGP_PUSHCC(VgpGetXPtSearch);
475
476 // If we've searched/added deep enough, or run out of EIPs, this is
477 // the bottom XPt.
478 if (L - A + 1 == clo_depth || L == B)
479 reached_bottom = True;
480
481 nC = 0;
482 while (True) {
483 if (nC == xpt->n_children) {
484 // not found, insert new XPt
485 sk_assert(xpt->max_children != 0);
486 sk_assert(xpt->n_children <= xpt->max_children);
487 // Expand 'children' if necessary
488 if (xpt->n_children == xpt->max_children) {
489 xpt->max_children *= 2;
490 xpt->children = VG_(realloc)( xpt->children,
491 xpt->max_children * sizeof(XPt*) );
492 n_children_reallocs++;
493 }
494 // Make new XPt for %eip, insert in list
495 xpt->children[ xpt->n_children++ ] =
496 new_XPt(eips[L], xpt, reached_bottom);
497 break;
498 }
499 if (eips[L] == xpt->children[nC]->eip) break; // found the %eip
500 nC++; // keep looking
501 }
502 VGP_POPCC(VgpGetXPtSearch);
503
504 // Return found/built bottom-XPt.
505 if (reached_bottom) {
506 sk_assert(0 == xpt->children[nC]->n_children); // Must be bottom-XPt
507 VGP_POPCC(VgpGetXPt);
508 return xpt->children[nC];
509 }
510
511 // Descend to next level in XTree, the newly found/built non-bottom-XPt
512 xpt = xpt->children[nC];
513 L++;
514 }
515}
516
517// Update 'curr_space' of every XPt in the XCon, by percolating upwards.
518static void update_XCon(XPt* xpt, Int space_delta)
519{
520 VGP_PUSHCC(VgpUpdateXCon);
521
522 sk_assert(True == clo_heap);
523 sk_assert(0 != space_delta);
524 sk_assert(NULL != xpt);
525 sk_assert(0 == xpt->n_children); // must be bottom-XPt
526
527 while (xpt != alloc_xpt) {
528 if (space_delta < 0) sk_assert(xpt->curr_space >= -space_delta);
529 xpt->curr_space += space_delta;
530 xpt = xpt->parent;
531 }
532 if (space_delta < 0) sk_assert(alloc_xpt->curr_space >= -space_delta);
533 alloc_xpt->curr_space += space_delta;
534
535 VGP_POPCC(VgpUpdateXCon);
536}
537
538// Actually want a reverse sort, biggest to smallest
539static Int XPt_cmp_spacetime(void* n1, void* n2)
540{
541 XPt* xpt1 = *(XPt**)n1;
542 XPt* xpt2 = *(XPt**)n2;
543 return (xpt1->spacetime < xpt2->spacetime ? 1 : -1);
544}
545
546static Int XPt_cmp_spacetime2(void* n1, void* n2)
547{
548 XPt* xpt1 = *(XPt**)n1;
549 XPt* xpt2 = *(XPt**)n2;
550 return (xpt1->spacetime2 < xpt2->spacetime2 ? 1 : -1);
551}
552
553
554/*------------------------------------------------------------*/
555/*--- A generic Queue ---*/
556/*------------------------------------------------------------*/
557
558typedef
559 struct {
560 UInt head; // Index of first entry
561 UInt tail; // Index of final+1 entry, ie. next free slot
562 UInt max_elems;
563 void** elems;
564 }
565 Queue;
566
567static Queue* construct_queue(UInt size)
568{
569 UInt i;
570 Queue* q = VG_(malloc)(sizeof(Queue));
571 q->head = 0;
572 q->tail = 0;
573 q->max_elems = size;
574 q->elems = VG_(malloc)(size * sizeof(void*));
575 for (i = 0; i < size; i++)
576 q->elems[i] = NULL;
577
578 return q;
579}
580
581static void destruct_queue(Queue* q)
582{
583 VG_(free)(q->elems);
584 VG_(free)(q);
585}
586
587static void shuffle(Queue* dest_q, void** old_elems)
588{
589 UInt i, j;
590 for (i = 0, j = dest_q->head; j < dest_q->tail; i++, j++)
591 dest_q->elems[i] = old_elems[j];
592 dest_q->head = 0;
593 dest_q->tail = i;
594 for ( ; i < dest_q->max_elems; i++)
595 dest_q->elems[i] = NULL; // paranoia
596}
597
598// Shuffles elements down. If not enough slots free, increase size. (We
599// don't wait until we've completely run out of space, because there could
600// be lots of shuffling just before that point which would be slow.)
601static void adjust(Queue* q)
602{
603 void** old_elems;
604
605 sk_assert(q->tail == q->max_elems);
606 if (q->head < 10) {
607 old_elems = q->elems;
608 q->max_elems *= 2;
609 q->elems = VG_(malloc)(q->max_elems * sizeof(void*));
610 shuffle(q, old_elems);
611 VG_(free)(old_elems);
612 } else {
613 shuffle(q, q->elems);
614 }
615}
616
617static void enqueue(Queue* q, void* elem)
618{
619 if (q->tail == q->max_elems)
620 adjust(q);
621 q->elems[q->tail++] = elem;
622}
623
624static Bool is_empty_queue(Queue* q)
625{
626 return (q->head == q->tail);
627}
628
629static void* dequeue(Queue* q)
630{
631 if (is_empty_queue(q))
632 return NULL; // Queue empty
633 else
634 return q->elems[q->head++];
635}
636
637/*------------------------------------------------------------*/
638/*--- malloc() et al replacement wrappers ---*/
639/*------------------------------------------------------------*/
640
641static __inline__
642void add_HP_Chunk(HP_Chunk* hc)
643{
644 n_heap_blocks++;
645 VG_(HT_add_node) ( malloc_list, (VgHashNode*)hc );
646}
647
648static __inline__
649HP_Chunk* get_HP_Chunk(void* p, HP_Chunk*** prev_chunks_next_ptr)
650{
651 return (HP_Chunk*)VG_(HT_get_node) ( malloc_list, (UInt)p,
652 (VgHashNode***)prev_chunks_next_ptr );
653}
654
655static __inline__
656void remove_HP_Chunk(HP_Chunk* hc, HP_Chunk** prev_chunks_next_ptr)
657{
658 sk_assert(n_heap_blocks > 0);
659 n_heap_blocks--;
660 *prev_chunks_next_ptr = hc->next;
661}
662
663// Forward declaration
664static void hp_census(void);
665
666static __inline__
667void new_block_meta ( void* p, Int size, Bool custom_malloc )
668{
669 HP_Chunk* hc;
670
671 VGP_PUSHCC(VgpCliMalloc);
672
673 if (0 == size) n_zero_allocs++;
674
675 // Make new HP_Chunk node, add to malloclist
676 hc = VG_(malloc)(sizeof(HP_Chunk));
677 hc->size = size;
678 hc->data = (Addr)p;
679
680 if (clo_heap) {
681 hc->where = get_XCon( VG_(get_current_or_recent_tid)(), custom_malloc );
682 if (size != 0)
683 update_XCon(hc->where, size);
684 } else {
685 hc->where = NULL; // paranoia
686 }
687
688 add_HP_Chunk( hc );
689
690 hp_census(); // do a census!
691
692 VGP_POPCC(VgpCliMalloc);
693}
694
695static __inline__
696void* new_block ( Int size, UInt align, Bool is_zeroed )
697{
698 void* p;
699
700 if (size < 0) return NULL;
701
702 VGP_PUSHCC(VgpCliMalloc);
703
704 // Update statistics
705 n_allocs++;
706
707 p = VG_(cli_malloc)( align, size );
708 if (is_zeroed) VG_(memset)(p, 0, size);
709 new_block_meta(p, size, /*custom_malloc*/False);
710
711 VGP_POPCC(VgpCliMalloc);
712 return p;
713}
714
715static __inline__
716void die_block ( void* p, Bool custom_free )
717{
718 HP_Chunk* hc;
719 HP_Chunk** remove_handle;
720
721 VGP_PUSHCC(VgpCliMalloc);
722
723 // Update statistics
724 n_frees++;
725
726 hc = get_HP_Chunk ( p, &remove_handle );
   if (hc == NULL) {
      VGP_POPCC(VgpCliMalloc);
      return;   // must have been a bogus free(), or p==NULL
   }
729
730 sk_assert(hc->data == (Addr)p);
731
732 if (clo_heap && hc->size != 0)
733 update_XCon(hc->where, -hc->size);
734
735 // Actually free the heap block
736 if (!custom_free)
737 VG_(cli_free)( p );
738
739 // Remove HP_Chunk from malloclist, destroy
740 remove_HP_Chunk(hc, remove_handle);
741
742 hp_census(); // do a census!
743
744 VG_(free)( hc );
745 VGP_POPCC(VgpCliMalloc);
746}
747
748
749void* SK_(malloc) ( Int n )
750{
751 return new_block( n, VG_(clo_alignment), /*is_zeroed*/False );
752}
753
754void* SK_(__builtin_new) ( Int n )
755{
756 return new_block( n, VG_(clo_alignment), /*is_zeroed*/False );
757}
758
759void* SK_(__builtin_vec_new) ( Int n )
760{
761 return new_block( n, VG_(clo_alignment), /*is_zeroed*/False );
762}
763
764void* SK_(calloc) ( Int m, Int size )
765{
766 return new_block( m*size, VG_(clo_alignment), /*is_zeroed*/True );
767}
768
void* SK_(memalign) ( Int align, Int n )
{
   return new_block( n, align, /*is_zeroed*/False );
}

void SK_(free) ( void* p )
775{
776 die_block( p, /*custom_free*/False );
777}
778
779void SK_(__builtin_delete) ( void* p )
780{
781 die_block( p, /*custom_free*/False);
782}
783
784void SK_(__builtin_vec_delete) ( void* p )
785{
786 die_block( p, /*custom_free*/False );
787}
788
789void* SK_(realloc) ( void* p_old, Int new_size )
790{
791 HP_Chunk* hc;
792 HP_Chunk** remove_handle;
793 Int i;
794 void* p_new;
795 UInt old_size;
796 XPt *old_where, *new_where;
797
798 VGP_PUSHCC(VgpCliMalloc);
799
800 // First try and find the block.
801 hc = get_HP_Chunk ( p_old, &remove_handle );
802 if (hc == NULL) {
803 VGP_POPCC(VgpCliMalloc);
      return NULL;   // must have been a bogus realloc()
805 }
806
807 sk_assert(hc->data == (Addr)p_old);
808 old_size = hc->size;
809
810 if (new_size <= old_size) {
811 // new size is smaller or same; block not moved
812 p_new = p_old;
813
814 } else {
815 // new size is bigger; make new block, copy shared contents, free old
816 p_new = VG_(cli_malloc)(VG_(clo_alignment), new_size);
817
818 for (i = 0; i < old_size; i++)
819 ((UChar*)p_new)[i] = ((UChar*)p_old)[i];
820
821 VG_(cli_free)(p_old);
822 }
823
824 old_where = hc->where;
825 new_where = get_XCon( VG_(get_current_or_recent_tid)(),
826 /*custom_malloc*/False);
827
828 // Update HP_Chunk
829 hc->data = (Addr)p_new;
830 hc->size = new_size;
831 hc->where = new_where;
832
833 // Update XPt curr_space fields
834 if (clo_heap) {
835 if (0 != old_size) update_XCon(old_where, -old_size);
836 if (0 != new_size) update_XCon(new_where, new_size);
837 }
838
839 // If block has moved, have to remove and reinsert in the malloclist
840 // (since the updated 'data' field is the hash lookup key).
841 if (p_new != p_old) {
842 remove_HP_Chunk(hc, remove_handle);
843 add_HP_Chunk(hc);
844 }
845
846 VGP_POPCC(VgpCliMalloc);
847 return p_new;
848}
849
850
851/*------------------------------------------------------------*/
852/*--- Taking a census ---*/
853/*------------------------------------------------------------*/
854
855static Census censi[MAX_N_CENSI];
856static UInt curr_census = 0;
857
858// Must return False so that all stacks are traversed
static Bool count_stack_size( Addr stack_min, Addr stack_max, void *cp )
{
   *(UInt *)cp += (stack_max - stack_min);
   return False;
863}
864
865static UInt get_xtree_size(XPt* xpt, UInt ix)
866{
867 UInt i;
868
869// VG_(printf)("%4d ", xpt->curr_space);
870
   // Only count this XPt if it uses a significant fraction (> 0.2%) of the
   // total space; if it doesn't, none of its children can either, so there
   // is nothing interesting to record below it.
// if (0 != xpt->curr_space || 0 == ix) {
874 if (xpt->curr_space / (double)alloc_xpt->curr_space > 0.002 || 0 == ix) {
875 ix++;
876
877 // Count all (non-zero) descendent XPts
878 for (i = 0; i < xpt->n_children; i++)
879 ix = get_xtree_size(xpt->children[i], ix);
880 }
881 return ix;
882}
883
884static
885UInt do_space_snapshot(XPt xpt[], XTreeSnapshot xtree_snapshot, UInt ix)
886{
887 UInt i;
888
   // Snapshot this XPt if its space is significant (> 0.2% of the total),
   // or if it is the first one.
890// if (0 != xpt->curr_space || 0 == ix) {
891 if (xpt->curr_space / (double)alloc_xpt->curr_space > 0.002 || 0 == ix) {
892 xtree_snapshot[ix].xpt = xpt;
893 xtree_snapshot[ix].space = xpt->curr_space;
894 ix++;
895
896 // Snapshot all (non-zero) descendent XPts
897 for (i = 0; i < xpt->n_children; i++)
898 ix = do_space_snapshot(xpt->children[i], xtree_snapshot, ix);
899 }
900 return ix;
901}
902
903static UInt ms_interval;
904static UInt do_every_nth_census = 30;
905
906// Weed out half the censi; we choose those that represent the smallest
907// time-spans, because that loses the least information.
908//
909// Algorithm for N censi: We find the census representing the smallest
910// timeframe, and remove it. We repeat this until (N/2)-1 censi are gone.
911// (It's (N/2)-1 because we never remove the first and last censi.)
912// We have to do this one census at a time, rather than finding the (N/2)-1
913// smallest censi in one hit, because when a census is removed, it's
914// neighbours immediately cover greater timespans. So it's N^2, but N only
915// equals 200, and this is only done every 100 censi, which is not too often.
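//
// A small worked example (times are illustrative only): for censi at
// t = {0, 10, 11, 12, 40} ms, the candidate timespans are d(0,11) = 11 for
// the census at t=10, d(10,12) = 2 for t=11, and d(11,40) = 29 for t=12.
// The census at t=11 covers the smallest timespan and so is dropped first;
// losing it loses the least information, since its neighbours are so close.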
916static void halve_censi(void)
917{
918 Int i, jp, j, jn, k;
919 Census* min_census;
920
921 n_halvings++;
922 if (VG_(clo_verbosity) > 1)
923 VG_(message)(Vg_UserMsg, "Halving censi...");
924
925 // Sets j to the index of the first not-yet-removed census at or after i
926 #define FIND_CENSUS(i, j) \
927 for (j = i; -1 == censi[j].ms_time; j++) { }
928
929 for (i = 2; i < MAX_N_CENSI; i += 2) {
      // Find the census representing the smallest timespan.  The timespan
      // for census N is d(N-1,N) + d(N,N+1), where d(A,B) is the time
      // between censi A and B.  We don't consider the first and last censi
      // for removal.
934 Int min_span = 0x7fffffff;
935 Int min_j = 0;
936
937 // Initial triple: (prev, curr, next) == (jp, j, jn)
938 jp = 0;
939 FIND_CENSUS(1, j);
940 FIND_CENSUS(j+1, jn);
941 while (jn < MAX_N_CENSI) {
942 Int timespan = censi[jn].ms_time - censi[jp].ms_time;
943 sk_assert(timespan >= 0);
944 if (timespan < min_span) {
945 min_span = timespan;
946 min_j = j;
947 }
948 // Move on to next triple
949 jp = j;
950 j = jn;
951 FIND_CENSUS(jn+1, jn);
952 }
953 // We've found the least important census, now remove it
954 min_census = & censi[ min_j ];
955 for (k = 0; NULL != min_census->xtree_snapshots[k]; k++) {
956 n_snapshot_frees++;
957 VG_(free)(min_census->xtree_snapshots[k]);
958 min_census->xtree_snapshots[k] = NULL;
959 }
960 min_census->ms_time = -1;
961 }
962
963 // Slide down the remaining censi over the removed ones. The '<=' is
   // because we are removing only (N/2)-1, rather than N/2.
965 for (i = 0, j = 0; i <= MAX_N_CENSI / 2; i++, j++) {
966 FIND_CENSUS(j, j);
967 if (i != j) {
968 censi[i] = censi[j];
969 }
970 }
971 curr_census = i;
972
973 // Double intervals
974 ms_interval *= 2;
975 do_every_nth_census *= 2;
976
977 if (VG_(clo_verbosity) > 1)
978 VG_(message)(Vg_UserMsg, "...done");
979}
980
981// Take a census. Census time seems to be insignificant (usually <= 0 ms,
982// almost always <= 1ms) so don't have to worry about subtracting it from
983// running time in any way.
984//
985// XXX: NOT TRUE! with bigger depths, konqueror censuses can easily take
986// 50ms!
987static void hp_census(void)
988{
989 static UInt ms_prev_census = 0;
990 static UInt ms_next_census = 0; // zero allows startup census
991
992 Int ms_time, ms_time_since_prev;
993 Int i, K;
994 Census* census;
995
996 VGP_PUSHCC(VgpCensus);
997
998 // Only do a census if it's time
999 ms_time = VG_(read_millisecond_timer)();
1000 ms_time_since_prev = ms_time - ms_prev_census;
1001 if (ms_time < ms_next_census) {
1002 n_fake_censi++;
1003 VGP_POPCC(VgpCensus);
1004 return;
1005 }
1006 n_real_censi++;
1007
1008 census = & censi[curr_census];
1009
1010 census->ms_time = ms_time;
1011
1012 // Heap: snapshot the K most significant XTrees -------------------
1013 if (clo_heap) {
1014 K = ( alloc_xpt->n_children < MAX_SNAPSHOTS
1015 ? alloc_xpt->n_children
1016 : MAX_SNAPSHOTS); // max out
1017
      // Update the .spacetime field (approximately) for all top-XPts.
      // We *do not* do it for any non-top-XPts.
1020 for (i = 0; i < alloc_xpt->n_children; i++) {
1021 XPt* top_XPt = alloc_xpt->children[i];
1022 top_XPt->spacetime += top_XPt->curr_space * ms_time_since_prev;
1023 }
      // Sort top-XPts by the (approximate) spacetime field.
1025 VG_(ssort)(alloc_xpt->children, alloc_xpt->n_children, sizeof(XPt*),
1026 XPt_cmp_spacetime);
1027
1028 VGP_PUSHCC(VgpCensusHeap);
1029
1030 // For each significant top-level XPt, record space info about its
1031 // entire XTree, in a single census entry.
1032 // Nb: the xtree_size count/snapshot buffer allocation, and the actual
1033 // snapshot, take similar amounts of time (measured with the
      // millisecond counter).
1035 for (i = 0; i < K; i++) {
1036 UInt xtree_size, xtree_size2;
1037// VG_(printf)("%7u ", alloc_xpt->children[i]->spacetime);
1038 // Count how many XPts are in the XTree; make array of that size
1039 // (+1 for zero termination, which calloc() does for us).
1040 VGP_PUSHCC(VgpCensusTreeSize);
1041 xtree_size = get_xtree_size( alloc_xpt->children[i], 0 );
1042 VGP_POPCC(VgpCensusTreeSize);
1043 census->xtree_snapshots[i] =
1044 VG_(calloc)(xtree_size+1, sizeof(XPtSnapshot));
         if (0 && VG_(clo_verbosity) > 1)
            VG_(printf)("calloc: %d (%d B)\n", xtree_size+1,
1047 (xtree_size+1) * sizeof(XPtSnapshot));
1048
1049 // Take space-snapshot: copy 'curr_space' for every XPt in the
1050 // XTree into the snapshot array, along with pointers to the XPts.
1051 // (Except for ones with curr_space==0, which wouldn't contribute
1052 // to the final spacetime2 calculation anyway; excluding them
         // saves a lot of memory and up to 40% time with big --depth values.)
1054 VGP_PUSHCC(VgpCensusSnapshot);
1055 xtree_size2 = do_space_snapshot(alloc_xpt->children[i],
1056 census->xtree_snapshots[i], 0);
1057 sk_assert(xtree_size == xtree_size2);
1058 VGP_POPCC(VgpCensusSnapshot);
1059 }
1060// VG_(printf)("\n\n");
1061 // Zero-terminate 'xtree_snapshot' array
1062 census->xtree_snapshots[i] = NULL;
1063
1064 VGP_POPCC(VgpCensusHeap);
1065
1066 //VG_(printf)("printed %d censi\n", K);
1067
1068 // Lump the rest into a single "others" entry.
1069 census->others_space = 0;
1070 for (i = K; i < alloc_xpt->n_children; i++) {
1071 census->others_space += alloc_xpt->children[i]->curr_space;
1072 }
1073 }
1074
1075 // Heap admin -------------------------------------------------------
1076 if (clo_heap_admin > 0)
1077 census->heap_admin_space = clo_heap_admin * n_heap_blocks;
1078
1079 // Stack(s) ---------------------------------------------------------
1080 if (clo_stacks) {
      census->stacks_space = sigstacks_space;
      // slightly abusing this function
      VG_(first_matching_thread_stack)( count_stack_size, &census->stacks_space );
      i++;
1085 }
1086
1087 // Finish, update interval if necessary -----------------------------
1088 curr_census++;
1089 census = NULL; // don't use again now that curr_census changed
1090
1091 // Halve the entries, if our census table is full
1092 if (MAX_N_CENSI == curr_census) {
1093 halve_censi();
1094 }
1095
1096 // Take time for next census from now, rather than when this census
1097 // should have happened. Because, if there's a big gap due to a kernel
1098 // operation, there's no point doing catch-up censi every BB for a while
1099 // -- that would just give N censi at almost the same time.
1100 if (VG_(clo_verbosity) > 1) {
1101 VG_(message)(Vg_UserMsg, "census: %d ms (took %d ms)", ms_time,
1102 VG_(read_millisecond_timer)() - ms_time );
1103 }
1104 ms_prev_census = ms_time;
1105 ms_next_census = ms_time + ms_interval;
1106 //ms_next_census += ms_interval;
1107
1108 //VG_(printf)("Next: %d ms\n", ms_next_census);
1109
1110 VGP_POPCC(VgpCensus);
1111}
1112
1113/*------------------------------------------------------------*/
1114/*--- Tracked events ---*/
1115/*------------------------------------------------------------*/
1116
1117static void new_mem_stack_signal(Addr a, UInt len)
1118{
1119 sigstacks_space += len;
1120}
1121
1122static void die_mem_stack_signal(Addr a, UInt len)
1123{
1124 sk_assert(sigstacks_space >= len);
1125 sigstacks_space -= len;
1126}
1127
1128/*------------------------------------------------------------*/
1129/*--- Client Requests ---*/
1130/*------------------------------------------------------------*/
1131
1132Bool SK_(handle_client_request) ( ThreadId tid, UInt* argv, UInt* ret )
1133{
1134 switch (argv[0]) {
1135 case VG_USERREQ__MALLOCLIKE_BLOCK: {
1136 void* p = (void*)argv[1];
1137 UInt sizeB = argv[2];
1138 *ret = 0;
1139 new_block_meta( p, sizeB, /*custom_malloc*/True );
1140 return True;
1141 }
1142 case VG_USERREQ__FREELIKE_BLOCK: {
1143 void* p = (void*)argv[1];
1144 *ret = 0;
1145 die_block( p, /*custom_free*/True );
1146 return True;
1147 }
1148 default:
1149 *ret = 0;
1150 return False;
1151 }
1152}
1153
1154/*------------------------------------------------------------*/
1155/*--- Initialisation ---*/
1156/*------------------------------------------------------------*/
1157
1158// Current directory at startup.
1159static Char* base_dir;
1160
1161UInt VG_(vg_malloc_redzone_szB) = 0;
1162
1163void SK_(pre_clo_init)()
1164{
   VG_(details_name)            ("Massif");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a space profiler");
   VG_(details_copyright_author)("Copyright (C) 2003, Nicholas Nethercote");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

1171 // Needs
1172 VG_(needs_libc_freeres)();
1173 VG_(needs_command_line_options)();
1174 VG_(needs_client_requests) ();
1175
1176 // Events to track
1177 VG_(init_new_mem_stack_signal) ( new_mem_stack_signal );
1178 VG_(init_die_mem_stack_signal) ( die_mem_stack_signal );
1179
1180 // Profiling events
1181 VGP_(register_profile_event)(VgpGetXPt, "get-XPt");
1182 VGP_(register_profile_event)(VgpGetXPtSearch, "get-XPt-search");
1183 VGP_(register_profile_event)(VgpCensus, "census");
1184 VGP_(register_profile_event)(VgpCensusHeap, "census-heap");
1185 VGP_(register_profile_event)(VgpCensusSnapshot, "census-snapshot");
1186 VGP_(register_profile_event)(VgpCensusTreeSize, "census-treesize");
1187 VGP_(register_profile_event)(VgpUpdateXCon, "update-XCon");
1188 VGP_(register_profile_event)(VgpCalcSpacetime2, "calc-spacetime2");
1189 VGP_(register_profile_event)(VgpPrintHp, "print-hp");
1190 VGP_(register_profile_event)(VgpPrintXPts, "print-XPts");
1191
1192 // HP_Chunks
1193 malloc_list = VG_(HT_construct)();
1194
1195 // Dummy node at top of the context structure.
1196 alloc_xpt = new_XPt(0, NULL, /*is_bottom*/False);
1197
1198 sk_assert( VG_(getcwd_alloc)(&base_dir) );
1199}
1200
1201void SK_(post_clo_init)(void)
1202{
1203 ms_interval = 1;
1204
1205 // Do an initial sample for t = 0
1206 hp_census();
1207}
1208
1209/*------------------------------------------------------------*/
1210/*--- Instrumentation ---*/
1211/*------------------------------------------------------------*/
1212
1213UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
1214{
1215 return cb_in;
1216}
1217
1218/*------------------------------------------------------------*/
1219/*--- Spacetime recomputation ---*/
1220/*------------------------------------------------------------*/
1221
1222// Although we've been calculating spacetime along the way, because the
1223// earlier calculations were done at a finer timescale, the .spacetime field
1224// might not agree with what hp2ps sees, because we've thrown away some of
1225// the information. So recompute it at the scale that hp2ps sees, so we can
1226// confidently determine which contexts hp2ps will choose for displaying as
1227// distinct bands. This recomputation only happens to the significant ones
1228// that get printed in the .hp file, so it's cheap.
1229//
1230// The spacetime calculation:
1231// ( a[0]*d(0,1) + a[1]*(d(0,1) + d(1,2)) + ... + a[N-1]*d(N-2,N-1) ) / 2
1232// where
1233// a[N] is the space at census N
1234// d(A,B) is the time interval between censi A and B
1235// and
1236// d(A,B) + d(B,C) == d(A,C)
1237//
1238// Key point: we can calculate the area for a census without knowing the
1239// previous or subsequent censi's space; because any over/underestimates
1240// for this census will be reversed in the next, balancing out. This is
1241// important, as getting the previous/next census entry for a particular
1242// AP is a pain with this data structure, but getting the prev/next
1243// census time is easy.
1244//
1245// Each heap calculation gets added to its context's spacetime2 field.
1246// The ULong* values are all running totals, hence the use of "+=" everywhere.
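//
// A worked example of the formula (numbers are illustrative only): for
// three censi at times t = {0, 10, 30} ms with heap sizes a = {0, 100, 40}
// bytes, the doubled total is
//    a[0]*d(0,1) + a[1]*(d(0,1)+d(1,2)) + a[2]*d(1,2)
//    = 0*10 + 100*(10+20) + 40*20 = 3800
// so the spacetime is 3800/2 = 1900 ms.B, which matches the trapezoidal
// areas (0+100)/2*10 + (100+40)/2*20 = 500 + 1400 = 1900.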
1247
1248// This does the calculations for a single census.
1249static void calc_spacetime2b(Census* census, UInt d_t1_t2,
1250 ULong* twice_heap_ST,
1251 ULong* twice_heap_admin_ST,
1252 ULong* twice_stack_ST)
1253{
1254 UInt i, j;
1255 XPtSnapshot* xpt_snapshot;
1256
1257 // Heap --------------------------------------------------------
1258 if (clo_heap) {
1259 for (i = 0; NULL != census->xtree_snapshots[i]; i++) {
1260 // Compute total heap spacetime2 for the entire XTree using only the
1261 // top-XPt (the first XPt in xtree_snapshot).
1262 *twice_heap_ST += d_t1_t2 * census->xtree_snapshots[i][0].space;
1263
1264 // Increment spacetime2 for every XPt in xtree_snapshot (inc. top one)
1265 for (j = 0; NULL != census->xtree_snapshots[i][j].xpt; j++) {
1266 xpt_snapshot = & census->xtree_snapshots[i][j];
1267 xpt_snapshot->xpt->spacetime2 += d_t1_t2 * xpt_snapshot->space;
1268 }
1269 }
1270 *twice_heap_ST += d_t1_t2 * census->others_space;
1271 }
1272
1273 // Heap admin --------------------------------------------------
1274 if (clo_heap_admin > 0)
1275 *twice_heap_admin_ST += d_t1_t2 * census->heap_admin_space;
1276
1277 // Stack(s) ----------------------------------------------------
1278 if (clo_stacks)
1279 *twice_stack_ST += d_t1_t2 * census->stacks_space;
1280}
1281
1282// This does the calculations for all censi.
1283static void calc_spacetime2(ULong* heap2, ULong* heap_admin2, ULong* stack2)
1284{
1285 UInt i, N = curr_census;
1286
1287 VGP_PUSHCC(VgpCalcSpacetime2);
1288
1289 *heap2 = 0;
1290 *heap_admin2 = 0;
1291 *stack2 = 0;
1292
1293 if (N <= 1)
1294 return;
1295
1296 calc_spacetime2b( &censi[0], censi[1].ms_time - censi[0].ms_time,
1297 heap2, heap_admin2, stack2 );
1298
1299 for (i = 1; i <= N-2; i++) {
1300 calc_spacetime2b( & censi[i], censi[i+1].ms_time - censi[i-1].ms_time,
1301 heap2, heap_admin2, stack2 );
1302 }
1303
1304 calc_spacetime2b( & censi[N-1], censi[N-1].ms_time - censi[N-2].ms_time,
1305 heap2, heap_admin2, stack2 );
1306 // Now get rid of the halves. May lose a 0.5 on each, doesn't matter.
1307 *heap2 /= 2;
1308 *heap_admin2 /= 2;
1309 *stack2 /= 2;
1310
1311 VGP_POPCC(VgpCalcSpacetime2);
1312}
1313
1314/*------------------------------------------------------------*/
1315/*--- Writing the graph file ---*/
1316/*------------------------------------------------------------*/
1317
1318static Char* make_filename(Char* dir, Char* suffix)
1319{
1320 Char* filename;
1321
1322 /* Block is big enough for dir name + massif.<pid>.<suffix> */
1323 filename = VG_(malloc)((VG_(strlen)(dir) + 32)*sizeof(Char));
1324 VG_(sprintf)(filename, "%s/massif.%d%s", dir, VG_(getpid)(), suffix);
1325
1326 return filename;
1327}
1328
// Make string acceptable to hp2ps (sigh): turn spaces into '%', escape parentheses.
1330static Char* clean_fnname(Char *d, Char* s)
1331{
1332 Char* dorig = d;
1333 while (*s) {
1334 if (' ' == *s) { *d = '%'; }
1335 else if ('(' == *s) { *d++ = '\\'; *d = '('; }
1336 else if (')' == *s) { *d++ = '\\'; *d = ')'; }
1337 else { *d = *s; };
1338 s++;
1339 d++;
1340 }
1341 *d = '\0';
1342 return dorig;
1343}
1344
1345static void file_err ( Char* file )
1346{
1347 VG_(message)(Vg_UserMsg, "error: can't open output file `%s'", file );
1348 VG_(message)(Vg_UserMsg, " ... so profile results will be missing.");
1349}
1350
1351/* Format, by example:
1352
1353 JOB "a.out -p"
1354 DATE "Fri Apr 17 11:43:45 1992"
1355 SAMPLE_UNIT "seconds"
1356 VALUE_UNIT "bytes"
1357 BEGIN_SAMPLE 0.00
1358 SYSTEM 24
1359 END_SAMPLE 0.00
1360 BEGIN_SAMPLE 1.00
1361 elim 180
1362 insert 24
1363 intersect 12
1364 disin 60
1365 main 12
1366 reduce 20
1367 SYSTEM 12
1368 END_SAMPLE 1.00
1369 MARK 1.50
1370 MARK 1.75
1371 MARK 1.80
1372 BEGIN_SAMPLE 2.00
1373 elim 192
1374 insert 24
1375 intersect 12
1376 disin 84
1377 main 12
1378 SYSTEM 24
1379 END_SAMPLE 2.00
1380 BEGIN_SAMPLE 2.82
1381 END_SAMPLE 2.82
1382 */
1383static void write_hp_file(void)
1384{
1385 Int i, j;
1386 Int fd, res;
1387 Char *hp_file, *ps_file, *aux_file;
1388 Char* cmdfmt;
1389 Char* cmdbuf;
1390 Int cmdlen;
1391
1392 VGP_PUSHCC(VgpPrintHp);
1393
1394 // Open file
1395 hp_file = make_filename( base_dir, ".hp" );
1396 ps_file = make_filename( base_dir, ".ps" );
1397 aux_file = make_filename( base_dir, ".aux" );
1398 fd = VG_(open)(hp_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
1399 VKI_S_IRUSR|VKI_S_IWUSR);
1400 if (fd < 0) {
1401 file_err( hp_file );
1402 VGP_POPCC(VgpPrintHp);
1403 return;
1404 }
1405
1406 // File header, including command line
1407 SPRINTF(buf, "JOB \"");
1408 for (i = 0; i < VG_(client_argc); i++)
1409 SPRINTF(buf, "%s ", VG_(client_argv)[i]);
1410 SPRINTF(buf, /*" (%d ms/sample)\"\n"*/ "\"\n"
1411 "DATE \"\"\n"
1412 "SAMPLE_UNIT \"ms\"\n"
1413 "VALUE_UNIT \"bytes\"\n", ms_interval);
1414
1415 // Censi
1416 for (i = 0; i < curr_census; i++) {
1417 Census* census = & censi[i];
1418
1419 // Census start
1420 SPRINTF(buf, "MARK %d.0\n"
1421 "BEGIN_SAMPLE %d.0\n",
1422 census->ms_time, census->ms_time);
1423
1424 // Heap -----------------------------------------------------------
1425 if (clo_heap) {
1426 // Print all the significant XPts from that census
1427 for (j = 0; NULL != census->xtree_snapshots[j]; j++) {
1428 // Grab the jth top-XPt
1429 XTreeSnapshot xtree_snapshot = & census->xtree_snapshots[j][0];
1430 if ( ! VG_(get_fnname)(xtree_snapshot->xpt->eip, buf2, 16)) {
1431 VG_(sprintf)(buf2, "???");
1432 }
1433 SPRINTF(buf, "x%x:%s %d\n", xtree_snapshot->xpt->eip,
1434 clean_fnname(buf3, buf2), xtree_snapshot->space);
1435 }
1436
1437 // Remaining heap block alloc points, combined
1438 if (census->others_space > 0)
1439 SPRINTF(buf, "other %d\n", census->others_space);
1440 }
1441
1442 // Heap admin -----------------------------------------------------
1443 if (clo_heap_admin > 0 && census->heap_admin_space)
1444 SPRINTF(buf, "heap-admin %d\n", census->heap_admin_space);
1445
1446 // Stack(s) -------------------------------------------------------
1447 if (clo_stacks)
1448 SPRINTF(buf, "stack(s) %d\n", census->stacks_space);
1449
1450 // Census end
1451 SPRINTF(buf, "END_SAMPLE %d.0\n", census->ms_time);
1452 }
1453
1454 // Close file
1455 sk_assert(fd >= 0);
1456 VG_(close)(fd);
1457
1458 // Attempt to convert file using hp2ps
1459 cmdfmt = "%s/hp2ps -c -t1 %s";
1460 cmdlen = VG_(strlen)(VG_(libdir)) + VG_(strlen)(hp_file)
1461 + VG_(strlen)(cmdfmt);
1462 cmdbuf = VG_(malloc)( sizeof(Char) * cmdlen );
1463 VG_(sprintf)(cmdbuf, cmdfmt, VG_(libdir), hp_file);
1464 res = VG_(system)(cmdbuf);
1465 VG_(free)(cmdbuf);
1466 if (res != 0) {
1467 VG_(message)(Vg_UserMsg,
1468 "Conversion to PostScript failed. Try converting manually.");
1469 } else {
1470 // remove the .hp and .aux file
1471 VG_(unlink)(hp_file);
1472 VG_(unlink)(aux_file);
1473 }
1474
1475 VG_(free)(hp_file);
1476 VG_(free)(ps_file);
1477 VG_(free)(aux_file);
1478
1479 VGP_POPCC(VgpPrintHp);
1480}
1481
1482/*------------------------------------------------------------*/
1483/*--- Writing the XPt text/HTML file ---*/
1484/*------------------------------------------------------------*/
1485
1486static void percentify(Int n, Int pow, Int field_width, char xbuf[])
1487{
1488 int i, len, space;
1489
1490 VG_(sprintf)(xbuf, "%d.%d%%", n / pow, n % pow);
1491 len = VG_(strlen)(xbuf);
1492 space = field_width - len;
1493 if (space < 0) space = 0; /* Allow for v. small field_width */
1494 i = len;
1495
1496 /* Right justify in field */
1497 for ( ; i >= 0; i--) xbuf[i + space] = xbuf[i];
1498 for (i = 0; i < space; i++) xbuf[i] = ' ';
1499}
1500
1501// Nb: uses a static buffer, each call trashes the last string returned.
1502static Char* make_perc(ULong spacetime, ULong total_spacetime)
1503{
1504 static Char mbuf[32];
1505
1506 UInt p = 10;
1507 percentify(spacetime * 100 * p / total_spacetime, p, 5, mbuf);
1508 return mbuf;
1509}
1510
1511// Nb: passed in XPt is a lower-level XPt; %eips are grabbed from
1512// bottom-to-top of XCon, and then printed in the reverse order.
1513static UInt pp_XCon(Int fd, XPt* xpt)
1514{
1515 Addr rev_eips[clo_depth+1];
1516 Int i = 0;
1517 Int n = 0;
1518 Bool is_HTML = ( XHTML == clo_format );
1519 Char* maybe_br = ( is_HTML ? "<br>" : "" );
1520 Char* maybe_indent = ( is_HTML ? "&nbsp;&nbsp;" : "" );
1521
1522 sk_assert(NULL != xpt);
1523
1524 while (True) {
1525 rev_eips[i] = xpt->eip;
1526 n++;
1527 if (alloc_xpt == xpt->parent) break;
1528 i++;
1529 xpt = xpt->parent;
1530 }
1531
1532 for (i = n-1; i >= 0; i--) {
1533 // -1 means point to calling line
1534 VG_(describe_eip)(rev_eips[i]-1, buf2, BUF_LEN);
1535 SPRINTF(buf, " %s%s%s\n", maybe_indent, buf2, maybe_br);
1536 }
1537
1538 return n;
1539}
1540
1541// Important point: for HTML, each XPt must be identified uniquely for the
1542// HTML links to all match up correctly. Using xpt->eip is not
1543// sufficient, because function pointers mean that you can call more than
1544// one other function from a single code location. So instead we use the
1545// address of the xpt struct itself, which is guaranteed to be unique.
1546
1547static void pp_all_XPts2(Int fd, Queue* q, ULong heap_spacetime,
1548 ULong total_spacetime)
1549{
1550 UInt i;
1551 XPt *xpt, *child;
1552 UInt L = 0;
1553 UInt c1 = 1;
1554 UInt c2 = 0;
1555 ULong sum = 0;
1556 UInt n;
1557 Char *eip_desc, *perc;
1558 Bool is_HTML = ( XHTML == clo_format );
1559 Char* maybe_br = ( is_HTML ? "<br>" : "" );
1560 Char* maybe_p = ( is_HTML ? "<p>" : "" );
1561 Char* maybe_ul = ( is_HTML ? "<ul>" : "" );
1562 Char* maybe_li = ( is_HTML ? "<li>" : "" );
1563 Char* maybe_fli = ( is_HTML ? "</li>" : "" );
1564 Char* maybe_ful = ( is_HTML ? "</ul>" : "" );
1565 Char* end_hr = ( is_HTML ? "<hr>" :
1566 "=================================" );
1567 Char* depth = ( is_HTML ? "<code>--depth</code>" : "--depth" );
1568
1569 SPRINTF(buf, "== %d ===========================%s\n", L, maybe_br);
1570
1571 while (NULL != (xpt = (XPt*)dequeue(q))) {
1572 // Check that non-top-level XPts have a zero .spacetime field.
1573 if (xpt->parent != alloc_xpt) sk_assert( 0 == xpt->spacetime );
1574
1575 // Check that the sum of all children .spacetime2s equals parent's
1576 // (unless alloc_xpt, when it should == 0).
1577 if (alloc_xpt == xpt) {
1578 sk_assert(0 == xpt->spacetime2);
1579 } else {
1580 sum = 0;
1581 for (i = 0; i < xpt->n_children; i++) {
1582 sum += xpt->children[i]->spacetime2;
1583 }
1584 //sk_assert(sum == xpt->spacetime2);
1585 // It's possible that not all the children were included in the
1586 // spacetime2 calculations. Hopefully almost all of them were, and
1587 // all the important ones.
1588// sk_assert(sum <= xpt->spacetime2);
1589// sk_assert(sum * 1.05 > xpt->spacetime2 );
1590// if (sum != xpt->spacetime2) {
1591// VG_(printf)("%ld, %ld\n", sum, xpt->spacetime2);
1592// }
1593 }
1594
1595 if (xpt == alloc_xpt) {
1596 SPRINTF(buf, "Heap allocation functions accounted for "
1597 "%s of measured spacetime%s\n",
1598 make_perc(heap_spacetime, total_spacetime), maybe_br);
1599 } else {
1600 // Remember: spacetime2 is space.time *doubled*
1601 perc = make_perc(xpt->spacetime2 / 2, total_spacetime);
1602 if (is_HTML) {
1603 SPRINTF(buf, "<a name=\"b%x\"></a>"
1604 "Context accounted for "
1605 "<a href=\"#a%x\">%s</a> of measured spacetime<br>\n",
1606 xpt, xpt, perc);
1607 } else {
1608 SPRINTF(buf, "Context accounted for %s of measured spacetime\n",
1609 perc);
1610 }
1611 n = pp_XCon(fd, xpt);
1612 sk_assert(n == L);
1613 }
1614
1615 // Sort children by spacetime2
1616 VG_(ssort)(xpt->children, xpt->n_children, sizeof(XPt*),
1617 XPt_cmp_spacetime2);
1618
1619 SPRINTF(buf, "%s\nCalled from:%s\n", maybe_p, maybe_ul);
1620 for (i = 0; i < xpt->n_children; i++) {
1621 child = xpt->children[i];
1622
         // Stop when the contribution drops below 0.5% of total spacetime
1624 if (child->spacetime2 * 1000 / (total_spacetime * 2) < 5) {
1625 UInt n_insig = xpt->n_children - i;
1626 Char* s = ( n_insig == 1 ? "" : "s" );
1627 Char* and = ( 0 == i ? "" : "and " );
1628 Char* other = ( 0 == i ? "" : "other " );
1629 SPRINTF(buf, " %s%s%d %sinsignificant place%s%s\n\n",
1630 maybe_li, and, n_insig, other, s, maybe_fli);
1631 break;
1632 }
1633
1634 // Remember: spacetime2 is space.time *doubled*
1635 perc = make_perc(child->spacetime2 / 2, total_spacetime);
1636 eip_desc = VG_(describe_eip)(child->eip-1, buf2, BUF_LEN);
1637 if (is_HTML) {
1638 SPRINTF(buf, "<li><a name=\"a%x\"></a>", child );
1639
1640 if (child->n_children > 0) {
1641 SPRINTF(buf, "<a href=\"#b%x\">%s</a>", child, perc);
1642 } else {
1643 SPRINTF(buf, "%s", perc);
1644 }
1645 SPRINTF(buf, ": %s\n", eip_desc);
1646 } else {
1647 SPRINTF(buf, " %6s: %s\n\n", perc, eip_desc);
1648 }
1649
1650 if (child->n_children > 0) {
1651 enqueue(q, (void*)child);
1652 c2++;
1653 }
1654 }
1655 SPRINTF(buf, "%s%s", maybe_ful, maybe_p);
1656 c1--;
1657
1658 // Putting markers between levels of the structure:
1659 // c1 tracks how many to go on this level, c2 tracks how many we've
1660 // queued up for the next level while finishing off this level.
1661 // When c1 gets to zero, we've changed levels, so print a marker,
1662 // move c2 into c1, and zero c2.
1663 if (0 == c1) {
1664 L++;
1665 c1 = c2;
1666 c2 = 0;
1667 if (! is_empty_queue(q) ) { // avoid empty one at end
1668 SPRINTF(buf, "== %d ===========================%s\n", L, maybe_br);
1669 }
1670 } else {
1671 SPRINTF(buf, "---------------------------------%s\n", maybe_br);
1672 }
1673 }
1674 SPRINTF(buf, "%s\n\nEnd of information. Rerun with a bigger "
1675 "%s value for more.\n", end_hr, depth);
1676}
1677
1678static void pp_all_XPts(Int fd, XPt* xpt, ULong heap_spacetime,
1679 ULong total_spacetime)
1680{
1681 Queue* q = construct_queue(100);
1682 enqueue(q, xpt);
1683 pp_all_XPts2(fd, q, heap_spacetime, total_spacetime);
1684 destruct_queue(q);
1685}
1686
1687static void
1688write_text_file(ULong total_ST, ULong heap_ST)
1689{
1690 Int fd, i;
1691 Char* text_file;
1692 Char* maybe_p = ( XHTML == clo_format ? "<p>" : "" );
1693
1694 VGP_PUSHCC(VgpPrintXPts);
1695
1696 // Open file
1697 text_file = make_filename( base_dir,
1698 ( XText == clo_format ? ".txt" : ".html" ) );
1699
1700 fd = VG_(open)(text_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
1701 VKI_S_IRUSR|VKI_S_IWUSR);
1702 if (fd < 0) {
1703 file_err( text_file );
1704 VGP_POPCC(VgpPrintXPts);
1705 return;
1706 }
1707
1708 // Header
1709 if (XHTML == clo_format) {
1710 SPRINTF(buf, "<html>\n"
1711 "<head>\n"
1712 "<title>%s</title>\n"
1713 "</head>\n"
1714 "<body>\n",
1715 text_file);
1716 }
1717
1718 // Command line
1719 SPRINTF(buf, "Command: ");
1720 for (i = 0; i < VG_(client_argc); i++)
1721 SPRINTF(buf, "%s ", VG_(client_argv)[i]);
1722 SPRINTF(buf, "\n%s\n", maybe_p);
1723
1724 if (clo_heap)
1725 pp_all_XPts(fd, alloc_xpt, heap_ST, total_ST);
1726
1727 sk_assert(fd >= 0);
1728 VG_(close)(fd);
1729
1730 VGP_POPCC(VgpPrintXPts);
1731}
1732
1733/*------------------------------------------------------------*/
1734/*--- Finalisation ---*/
1735/*------------------------------------------------------------*/
1736
1737static void
1738print_summary(ULong total_ST, ULong heap_ST, ULong heap_admin_ST,
1739 ULong stack_ST)
1740{
1741 VG_(message)(Vg_UserMsg, "Total spacetime: %,ld ms.B", total_ST);
1742
1743 // Heap --------------------------------------------------------------
1744 if (clo_heap)
1745 VG_(message)(Vg_UserMsg, "heap: %s",
1746 make_perc(heap_ST, total_ST) );
1747
1748 // Heap admin --------------------------------------------------------
1749 if (clo_heap_admin)
1750 VG_(message)(Vg_UserMsg, "heap admin: %s",
1751 make_perc(heap_admin_ST, total_ST));
1752
1753 sk_assert( VG_(HT_count_nodes)(malloc_list) == n_heap_blocks );
1754
1755 // Stack(s) ----------------------------------------------------------
1756 if (clo_stacks)
1757 VG_(message)(Vg_UserMsg, "stack(s): %s",
1758 make_perc(stack_ST, total_ST));
1759
1760 if (VG_(clo_verbosity) > 1) {
1761 sk_assert(n_xpts > 0); // always have alloc_xpt
1762 VG_(message)(Vg_DebugMsg, " allocs: %u", n_allocs);
1763 VG_(message)(Vg_DebugMsg, "zeroallocs: %u (%d%%)", n_zero_allocs,
1764 n_zero_allocs * 100 / n_allocs );
1765 VG_(message)(Vg_DebugMsg, " frees: %u", n_frees);
1766 VG_(message)(Vg_DebugMsg, " XPts: %u (%d B)", n_xpts,
1767 n_xpts*sizeof(XPt));
1768 VG_(message)(Vg_DebugMsg, " bot-XPts: %u (%d%%)", n_bot_xpts,
1769 n_bot_xpts * 100 / n_xpts);
1770 VG_(message)(Vg_DebugMsg, " top-XPts: %u (%d%%)", alloc_xpt->n_children,
1771 alloc_xpt->n_children * 100 / n_xpts);
1772 VG_(message)(Vg_DebugMsg, "c-reallocs: %u", n_children_reallocs);
1773 VG_(message)(Vg_DebugMsg, "snap-frees: %u", n_snapshot_frees);
1774 VG_(message)(Vg_DebugMsg, "atmp censi: %u", n_attempted_censi);
1775 VG_(message)(Vg_DebugMsg, "fake censi: %u", n_fake_censi);
1776 VG_(message)(Vg_DebugMsg, "real censi: %u", n_real_censi);
1777 VG_(message)(Vg_DebugMsg, " halvings: %u", n_halvings);
1778 }
1779}
1780
1781void SK_(fini)(Int exit_status)
1782{
1783 ULong total_ST = 0;
1784 ULong heap_ST = 0;
1785 ULong heap_admin_ST = 0;
1786 ULong stack_ST = 0;
1787
1788 // Do a final (empty) sample to show program's end
1789 hp_census();
1790
1791 // Redo spacetimes of significant contexts to match the .hp file.
1792 calc_spacetime2(&heap_ST, &heap_admin_ST, &stack_ST);
1793 total_ST = heap_ST + heap_admin_ST + stack_ST;
1794 write_hp_file ( );
1795 write_text_file( total_ST, heap_ST );
1796 print_summary ( total_ST, heap_ST, heap_admin_ST, stack_ST );
1797}
1798
1799VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 0)
1800
1801/*--------------------------------------------------------------------*/
1802/*--- end ms_main.c ---*/
1803/*--------------------------------------------------------------------*/
1804