blob: a5966126cc7088fc643e54367fb170bcd65eca4c [file] [log] [blame]
njn43c799e2003-04-08 00:08:52 +00001
2/*--------------------------------------------------------------------*/
3/*--- The leak checker, shared between Memcheck and Addrcheck. ---*/
4/*--- mac_leakcheck.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
nethercote137bc552003-11-14 17:47:54 +00008 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors, and AddrCheck, a lightweight Valgrind tool
njn43c799e2003-04-08 00:08:52 +000010 for detecting memory errors.
11
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
njn43c799e2003-04-08 00:08:52 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
33#include "mac_shared.h"
34
35/* Define to debug the memory-leak-detector. */
36/* #define VG_DEBUG_LEAKCHECK */
37
38/*------------------------------------------------------------*/
39/*--- Low-level address-space scanning, for the leak ---*/
40/*--- detector. ---*/
41/*------------------------------------------------------------*/
42
43static
44jmp_buf memscan_jmpbuf;
45
46
47static
48void vg_scan_all_valid_memory_sighandler ( Int sigNo )
49{
50 __builtin_longjmp(memscan_jmpbuf, 1);
51}
52
53
54/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
55 space and pass the addresses and values of all addressible,
56 defined, aligned words to notify_word. This is the basis for the
57 leak detector. Returns the number of calls made to notify_word.
58
59 Addresses are validated 3 ways. First we enquire whether (addr >>
60 16) denotes a 64k chunk in use, by asking is_valid_64k_chunk(). If
61 so, we decide for ourselves whether each x86-level (4 K) page in
62 the chunk is safe to inspect. If yes, we enquire with
63 is_valid_address() whether or not each of the 1024 word-locations
64 on the page is valid. Only if so are that address and its contents
65 passed to notify_word.
66
67 This is all to avoid duplication of this machinery between the
68 memcheck and addrcheck skins.
69*/
70static
71UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ),
72 Bool is_valid_address ( Addr ),
73 void (*notify_word)( Addr, UInt ) )
74{
75 /* All volatile, because some gccs seem paranoid about longjmp(). */
76 volatile Bool anyValid;
77 volatile Addr pageBase, addr;
78 volatile UInt res, numPages, page, primaryMapNo;
79 volatile UInt page_first_word, nWordsNotified;
80
81 vki_ksigaction sigbus_saved;
82 vki_ksigaction sigbus_new;
83 vki_ksigaction sigsegv_saved;
84 vki_ksigaction sigsegv_new;
85 vki_ksigset_t blockmask_saved;
86 vki_ksigset_t unblockmask_new;
87
88 /* Temporarily install a new sigsegv and sigbus handler, and make
89 sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
90 first two can never be blocked anyway?) */
91
92 sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
93 sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
94 sigbus_new.ksa_restorer = NULL;
95 res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
96 sk_assert(res == 0);
97
98 sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
99 sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
100 sigsegv_new.ksa_restorer = NULL;
101 res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
102 sk_assert(res == 0+0);
103
104 res = VG_(ksigemptyset)( &unblockmask_new );
105 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
106 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
107 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
108 sk_assert(res == 0+0+0);
109
110 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
111 sk_assert(res == 0+0+0+0);
112
113 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
114 sk_assert(res == 0+0+0+0+0);
115
116 res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
117 sk_assert(res == 0+0+0+0+0+0);
118
119 /* The signal handlers are installed. Actually do the memory scan. */
120 numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
121 sk_assert(numPages == 1048576);
122 sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
123
124 nWordsNotified = 0;
125
126 for (page = 0; page < numPages; page++) {
127
128 /* Base address of this 4k page. */
129 pageBase = page << VKI_BYTES_PER_PAGE_BITS;
130
131 /* Skip if this page is in an unused 64k chunk. */
132 primaryMapNo = pageBase >> 16;
133 if (!is_valid_64k_chunk(primaryMapNo))
134 continue;
135
136 /* Next, establish whether or not we want to consider any
137 locations on this page. We need to do so before actually
138 prodding it, because prodding it when in fact it is not
139 needed can cause a page fault which under some rare
140 circumstances can cause the kernel to extend the stack
141 segment all the way down to here, which is seriously bad.
142 Hence: */
143 anyValid = False;
144 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
145 if (is_valid_address(addr)) {
146 anyValid = True;
147 break;
148 }
149 }
150
151 if (!anyValid)
152 continue; /* nothing interesting here .. move to the next page */
153
154 /* Ok, we have to prod cautiously at the page and see if it
155 explodes or not. */
156 if (__builtin_setjmp(memscan_jmpbuf) == 0) {
157 /* try this ... */
158 page_first_word = * (volatile UInt*)pageBase;
159 /* we get here if we didn't get a fault */
160 /* Scan the page */
161 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
162 if (is_valid_address(addr)) {
163 nWordsNotified++;
164 notify_word ( addr, *(UInt*)addr );
165 }
166 }
167 } else {
168 /* We get here if reading the first word of the page caused a
169 fault, which in turn caused the signal handler to longjmp.
170 Ignore this page. */
171 if (0)
172 VG_(printf)(
173 "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
174 (void*)pageBase
175 );
176 }
177 }
178
179 /* Restore signal state to whatever it was before. */
180 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
181 sk_assert(res == 0 +0);
182
183 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
184 sk_assert(res == 0 +0 +0);
185
186 res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
187 sk_assert(res == 0 +0 +0 +0);
188
189 return nWordsNotified;
190}
191
192/*------------------------------------------------------------*/
193/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
194/*------------------------------------------------------------*/
195
196/* A block is either
197 -- Proper-ly reached; a pointer to its start has been found
198 -- Interior-ly reached; only an interior pointer to it has been found
199 -- Unreached; so far, no pointers to any part of it have been found.
200*/
201typedef
202 enum { Unreached, Interior, Proper }
203 Reachedness;
204
205/* A block record, used for generating err msgs. */
206typedef
207 struct _LossRecord {
208 struct _LossRecord* next;
209 /* Where these lost blocks were allocated. */
210 ExeContext* allocated_at;
211 /* Their reachability. */
212 Reachedness loss_mode;
213 /* Number of blocks and total # bytes involved. */
214 UInt total_bytes;
215 UInt num_blocks;
216 }
217 LossRecord;
218
219
/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#ifdef VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism: a simple
   linear scan returning the first block whose byte range
   [data, data+size-1] contains ptr, or -1. */
static
Int find_shadow_for_OLD ( Addr ptr,
                          MAC_Chunk** shadows,
                          Int n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70);
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71);
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
      if (a_lo <= ptr && ptr <= a_hi)
         return i;
   }
   return -1;
}
#endif
245
246
247static
njn3e884182003-04-15 13:03:23 +0000248Int find_shadow_for ( Addr ptr,
249 MAC_Chunk** shadows,
250 Int n_shadows )
njn43c799e2003-04-08 00:08:52 +0000251{
252 Addr a_mid_lo, a_mid_hi;
253 Int lo, mid, hi, retVal;
254 /* VG_(printf)("find shadow for %p = ", ptr); */
255 retVal = -1;
256 lo = 0;
257 hi = n_shadows-1;
258 while (True) {
njn3e884182003-04-15 13:03:23 +0000259 /* invariant: current unsearched space is from lo to hi, inclusive. */
njn43c799e2003-04-08 00:08:52 +0000260 if (lo > hi) break; /* not found */
261
262 mid = (lo + hi) / 2;
njn3e884182003-04-15 13:03:23 +0000263 a_mid_lo = shadows[mid]->data;
264 a_mid_hi = shadows[mid]->data + shadows[mid]->size - 1;
njn43c799e2003-04-08 00:08:52 +0000265
266 if (ptr < a_mid_lo) {
267 hi = mid-1;
268 continue;
269 }
270 if (ptr > a_mid_hi) {
271 lo = mid+1;
272 continue;
273 }
274 sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
275 retVal = mid;
276 break;
277 }
278
279# ifdef VG_DEBUG_LEAKCHECK
280 sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
281# endif
282 /* VG_(printf)("%d\n", retVal); */
283 return retVal;
284}
285
286/* Globals, for the following callback used by VG_(detect_memory_leaks). */
njn3e884182003-04-15 13:03:23 +0000287static MAC_Chunk** lc_shadows;
288static Int lc_n_shadows;
289static Reachedness* lc_reachedness;
290static Addr lc_min_mallocd_addr;
291static Addr lc_max_mallocd_addr;
njn43c799e2003-04-08 00:08:52 +0000292
293static
294void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
295{
296 Int sh_no;
297 Addr ptr;
298
299 /* Rule out some known causes of bogus pointers. Mostly these do
300 not cause much trouble because only a few false pointers can
301 ever lurk in these places. This mainly stops it reporting that
302 blocks are still reachable in stupid test programs like this
303
304 int main (void) { char* a = malloc(100); return 0; }
305
306 which people seem inordinately fond of writing, for some reason.
307
308 Note that this is a complete kludge. It would be better to
309 ignore any addresses corresponding to valgrind.so's .bss and
310 .data segments, but I cannot think of a reliable way to identify
311 where the .bss segment has been put. If you can, drop me a
312 line.
313 */
sewardjecf8e102003-07-12 12:11:39 +0000314 if (VG_(within_stack)(a)) return;
315 if (VG_(within_m_state_static_OR_threads)(a)) return;
316 if (a == (Addr)(&lc_min_mallocd_addr)) return;
317 if (a == (Addr)(&lc_max_mallocd_addr)) return;
njn43c799e2003-04-08 00:08:52 +0000318
319 /* OK, let's get on and do something Useful for a change. */
320
321 ptr = (Addr)word_at_a;
njn3e884182003-04-15 13:03:23 +0000322 if (ptr >= lc_min_mallocd_addr && ptr <= lc_max_mallocd_addr) {
njn43c799e2003-04-08 00:08:52 +0000323 /* Might be legitimate; we'll have to investigate further. */
njn3e884182003-04-15 13:03:23 +0000324 sh_no = find_shadow_for ( ptr, lc_shadows, lc_n_shadows );
njn43c799e2003-04-08 00:08:52 +0000325 if (sh_no != -1) {
326 /* Found a block at/into which ptr points. */
njn3e884182003-04-15 13:03:23 +0000327 sk_assert(sh_no >= 0 && sh_no < lc_n_shadows);
328 sk_assert(ptr < lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
njn43c799e2003-04-08 00:08:52 +0000329 /* Decide whether Proper-ly or Interior-ly reached. */
njn3e884182003-04-15 13:03:23 +0000330 if (ptr == lc_shadows[sh_no]->data) {
njn43c799e2003-04-08 00:08:52 +0000331 if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
njn3e884182003-04-15 13:03:23 +0000332 lc_reachedness[sh_no] = Proper;
njn43c799e2003-04-08 00:08:52 +0000333 } else {
njn3e884182003-04-15 13:03:23 +0000334 if (lc_reachedness[sh_no] == Unreached)
335 lc_reachedness[sh_no] = Interior;
njn43c799e2003-04-08 00:08:52 +0000336 }
337 }
338 }
339}
340
341/* Used for printing leak errors, avoids exposing the LossRecord type (which
342 comes in as void*, requiring a cast. */
343void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
344{
345 LossRecord* l = (LossRecord*)vl;
346
347 VG_(message)(Vg_UserMsg, "");
348 VG_(message)(Vg_UserMsg,
349 "%d bytes in %d blocks are %s in loss record %d of %d",
350 l->total_bytes, l->num_blocks,
351 l->loss_mode==Unreached ? "definitely lost"
352 : (l->loss_mode==Interior ? "possibly lost"
353 : "still reachable"),
354 n_this_record, n_total_records
355 );
356 VG_(pp_ExeContext)(l->allocated_at);
357}
358
njne8b5c052003-07-22 22:03:58 +0000359Int MAC_(bytes_leaked) = 0;
360Int MAC_(bytes_dubious) = 0;
361Int MAC_(bytes_reachable) = 0;
362Int MAC_(bytes_suppressed) = 0;
njn47363ab2003-04-21 13:24:40 +0000363
njn06072ec2003-09-30 15:35:13 +0000364static Int lc_compar(void* n1, void* n2)
365{
366 MAC_Chunk* mc1 = *(MAC_Chunk**)n1;
367 MAC_Chunk* mc2 = *(MAC_Chunk**)n2;
368 return (mc1->data < mc2->data ? -1 : 1);
369}
370
njn43c799e2003-04-08 00:08:52 +0000371/* Top level entry point to leak detector. Call here, passing in
372 suitable address-validating functions (see comment at top of
373 vg_scan_all_valid_memory above). All this is to avoid duplication
374 of the leak-detection code for the Memcheck and Addrcheck skins.
375 Also pass in a skin-specific function to extract the .where field
376 for allocated blocks, an indication of the resolution wanted for
377 distinguishing different allocation points, and whether or not
378 reachable blocks should be shown.
379*/
380void MAC_(do_detect_memory_leaks) (
381 Bool is_valid_64k_chunk ( UInt ),
382 Bool is_valid_address ( Addr )
383)
384{
385 Int i;
njne8b5c052003-07-22 22:03:58 +0000386 Int blocks_leaked;
387 Int blocks_dubious;
388 Int blocks_reachable;
389 Int blocks_suppressed;
njn43c799e2003-04-08 00:08:52 +0000390 Int n_lossrecords;
391 UInt bytes_notified;
392 Bool is_suppressed;
393
394 LossRecord* errlist;
395 LossRecord* p;
396
njn06072ec2003-09-30 15:35:13 +0000397 /* VG_(HT_to_array) allocates storage for shadows */
398 lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
399 &lc_n_shadows );
400
401 /* Sort the array. */
402 VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);
403
404 /* Sanity check; assert that the blocks are now in order */
405 for (i = 0; i < lc_n_shadows-1; i++) {
406 sk_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
407 }
njn3e884182003-04-15 13:03:23 +0000408
409 /* Sanity check -- make sure they don't overlap */
410 for (i = 0; i < lc_n_shadows-1; i++) {
411 sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
412 < lc_shadows[i+1]->data );
413 }
414
415 if (lc_n_shadows == 0) {
416 sk_assert(lc_shadows == NULL);
sewardj37d06f22003-09-17 21:48:26 +0000417 if (VG_(clo_verbosity) >= 1) {
418 VG_(message)(Vg_UserMsg,
419 "No malloc'd blocks -- no leaks are possible.");
420 }
njn43c799e2003-04-08 00:08:52 +0000421 return;
422 }
423
nethercote0f19bce2003-12-02 10:17:44 +0000424 if (VG_(clo_verbosity) > 0)
425 VG_(message)(Vg_UserMsg,
426 "searching for pointers to %d not-freed blocks.",
427 lc_n_shadows );
njn43c799e2003-04-08 00:08:52 +0000428
njn3e884182003-04-15 13:03:23 +0000429 lc_min_mallocd_addr = lc_shadows[0]->data;
430 lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
431 + lc_shadows[lc_n_shadows-1]->size - 1;
njn43c799e2003-04-08 00:08:52 +0000432
njn3e884182003-04-15 13:03:23 +0000433 lc_reachedness = VG_(malloc)( lc_n_shadows * sizeof(Reachedness) );
434 for (i = 0; i < lc_n_shadows; i++)
435 lc_reachedness[i] = Unreached;
njn43c799e2003-04-08 00:08:52 +0000436
437 /* Do the scan of memory. */
438 bytes_notified
439 = VKI_BYTES_PER_WORD
440 * vg_scan_all_valid_memory (
441 is_valid_64k_chunk,
442 is_valid_address,
443 &vg_detect_memory_leaks_notify_addr
444 );
445
nethercote0f19bce2003-12-02 10:17:44 +0000446 if (VG_(clo_verbosity) > 0)
447 VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
njn43c799e2003-04-08 00:08:52 +0000448
449 /* Common up the lost blocks so we can print sensible error messages. */
450 n_lossrecords = 0;
451 errlist = NULL;
njn3e884182003-04-15 13:03:23 +0000452 for (i = 0; i < lc_n_shadows; i++) {
njn43c799e2003-04-08 00:08:52 +0000453
njn3e884182003-04-15 13:03:23 +0000454 ExeContext* where = lc_shadows[i]->where;
njn43c799e2003-04-08 00:08:52 +0000455
456 for (p = errlist; p != NULL; p = p->next) {
njn3e884182003-04-15 13:03:23 +0000457 if (p->loss_mode == lc_reachedness[i]
njn43c799e2003-04-08 00:08:52 +0000458 && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
459 p->allocated_at,
460 where) ) {
461 break;
462 }
463 }
464 if (p != NULL) {
465 p->num_blocks ++;
njn3e884182003-04-15 13:03:23 +0000466 p->total_bytes += lc_shadows[i]->size;
njn43c799e2003-04-08 00:08:52 +0000467 } else {
468 n_lossrecords ++;
469 p = VG_(malloc)(sizeof(LossRecord));
njn3e884182003-04-15 13:03:23 +0000470 p->loss_mode = lc_reachedness[i];
njn43c799e2003-04-08 00:08:52 +0000471 p->allocated_at = where;
njn3e884182003-04-15 13:03:23 +0000472 p->total_bytes = lc_shadows[i]->size;
njn43c799e2003-04-08 00:08:52 +0000473 p->num_blocks = 1;
474 p->next = errlist;
475 errlist = p;
476 }
477 }
478
479 /* Print out the commoned-up blocks and collect summary stats. */
njne8b5c052003-07-22 22:03:58 +0000480 blocks_leaked = MAC_(bytes_leaked) = 0;
481 blocks_dubious = MAC_(bytes_dubious) = 0;
482 blocks_reachable = MAC_(bytes_reachable) = 0;
483 blocks_suppressed = MAC_(bytes_suppressed) = 0;
njn43c799e2003-04-08 00:08:52 +0000484
485 for (i = 0; i < n_lossrecords; i++) {
486 Bool print_record;
487 LossRecord* p_min = NULL;
488 UInt n_min = 0xFFFFFFFF;
489 for (p = errlist; p != NULL; p = p->next) {
490 if (p->num_blocks > 0 && p->total_bytes < n_min) {
491 n_min = p->total_bytes;
492 p_min = p;
493 }
494 }
495 sk_assert(p_min != NULL);
496
497 /* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
498 we disallow that when --leak-check=yes.
499
500 Prints the error if not suppressed, unless it's reachable (Proper)
501 and --show-reachable=no */
502
503 print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
504 is_suppressed =
njn72718642003-07-24 08:45:32 +0000505 VG_(unique_error) ( VG_(get_current_tid)(), LeakErr, (UInt)i+1,
njn43c799e2003-04-08 00:08:52 +0000506 (Char*)n_lossrecords, (void*) p_min,
njn3e884182003-04-15 13:03:23 +0000507 p_min->allocated_at, print_record,
njn47363ab2003-04-21 13:24:40 +0000508 /*allow_GDB_attach*/False, /*count_error*/False );
njn43c799e2003-04-08 00:08:52 +0000509
510 if (is_suppressed) {
njne8b5c052003-07-22 22:03:58 +0000511 blocks_suppressed += p_min->num_blocks;
512 MAC_(bytes_suppressed) += p_min->total_bytes;
njn43c799e2003-04-08 00:08:52 +0000513
njne8b5c052003-07-22 22:03:58 +0000514 } else if (Unreached == p_min->loss_mode) {
515 blocks_leaked += p_min->num_blocks;
516 MAC_(bytes_leaked) += p_min->total_bytes;
njn43c799e2003-04-08 00:08:52 +0000517
njne8b5c052003-07-22 22:03:58 +0000518 } else if (Interior == p_min->loss_mode) {
519 blocks_dubious += p_min->num_blocks;
520 MAC_(bytes_dubious) += p_min->total_bytes;
njn43c799e2003-04-08 00:08:52 +0000521
njne8b5c052003-07-22 22:03:58 +0000522 } else if (Proper == p_min->loss_mode) {
523 blocks_reachable += p_min->num_blocks;
524 MAC_(bytes_reachable) += p_min->total_bytes;
njn43c799e2003-04-08 00:08:52 +0000525
526 } else {
527 VG_(skin_panic)("generic_detect_memory_leaks: unknown loss mode");
528 }
529 p_min->num_blocks = 0;
530 }
531
nethercote0f19bce2003-12-02 10:17:44 +0000532 if (VG_(clo_verbosity) > 0) {
533 VG_(message)(Vg_UserMsg, "");
534 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
535 VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
536 MAC_(bytes_leaked), blocks_leaked );
537 VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
538 MAC_(bytes_dubious), blocks_dubious );
539 VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
540 MAC_(bytes_reachable), blocks_reachable );
541 VG_(message)(Vg_UserMsg, " suppressed: %d bytes in %d blocks.",
542 MAC_(bytes_suppressed), blocks_suppressed );
543 if (!MAC_(clo_show_reachable)) {
544 VG_(message)(Vg_UserMsg,
545 "Reachable blocks (those to which a pointer was found) are not shown.");
546 VG_(message)(Vg_UserMsg,
547 "To see them, rerun with: --show-reachable=yes");
548 }
njn43c799e2003-04-08 00:08:52 +0000549 }
njn43c799e2003-04-08 00:08:52 +0000550
njn3e884182003-04-15 13:03:23 +0000551 VG_(free) ( lc_shadows );
552 VG_(free) ( lc_reachedness );
njn43c799e2003-04-08 00:08:52 +0000553}
554
555/*--------------------------------------------------------------------*/
556/*--- end mac_leakcheck.c ---*/
557/*--------------------------------------------------------------------*/
558