/*--------------------------------------------------------------------*/
/*--- The leak checker, shared between Memcheck and Addrcheck.    ---*/
/*---                                             mac_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors, and AddrCheck, a lightweight Valgrind skin
   for detecting memory errors.

   Copyright (C) 2000-2003 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mac_shared.h"

/* Define to debug the memory-leak-detector. */
/* #define VG_DEBUG_LEAKCHECK */

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

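/* Recovery point for the scan: the SIGSEGV/SIGBUS handler below
   longjmp()s back here, so a page which faults when probed is simply
   skipped rather than killing the process. */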
static
jmp_buf memscan_jmpbuf;


static
void vg_scan_all_valid_memory_sighandler ( Int sigNo )
{
   __builtin_longjmp(memscan_jmpbuf, 1);
}

/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
   space and pass the addresses and values of all addressable,
   defined, aligned words to notify_word.  This is the basis for the
   leak detector.  Returns the number of calls made to notify_word.

   Addresses are validated 3 ways.  First we enquire whether (addr >>
   16) denotes a 64k chunk in use, by asking is_valid_64k_chunk().  If
   so, we decide for ourselves whether each x86-level (4K) page in
   the chunk is safe to inspect.  If yes, we enquire with
   is_valid_address() whether or not each of the 1024 word-locations
   on the page is valid.  Only if so are that address and its contents
   passed to notify_word.

   This is all to avoid duplication of this machinery between the
   memcheck and addrcheck skins.
*/
static
UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ),
                                Bool is_valid_address ( Addr ),
                                void (*notify_word)( Addr, UInt ) )
{
   /* All volatile, because some gccs seem paranoid about longjmp(). */
   volatile Bool anyValid;
   volatile Addr pageBase, addr;
   volatile UInt res, numPages, page, primaryMapNo;
   volatile UInt page_first_word, nWordsNotified;

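   /* Signal state saved on entry and restored on exit, plus the
      replacement handlers installed for the duration of the scan. */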
   vki_ksigaction sigbus_saved;
   vki_ksigaction sigbus_new;
   vki_ksigaction sigsegv_saved;
   vki_ksigaction sigsegv_new;
   vki_ksigset_t  blockmask_saved;
   vki_ksigset_t  unblockmask_new;

   /* Temporarily install a new sigsegv and sigbus handler, and make
      sure SIGBUS, SIGSEGV and SIGTERM are unblocked.  (Perhaps the
      first two can never be blocked anyway?) */

   sigbus_new.ksa_handler  = vg_scan_all_valid_memory_sighandler;
   sigbus_new.ksa_flags    = VKI_SA_ONSTACK | VKI_SA_RESTART;
   sigbus_new.ksa_restorer = NULL;
   res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
   sk_assert(res == 0);

   sigsegv_new.ksa_handler  = vg_scan_all_valid_memory_sighandler;
   sigsegv_new.ksa_flags    = VKI_SA_ONSTACK | VKI_SA_RESTART;
   sigsegv_new.ksa_restorer = NULL;
   res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
   sk_assert(res == 0+0);

   res =  VG_(ksigemptyset)( &unblockmask_new );
   res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
   res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
   res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
   sk_assert(res == 0+0+0);

   res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
   sk_assert(res == 0+0+0+0);

   res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
   sk_assert(res == 0+0+0+0+0);

   res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
   sk_assert(res == 0+0+0+0+0+0);

   /* The signal handlers are installed.  Actually do the memory scan. */
   numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
   sk_assert(numPages == 1048576);
   sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));

   nWordsNotified = 0;

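   /* Walk each 4KB page of the 32-bit address space in turn. */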
   for (page = 0; page < numPages; page++) {

      /* Base address of this 4k page. */
      pageBase = page << VKI_BYTES_PER_PAGE_BITS;

      /* Skip if this page is in an unused 64k chunk. */
      primaryMapNo = pageBase >> 16;
      if (!is_valid_64k_chunk(primaryMapNo))
         continue;

      /* Next, establish whether or not we want to consider any
         locations on this page.  We need to do so before actually
         prodding it, because prodding it when in fact it is not
         needed can cause a page fault which under some rare
         circumstances can cause the kernel to extend the stack
         segment all the way down to here, which is seriously bad.
         Hence: */
      anyValid = False;
      for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
         if (is_valid_address(addr)) {
            anyValid = True;
            break;
         }
      }

      if (!anyValid)
         continue;  /* nothing interesting here .. move to the next page */

      /* Ok, we have to prod cautiously at the page and see if it
         explodes or not. */
      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         /* try this ... */
         page_first_word = * (volatile UInt*)pageBase;
         /* we get here if we didn't get a fault */
         /* Scan the page */
         for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
            if (is_valid_address(addr)) {
               nWordsNotified++;
               notify_word ( addr, *(UInt*)addr );
            }
         }
      } else {
         /* We get here if reading the first word of the page caused a
            fault, which in turn caused the signal handler to longjmp.
            Ignore this page. */
         if (0)
            VG_(printf)(
               "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
               (void*)pageBase
            );
      }
   }

   /* Restore signal state to whatever it was before. */
   res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
   sk_assert(res == 0 +0);

   res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
   sk_assert(res == 0 +0 +0);

   res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
   sk_assert(res == 0 +0 +0 +0);

   return nWordsNotified;
}

/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* A block is either
   -- Proper-ly reached; a pointer to its start has been found
   -- Interior-ly reached; only an interior pointer to it has been found
   -- Unreached; so far, no pointers to any part of it have been found.
*/
typedef
   enum { Unreached, Interior, Proper }
   Reachedness;

/* A block record, used for generating err msgs. */
typedef
   struct _LossRecord {
      struct _LossRecord* next;
      /* Where these lost blocks were allocated. */
      ExeContext*  allocated_at;
      /* Their reachability. */
      Reachedness  loss_mode;
      /* Number of blocks and total # bytes involved. */
      UInt         total_bytes;
      UInt         num_blocks;
   }
   LossRecord;


/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#ifdef VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr        ptr,
                          MAC_Chunk** shadows,
                          Int         n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70);
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71);
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
      if (a_lo <= ptr && ptr <= a_hi)
         return i;
   }
   return -1;
}
#endif


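/* The fast binary-search version; relies on shadows[] being sorted on
   ->data, as described in the comment above. */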
static
Int find_shadow_for ( Addr        ptr,
                      MAC_Chunk** shadows,
                      Int         n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int  lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo = 0;
   hi = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->size - 1;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr > a_mid_hi) {
         lo = mid+1;
         continue;
      }
      sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
      retVal = mid;
      break;
   }

#  ifdef VG_DEBUG_LEAKCHECK
   sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}

/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MAC_Chunk**  lc_shadows;           /* block descriptors, sorted by address */
static Int          lc_n_shadows;         /* number of blocks                     */
static Reachedness* lc_reachedness;       /* reachability of each block           */
static Addr         lc_min_mallocd_addr;  /* lowest address of any block          */
static Addr         lc_max_mallocd_addr;  /* highest address of any block         */

static
void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
{
   Int  sh_no;
   Addr ptr;

   /* Rule out some known causes of bogus pointers.  Mostly these do
      not cause much trouble because only a few false pointers can
      ever lurk in these places.  This mainly stops it reporting that
      blocks are still reachable in stupid test programs like this

         int main (void) { char* a = malloc(100); return 0; }

      which people seem inordinately fond of writing, for some reason.

      Note that this is a complete kludge.  It would be better to
      ignore any addresses corresponding to valgrind.so's .bss and
      .data segments, but I cannot think of a reliable way to identify
      where the .bss segment has been put.  If you can, drop me a
      line.
   */
   if (VG_(within_stack)(a))                      return;
   if (VG_(within_m_state_static_OR_threads)(a))  return;
   if (a == (Addr)(&lc_min_mallocd_addr))         return;
   if (a == (Addr)(&lc_max_mallocd_addr))         return;

   /* OK, let's get on and do something Useful for a change. */

   ptr = (Addr)word_at_a;
   if (ptr >= lc_min_mallocd_addr && ptr <= lc_max_mallocd_addr) {
      /* Might be legitimate; we'll have to investigate further. */
      sh_no = find_shadow_for ( ptr, lc_shadows, lc_n_shadows );
      if (sh_no != -1) {
         /* Found a block at/into which ptr points. */
         sk_assert(sh_no >= 0 && sh_no < lc_n_shadows);
         sk_assert(ptr < lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
         /* Decide whether Proper-ly or Interior-ly reached. */
         if (ptr == lc_shadows[sh_no]->data) {
            if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
            lc_reachedness[sh_no] = Proper;
         } else {
            if (lc_reachedness[sh_no] == Unreached)
               lc_reachedness[sh_no] = Interior;
         }
      }
   }
}

/* Used for printing leak errors; avoids exposing the LossRecord type
   (which comes in as void*, requiring a cast). */
void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
{
   LossRecord* l = (LossRecord*)vl;

   VG_(message)(Vg_UserMsg, "");
   VG_(message)(Vg_UserMsg,
                "%d bytes in %d blocks are %s in loss record %d of %d",
                l->total_bytes, l->num_blocks,
                l->loss_mode==Unreached ? "definitely lost"
                   : (l->loss_mode==Interior ? "possibly lost"
                                             : "still reachable"),
                n_this_record, n_total_records
   );
   VG_(pp_ExeContext)(l->allocated_at);
}

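/* Totals from the most recent leak check.  Exported (non-static) so
   they can be read outside this file. */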
Int MAC_(bytes_leaked)     = 0;
Int MAC_(bytes_dubious)    = 0;
Int MAC_(bytes_reachable)  = 0;
Int MAC_(bytes_suppressed) = 0;

/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   vg_scan_all_valid_memory above).  All this is to avoid duplication
   of the leak-detection code for the Memcheck and Addrcheck skins.
   The allocation point of each block comes from the shared
   MAC_Chunk's .where field; the resolution used for distinguishing
   different allocation points, and whether or not reachable blocks
   are shown, are taken from MAC_(clo_leak_resolution) and
   MAC_(clo_show_reachable) respectively.
*/
void MAC_(do_detect_memory_leaks) (
   Bool is_valid_64k_chunk ( UInt ),
   Bool is_valid_address ( Addr )
)
{
   Int  i;
   Int  blocks_leaked;
   Int  blocks_dubious;
   Int  blocks_reachable;
   Int  blocks_suppressed;
   Int  n_lossrecords;
   UInt bytes_notified;
   Bool is_suppressed;

   LossRecord* errlist;
   LossRecord* p;

   /* VG_(HT_to_sorted_array) allocates storage for lc_shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_sorted_array)( MAC_(malloc_list),
                                                      &lc_n_shadows );

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      sk_assert(lc_shadows == NULL);
      VG_(message)(Vg_UserMsg,
                   "No malloc'd blocks -- no leaks are possible.");
      return;
   }

   VG_(message)(Vg_UserMsg, "searching for pointers to %d not-freed blocks.",
                lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size - 1;

   lc_reachedness = VG_(malloc)( lc_n_shadows * sizeof(Reachedness) );
   for (i = 0; i < lc_n_shadows; i++)
      lc_reachedness[i] = Unreached;

   /* Do the scan of memory. */
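   /* (The scanner returns the number of aligned words it notified,
      hence the scaling by the word size.) */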
   bytes_notified
      = VKI_BYTES_PER_WORD
        * vg_scan_all_valid_memory (
             is_valid_64k_chunk,
             is_valid_address,
             &vg_detect_memory_leaks_notify_addr
          );

   VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {

      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_reachedness[i]
             && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks  ++;
         p->total_bytes += lc_shadows[i]->size;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode    = lc_reachedness[i];
         p->allocated_at = where;
         p->total_bytes  = lc_shadows[i]->size;
         p->num_blocks   = 1;
         p->next         = errlist;
         errlist         = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats. */
   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

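   /* Each pass of this loop picks the not-yet-printed record with the
      smallest total_bytes, reports it, and then marks it as done by
      zeroing its num_blocks, so records come out in increasing order
      of size. */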
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      UInt        n_min = 0xFFFFFFFF;
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0 && p->total_bytes < n_min) {
            n_min = p->total_bytes;
            p_min = p;
         }
      }
      sk_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper)
         and --show-reachable=no */

      print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
      is_suppressed =
         VG_(unique_error) ( /*tst*/NULL, LeakErr, (UInt)i+1,
                             (Char*)n_lossrecords, (void*) p_min,
                             p_min->allocated_at, print_record,
                             /*allow_GDB_attach*/False, /*count_error*/False );

      if (is_suppressed) {
         blocks_suppressed      += p_min->num_blocks;
         MAC_(bytes_suppressed) += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         blocks_leaked      += p_min->num_blocks;
         MAC_(bytes_leaked) += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         blocks_dubious      += p_min->num_blocks;
         MAC_(bytes_dubious) += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         blocks_reachable      += p_min->num_blocks;
         MAC_(bytes_reachable) += p_min->total_bytes;

      } else {
         VG_(skin_panic)("MAC_(do_detect_memory_leaks): unknown loss mode");
      }
      p_min->num_blocks = 0;
   }

   VG_(message)(Vg_UserMsg, "");
   VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
   VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.",
                            MAC_(bytes_leaked), blocks_leaked );
   VG_(message)(Vg_UserMsg, "     possibly lost: %d bytes in %d blocks.",
                            MAC_(bytes_dubious), blocks_dubious );
   VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.",
                            MAC_(bytes_reachable), blocks_reachable );
   VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.",
                            MAC_(bytes_suppressed), blocks_suppressed );
   if (!MAC_(clo_show_reachable)) {
      VG_(message)(Vg_UserMsg,
         "Reachable blocks (those to which a pointer was found) are not shown.");
      VG_(message)(Vg_UserMsg,
         "To see them, rerun with: --show-reachable=yes");
   }
   VG_(message)(Vg_UserMsg, "");

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_reachedness );
}

/*--------------------------------------------------------------------*/
/*--- end                                         mac_leakcheck.c ---*/
/*--------------------------------------------------------------------*/