blob: b465c5244b42695bcb6b6dc7aed12bd23dce9c30 [file] [log] [blame]
njn43c799e2003-04-08 00:08:52 +00001
2/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00003/*--- The leak checker. mc_leakcheck.c ---*/
njn43c799e2003-04-08 00:08:52 +00004/*--------------------------------------------------------------------*/
5
6/*
nethercote137bc552003-11-14 17:47:54 +00007 This file is part of MemCheck, a heavyweight Valgrind tool for
njn1d0825f2006-03-27 11:37:07 +00008 detecting memory errors.
njn43c799e2003-04-08 00:08:52 +00009
sewardj4d474d02008-02-11 11:34:59 +000010 Copyright (C) 2000-2008 Julian Seward
njn43c799e2003-04-08 00:08:52 +000011 jseward@acm.org
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29*/
30
njnc7561b92005-06-19 01:24:32 +000031#include "pub_tool_basics.h"
sewardj4cfea4f2006-10-14 19:26:10 +000032#include "pub_tool_vki.h"
njn4802b382005-06-11 04:58:29 +000033#include "pub_tool_aspacemgr.h"
njn1d0825f2006-03-27 11:37:07 +000034#include "pub_tool_execontext.h"
35#include "pub_tool_hashtable.h"
njn97405b22005-06-02 03:39:33 +000036#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_tool_libcprint.h"
njnde62cbf2005-06-10 22:08:14 +000039#include "pub_tool_libcsignal.h"
njn6ace3ea2005-06-17 03:06:27 +000040#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000041#include "pub_tool_mallocfree.h"
42#include "pub_tool_options.h"
njn73c51342005-06-18 15:18:15 +000043#include "pub_tool_signals.h"
njn1d0825f2006-03-27 11:37:07 +000044#include "pub_tool_tooliface.h" // Needed for mc_include.h
njn43c799e2003-04-08 00:08:52 +000045
njn1d0825f2006-03-27 11:37:07 +000046#include "mc_include.h"
njnc7561b92005-06-19 01:24:32 +000047
48#include <setjmp.h> // For jmp_buf
49
50
njn43c799e2003-04-08 00:08:52 +000051/* Define to debug the memory-leak-detector. */
sewardjb5f6f512005-03-10 23:59:00 +000052#define VG_DEBUG_LEAKCHECK 0
53#define VG_DEBUG_CLIQUE 0
54
njn43c799e2003-04-08 00:08:52 +000055/*------------------------------------------------------------*/
56/*--- Low-level address-space scanning, for the leak ---*/
57/*--- detector. ---*/
58/*------------------------------------------------------------*/
59
/* Jump buffer used to escape from the fault catcher below when the
   scanner touches an unreadable address. */
static
jmp_buf memscan_jmpbuf;


/* Fault catcher installed while scanning memory.  If the scan faults
   with SIGSEGV or SIGBUS, longjmp back into lc_scan_memory_WRK so the
   offending page can be skipped.  Other signals are ignored here. */
static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      /* __builtin_longjmp pairs with the __builtin_setjmp in
         lc_scan_memory_WRK. */
      __builtin_longjmp(memscan_jmpbuf, 1);
}
72
sewardj45f4e7c2005-09-27 19:20:21 +000073
/* TODO: GIVE THIS A PROPER HOME
   TODO: MERGE THIS WITH DUPLICATE IN m_main.c and coredump-elf.c.
   Extract from aspacem a vector of the current segment start
   addresses.  The vector is dynamically allocated and should be freed
   by the caller when done.  REQUIRES m_mallocfree to be running.
   Writes the number of addresses required into *n_acquired. */

static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
{
   Addr* starts;
   Int   n_starts, r = 0;

   /* Retry with a doubling buffer until aspacem reports success
      (r >= 0).  A negative r means the buffer was too small. */
   n_starts = 1;
   while (True) {
      starts = VG_(malloc)( "mc.gss.1", n_starts * sizeof(Addr) );
      if (starts == NULL)
         break;
      r = VG_(am_get_segment_starts)( starts, n_starts );
      if (r >= 0)
         break;
      VG_(free)(starts);
      n_starts *= 2;
   }

   /* Allocation failure: report zero segments and no vector. */
   if (starts == NULL) {
      *n_acquired = 0;
      return NULL;
   }

   *n_acquired = r;
   return starts;
}
106
107
njn43c799e2003-04-08 00:08:52 +0000108/*------------------------------------------------------------*/
109/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
110/*------------------------------------------------------------*/
111
/* An entry in the mark stack.  There is one entry per shadow block
   (parallel to lc_shadows[]); the stack itself is threaded through the
   'next' indices, with lc_markstack_top holding the head index. */
typedef
   struct {
      Int   next:30;    /* Index of next in mark stack; -1 ends the chain */
      UInt  state:2;    /* Reachedness (Unreached/IndirectLeak/Interior/Proper) */
      SizeT indirect;   /* if Unreached, how much is unreachable from here */
   }
   MarkStack;
njn43c799e2003-04-08 00:08:52 +0000120
/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism: a simple
   O(n) linear scan with the same contract as find_shadow_for. */
static
Int find_shadow_for_OLD ( Addr        ptr,
                          MC_Chunk**  shadows,
                          Int         n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_shadow_for_OLD");
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71, "find_shadow_for_OLD(loop)");
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->szB;
      /* Block extent is the half-open range [a_lo, a_hi). */
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif
146
147
/* Binary search over shadows[] (sorted on ->data) for the block that
   contains ptr; returns its index, or -1 if ptr is inside no block. */
static
Int find_shadow_for ( Addr        ptr,
                      MC_Chunk**  shadows,
                      Int         n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo = 0;
   hi = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->szB;
      /* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
         Special-case zero-sized blocks - treat them as if they had
         size 1.  Not doing so causes them to not cover any address
         range at all and so will never be identified as the target of
         any pointer, which causes them to be incorrectly reported as
         definitely leaked. */
      if (shadows[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      /* Neither branch taken: ptr lies inside block 'mid'. */
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   /* Cross-check against the slow linear search when debugging. */
   tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}
194
/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MC_Chunk** lc_shadows;          /* sorted array of blocks under test */
static Int        lc_n_shadows;        /* number of entries in lc_shadows */
static MarkStack* lc_markstack;        /* per-block mark-stack entries */
static Int        lc_markstack_top;    /* head index of mark stack, -1 if empty */
static Addr       lc_min_mallocd_addr; /* lowest address of any shadow block */
static Addr       lc_max_mallocd_addr; /* one past highest shadow block address */
static SizeT      lc_scanned;          /* bytes actually examined by the scan */

/* Tool-supplied address validity predicates, installed by
   MC_(do_detect_memory_leaks). */
static Bool (*lc_is_within_valid_secondary) (Addr addr);
static Bool (*lc_is_valid_aligned_word)     (Addr addr);


/* Leak-check summary counters, by loss category (bytes). */
SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

/* Leak-check summary counters, by loss category (block counts). */
SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;
219
njn06072ec2003-09-30 15:35:13 +0000220static Int lc_compar(void* n1, void* n2)
221{
njn1d0825f2006-03-27 11:37:07 +0000222 MC_Chunk* mc1 = *(MC_Chunk**)n1;
223 MC_Chunk* mc2 = *(MC_Chunk**)n2;
sewardjb8b79ad2008-03-03 01:35:41 +0000224 if (mc1->data < mc2->data) return -1;
225 if (mc1->data > mc2->data) return 1;
226 return 0;
njn06072ec2003-09-30 15:35:13 +0000227}
228
/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void lc_markstack_push_WRK(Addr ptr, Int clique)
{
   Int sh_no;

   /* quick filter: discard values that aren't even client addresses */
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_NONE))
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%#lx -> block %d\n", ptr, sh_no);

   /* ptr doesn't point at/into any known block */
   if (sh_no == -1)
      return;

   /* Sanity: the found block really does contain ptr (zero-sized
      blocks are treated as size 1, matching find_shadow_for). */
   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr >= lc_shadows[sh_no]->data);
   tl_assert(ptr < lc_shadows[sh_no]->data
             + lc_shadows[sh_no]->szB
             + (lc_shadows[sh_no]->szB==0 ? 1 : 0));

   /* First sighting of this block: push it for later scanning. */
   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %#lx-%#lx\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->szB);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   tl_assert(clique >= -1 && clique < lc_n_shadows);

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %#lx is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %lu+%lu bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB + 0UL,
                              lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %lu\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB + 0UL);
            }

            /* Fold this block's own size and its accumulated indirect
               size into the leader's indirect total. */
            lc_markstack[clique].indirect += lc_shadows[sh_no]->szB;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      /* Pointer to the exact start of the block: fully reachable. */
      lc_markstack[sh_no].state = Proper;
   } else {
      /* Interior pointer only; don't downgrade a block already
         marked Proper. */
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
306
/* Push ptr with no clique gathering (root scan). */
static void lc_markstack_push(Addr ptr)
{
   lc_markstack_push_WRK(ptr, -1);
}
311
312/* Return the top of the mark stack, if any. */
313static Int lc_markstack_pop(void)
314{
315 Int ret = lc_markstack_top;
316
317 if (ret != -1) {
318 lc_markstack_top = lc_markstack[ret].next;
319 lc_markstack[ret].next = -1;
320 }
321
322 return ret;
323}
324
sewardj45d94cc2005-04-20 14:44:11 +0000325
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessable, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void lc_scan_memory_WRK(Addr start, SizeT len, Int clique)
{
   /* Only word-aligned words inside [start, start+len) are examined. */
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx\n", start, start+len);
   /* Save the current signal mask so it can be restored after a
      longjmp out of the fault handler, and arm the fault catcher. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   // lc_scanned += end-ptr;

   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         /* whole secondary map has no valid words; jump past it */
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonble */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      /* setjmp returns 0 on the direct call; non-zero when the fault
         catcher longjmps back here after an access fault. */
      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            lc_scanned += sizeof(Addr);
            addr = *(Addr *)ptr;
            /* treat the loaded word as a potential pointer */
            lc_markstack_push_WRK(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%#lx not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     /* bad page - skip it */
      }
   }

   /* Restore the signal mask and disarm the fault catcher. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
382
sewardj45d94cc2005-04-20 14:44:11 +0000383
sewardjb5f6f512005-03-10 23:59:00 +0000384static void lc_scan_memory(Addr start, SizeT len)
385{
sewardjf1efbd42009-01-24 01:42:20 +0000386 if (VG_(clo_verbosity) > 2) {
387 VG_(message)(Vg_DebugMsg, " Scanning segment: %#lx..%#lx (%ld)",
njnbc00bb72009-01-19 00:43:34 +0000388 start, start+len-1, len);
389 }
sewardj45f4e7c2005-09-27 19:20:21 +0000390 lc_scan_memory_WRK(start, len, -1);
sewardjb5f6f512005-03-10 23:59:00 +0000391}
392
393/* Process the mark stack until empty. If mopup is true, then we're
394 actually gathering leaked blocks, so they should be marked
395 IndirectLeak. */
396static void lc_do_leakcheck(Int clique)
397{
398 Int top;
399
400 while((top = lc_markstack_pop()) != -1) {
sewardj76754cf2005-03-14 00:14:04 +0000401 tl_assert(top >= 0 && top < lc_n_shadows);
402 tl_assert(lc_markstack[top].state != Unreached);
sewardjb5f6f512005-03-10 23:59:00 +0000403
njn718d3b12006-12-16 00:54:12 +0000404 lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->szB, clique);
sewardjb5f6f512005-03-10 23:59:00 +0000405 }
406}
407
/* Produce the full leak report: gather Unreached blocks into cliques,
   common up blocks with equal loss mode and allocation context into
   LossRecords, then emit each record (smallest first) and accumulate
   the global per-category counters. */
static void full_report(ThreadId tid)
{
   Int i;
   Int n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool is_suppressed;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, lc_shadows[i]->data, lc_markstack[i].state);
      if (lc_markstack[i].state != Unreached)
         continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("%d: gathering clique %#lx\n", i, lc_shadows[i]->data);

      /* Seed the clique with block i as leader, then chase pointers. */
      lc_markstack_push_WRK(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak
                /* jrs 20051218: Ashley Pittman supplied a
                   custom-allocator test program which causes the ==
                   IndirectLeak condition to fail - it causes .state
                   to be Unreached.  Since I have no idea how this
                   clique stuff works and no time to figure it out,
                   just allow that condition too.  This could well be
                   a completely bogus fix.  It doesn't seem unsafe
                   given that in any case the .state field is
                   immediately overwritten by the next statement. */
                || lc_markstack[i].state == Unreached);

      lc_markstack[i].state = Unreached; /* Return to unreached state,
                                            to indicate its a clique
                                            leader */
   }

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      /* Look for an existing record with the same loss mode and
         allocation context. */
      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         /* Fold this block into the matching record. */
         p->num_blocks  ++;
         p->total_bytes += lc_shadows[i]->szB;
         p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         /* No match: start a new record at the head of errlist. */
         n_lossrecords ++;
         p = VG_(malloc)( "mc.fr.1", sizeof(LossRecord));
         p->loss_mode    = lc_markstack[i].state;
         p->allocated_at = where;
         p->total_bytes  = lc_shadows[i]->szB;
         p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks   = 1;
         p->next         = errlist;
         errlist         = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats.
      Each pass selects the smallest not-yet-printed record
      (num_blocks is zeroed after printing to exclude it). */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      SizeT       n_min = ~(0x0L);
      for (p = errlist; p != NULL; p = p->next) {
         /* NOTE(review): the comparison key is p->total_bytes but
            n_min is updated with total_bytes + indirect_bytes -
            looks inconsistent; confirm the intended sort key. */
         if (p->num_blocks > 0 && p->total_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no */

      print_record = ( MC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Interior == p_min->loss_mode );

      // Nb: because VG_(unique_error) does all the error processing
      // immediately, and doesn't save the error, leakExtra can be
      // stack-allocated.
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
                                  print_record );

      /* Accumulate the record into exactly one summary category. */
      if (is_suppressed) {
         MC_(blocks_suppressed) += p_min->num_blocks;
         MC_(bytes_suppressed)  += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         MC_(blocks_leaked) += p_min->num_blocks;
         MC_(bytes_leaked)  += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         MC_(blocks_indirect) += p_min->num_blocks;
         MC_(bytes_indirect)  += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         MC_(blocks_dubious) += p_min->num_blocks;
         MC_(bytes_dubious)  += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         MC_(blocks_reachable) += p_min->num_blocks;
         MC_(bytes_reachable)  += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;   /* mark record as already printed */
   }
}
543
544/* Compute a quick summary of the leak check. */
tom151a6392005-11-11 12:30:36 +0000545static void make_summary(void)
sewardjb5f6f512005-03-10 23:59:00 +0000546{
547 Int i;
548
549 for(i = 0; i < lc_n_shadows; i++) {
njn718d3b12006-12-16 00:54:12 +0000550 SizeT size = lc_shadows[i]->szB;
sewardjb5f6f512005-03-10 23:59:00 +0000551
552 switch(lc_markstack[i].state) {
553 case Unreached:
njn8df80b22009-03-02 05:11:06 +0000554 MC_(blocks_leaked)++;
njn1d0825f2006-03-27 11:37:07 +0000555 MC_(bytes_leaked) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000556 break;
557
558 case Proper:
njn8df80b22009-03-02 05:11:06 +0000559 MC_(blocks_reachable)++;
njn1d0825f2006-03-27 11:37:07 +0000560 MC_(bytes_reachable) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000561 break;
562
563 case Interior:
njn8df80b22009-03-02 05:11:06 +0000564 MC_(blocks_dubious)++;
njn1d0825f2006-03-27 11:37:07 +0000565 MC_(bytes_dubious) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000566 break;
567
568 case IndirectLeak: /* shouldn't happen */
njn8df80b22009-03-02 05:11:06 +0000569 MC_(blocks_indirect)++;
njn1d0825f2006-03-27 11:37:07 +0000570 MC_(bytes_indirect) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000571 break;
572 }
573 }
574}
575
sewardj3cf26a52006-07-27 23:48:53 +0000576static MC_Chunk**
577find_active_shadows(UInt* n_shadows)
578{
579 /* Our goal is to construct a set of shadows that includes every
580 * mempool chunk, and every malloc region that *doesn't* contain a
581 * mempool chunk. We do this in several phases.
582 *
583 * First we collect all the malloc chunks into an array and sort it.
584 * We do this because we want to query the chunks by interior
585 * pointers, requiring binary search.
586 *
587 * Second we build an array containing a Bool for each malloc chunk,
588 * indicating whether it contains any mempools.
589 *
590 * Third we loop over the mempool tables. For each chunk in each
591 * pool, we set the entry in the Bool array corresponding to the
592 * malloc chunk containing the mempool chunk.
593 *
594 * Finally we copy the mempool chunks and the non-marked malloc
595 * chunks into a combined array of shadows, free our temporaries,
596 * and return the combined array.
597 */
598
599 MC_Mempool *mp;
600 MC_Chunk **mallocs, **shadows, *mc;
601 UInt n_mallocs, m, s;
602 Bool *malloc_chunk_holds_a_pool_chunk;
603
604 mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
605
606 if (n_mallocs == 0) {
607 tl_assert(mallocs == NULL);
608 *n_shadows = 0;
609 return NULL;
610 }
611
612 VG_(ssort)((void*)mallocs, n_mallocs,
613 sizeof(VgHashNode*), lc_compar);
614
sewardj9c606bd2008-09-18 18:12:50 +0000615 malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
616 n_mallocs, sizeof(Bool) );
sewardj3cf26a52006-07-27 23:48:53 +0000617
618 *n_shadows = n_mallocs;
619
620 VG_(HT_ResetIter)(MC_(mempool_list));
621 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
622 VG_(HT_ResetIter)(mp->chunks);
623 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
624
625 /* We'll need a shadow for this chunk. */
626 ++(*n_shadows);
627
sewardj9c606bd2008-09-18 18:12:50 +0000628 /* Possibly invalidate the malloc holding the beginning of
629 this chunk. */
sewardj3cf26a52006-07-27 23:48:53 +0000630 m = find_shadow_for(mc->data, mallocs, n_mallocs);
631 if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
632 tl_assert(*n_shadows > 0);
633 --(*n_shadows);
634 malloc_chunk_holds_a_pool_chunk[m] = True;
635 }
636
637 /* Possibly invalidate the malloc holding the end of this chunk. */
njn718d3b12006-12-16 00:54:12 +0000638 if (mc->szB > 1) {
639 m = find_shadow_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
sewardj3cf26a52006-07-27 23:48:53 +0000640 if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
641 tl_assert(*n_shadows > 0);
642 --(*n_shadows);
643 malloc_chunk_holds_a_pool_chunk[m] = True;
644 }
645 }
646 }
647 }
648
649 tl_assert(*n_shadows > 0);
sewardj9c606bd2008-09-18 18:12:50 +0000650 shadows = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (*n_shadows));
sewardj3cf26a52006-07-27 23:48:53 +0000651 s = 0;
652
653 /* Copy the mempool chunks into the final array. */
654 VG_(HT_ResetIter)(MC_(mempool_list));
655 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
656 VG_(HT_ResetIter)(mp->chunks);
657 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
658 tl_assert(s < *n_shadows);
659 shadows[s++] = mc;
660 }
661 }
662
663 /* Copy the malloc chunks into the final array. */
664 for (m = 0; m < n_mallocs; ++m) {
665 if (!malloc_chunk_holds_a_pool_chunk[m]) {
666 tl_assert(s < *n_shadows);
667 shadows[s++] = mallocs[m];
668 }
669 }
670
671 tl_assert(s == *n_shadows);
672 VG_(free)(mallocs);
673 VG_(free)(malloc_chunk_holds_a_pool_chunk);
674
675 return shadows;
676}
677
678
njn43c799e2003-04-08 00:08:52 +0000679/* Top level entry point to leak detector. Call here, passing in
680 suitable address-validating functions (see comment at top of
njn1d0825f2006-03-27 11:37:07 +0000681 scan_all_valid_memory above). These functions used to encapsulate the
682 differences between Memcheck and Addrcheck; they no longer do but it
683 doesn't hurt to keep them here.
njn43c799e2003-04-08 00:08:52 +0000684*/
njn1d0825f2006-03-27 11:37:07 +0000685void MC_(do_detect_memory_leaks) (
njnb8dca862005-03-14 02:42:44 +0000686 ThreadId tid, LeakCheckMode mode,
sewardj05fe85e2005-04-27 22:46:36 +0000687 Bool (*is_within_valid_secondary) ( Addr ),
688 Bool (*is_valid_aligned_word) ( Addr )
njn43c799e2003-04-08 00:08:52 +0000689)
690{
njnb8dca862005-03-14 02:42:44 +0000691 Int i;
njn43c799e2003-04-08 00:08:52 +0000692
sewardj76754cf2005-03-14 00:14:04 +0000693 tl_assert(mode != LC_Off);
njn43c799e2003-04-08 00:08:52 +0000694
sewardj3cf26a52006-07-27 23:48:53 +0000695 lc_shadows = find_active_shadows(&lc_n_shadows);
njn06072ec2003-09-30 15:35:13 +0000696
697 /* Sort the array. */
698 VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);
699
700 /* Sanity check; assert that the blocks are now in order */
701 for (i = 0; i < lc_n_shadows-1; i++) {
sewardj76754cf2005-03-14 00:14:04 +0000702 tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
njn06072ec2003-09-30 15:35:13 +0000703 }
njn3e884182003-04-15 13:03:23 +0000704
sewardj92691472008-05-29 12:23:24 +0000705 /* Sanity check -- make sure they don't overlap. But do allow
706 exact duplicates. If this assertion fails, it may mean that the
707 application has done something stupid with
708 VALGRIND_MALLOCLIKE_BLOCK client requests, specifically, has
709 made overlapping requests (which are nonsensical). Another way
710 to screw up is to use VALGRIND_MALLOCLIKE_BLOCK for stack
711 locations; again nonsensical. */
njn3e884182003-04-15 13:03:23 +0000712 for (i = 0; i < lc_n_shadows-1; i++) {
tom026debf2008-07-18 08:48:04 +0000713 Bool nonsense_overlap = ! (
714 /* normal case - no overlap */
715 (lc_shadows[i]->data + lc_shadows[i]->szB <= lc_shadows[i+1]->data)
716 ||
717 /* degenerate case: exact duplicates */
718 (lc_shadows[i]->data == lc_shadows[i+1]->data
719 && lc_shadows[i]->szB == lc_shadows[i+1]->szB)
720 );
721 if (nonsense_overlap) {
722 VG_(message)(Vg_UserMsg, "Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)",
723 lc_shadows[ i]->data, (lc_shadows[ i]->data + lc_shadows[ i]->szB),
724 lc_shadows[1+ i]->data, (lc_shadows[1+ i]->data + lc_shadows[1+ i]->szB) );
725 }
726 tl_assert (!nonsense_overlap);
njn3e884182003-04-15 13:03:23 +0000727 }
728
729 if (lc_n_shadows == 0) {
sewardj76754cf2005-03-14 00:14:04 +0000730 tl_assert(lc_shadows == NULL);
sewardj71bc3cb2005-05-19 00:25:45 +0000731 if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
sewardj37d06f22003-09-17 21:48:26 +0000732 VG_(message)(Vg_UserMsg,
njn6b239582005-12-19 19:33:36 +0000733 "All heap blocks were freed -- no leaks are possible.");
sewardj37d06f22003-09-17 21:48:26 +0000734 }
njn43c799e2003-04-08 00:08:52 +0000735 return;
736 }
737
sewardj71bc3cb2005-05-19 00:25:45 +0000738 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
nethercote0f19bce2003-12-02 10:17:44 +0000739 VG_(message)(Vg_UserMsg,
barta0b6b2c2008-07-07 06:49:24 +0000740 "searching for pointers to %'d not-freed blocks.",
nethercote0f19bce2003-12-02 10:17:44 +0000741 lc_n_shadows );
njn43c799e2003-04-08 00:08:52 +0000742
njn3e884182003-04-15 13:03:23 +0000743 lc_min_mallocd_addr = lc_shadows[0]->data;
744 lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
njn718d3b12006-12-16 00:54:12 +0000745 + lc_shadows[lc_n_shadows-1]->szB;
njn43c799e2003-04-08 00:08:52 +0000746
sewardj9c606bd2008-09-18 18:12:50 +0000747 lc_markstack = VG_(malloc)( "mc.ddml.1",
748 lc_n_shadows * sizeof(*lc_markstack) );
sewardjb5f6f512005-03-10 23:59:00 +0000749 for (i = 0; i < lc_n_shadows; i++) {
750 lc_markstack[i].next = -1;
751 lc_markstack[i].state = Unreached;
752 lc_markstack[i].indirect = 0;
753 }
754 lc_markstack_top = -1;
njn43c799e2003-04-08 00:08:52 +0000755
sewardj05fe85e2005-04-27 22:46:36 +0000756 lc_is_within_valid_secondary = is_within_valid_secondary;
757 lc_is_valid_aligned_word = is_valid_aligned_word;
sewardjb5f6f512005-03-10 23:59:00 +0000758
759 lc_scanned = 0;
760
sewardj45f4e7c2005-09-27 19:20:21 +0000761 /* Push roots onto the mark stack. Roots are:
762 - the integer registers of all threads
763 - all mappings belonging to the client, including stacks
764 - .. but excluding any client heap segments.
765 Client heap segments are excluded because we wish to differentiate
766 client heap blocks which are referenced only from inside the heap
767 from those outside. This facilitates the indirect vs direct loss
768 categorisation, which [if the users ever manage to understand it]
769 is really useful for detecting lost cycles.
770 */
sewardj72a28b22006-10-17 02:23:23 +0000771 { Addr* seg_starts;
sewardj45f4e7c2005-09-27 19:20:21 +0000772 Int n_seg_starts;
773 seg_starts = get_seg_starts( &n_seg_starts );
774 tl_assert(seg_starts && n_seg_starts > 0);
775 /* VG_(am_show_nsegments)( 0,"leakcheck"); */
776 for (i = 0; i < n_seg_starts; i++) {
sewardj72a28b22006-10-17 02:23:23 +0000777 NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
sewardj45f4e7c2005-09-27 19:20:21 +0000778 tl_assert(seg);
779 if (seg->kind != SkFileC && seg->kind != SkAnonC)
780 continue;
781 if (!(seg->hasR && seg->hasW))
782 continue;
783 if (seg->isCH)
784 continue;
sewardjde3ad732006-07-27 23:12:17 +0000785
786 /* Don't poke around in device segments as this may cause
787 hangs. Exclude /dev/zero just in case someone allocated
788 memory by explicitly mapping /dev/zero. */
789 if (seg->kind == SkFileC
790 && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
sewardj72a28b22006-10-17 02:23:23 +0000791 HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
sewardjde3ad732006-07-27 23:12:17 +0000792 if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
793 /* don't skip /dev/zero */
794 } else {
795 /* skip this device mapping */
796 continue;
797 }
798 }
799
sewardj45f4e7c2005-09-27 19:20:21 +0000800 if (0)
barta0b6b2c2008-07-07 06:49:24 +0000801 VG_(printf)("ACCEPT %2d %#lx %#lx\n", i, seg->start, seg->end);
sewardj45f4e7c2005-09-27 19:20:21 +0000802 lc_scan_memory(seg->start, seg->end+1 - seg->start);
803 }
804 }
sewardjb5f6f512005-03-10 23:59:00 +0000805
806 /* Push registers onto mark stack */
njn6ace3ea2005-06-17 03:06:27 +0000807 VG_(apply_to_GP_regs)(lc_markstack_push);
sewardjb5f6f512005-03-10 23:59:00 +0000808
809 /* Keep walking the heap until everything is found */
810 lc_do_leakcheck(-1);
njn43c799e2003-04-08 00:08:52 +0000811
sewardj71bc3cb2005-05-19 00:25:45 +0000812 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
barta0b6b2c2008-07-07 06:49:24 +0000813 VG_(message)(Vg_UserMsg, "checked %'lu bytes.", lc_scanned);
njn43c799e2003-04-08 00:08:52 +0000814
njn8df80b22009-03-02 05:11:06 +0000815 MC_(blocks_leaked) = MC_(bytes_leaked) = 0;
816 MC_(blocks_indirect) = MC_(bytes_indirect) = 0;
817 MC_(blocks_dubious) = MC_(bytes_dubious) = 0;
818 MC_(blocks_reachable) = MC_(bytes_reachable) = 0;
819 MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;
njn43c799e2003-04-08 00:08:52 +0000820
sewardjb5f6f512005-03-10 23:59:00 +0000821 if (mode == LC_Full)
njnb8dca862005-03-14 02:42:44 +0000822 full_report(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000823 else
824 make_summary();
njn43c799e2003-04-08 00:08:52 +0000825
sewardj71bc3cb2005-05-19 00:25:45 +0000826 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
nethercote0f19bce2003-12-02 10:17:44 +0000827 VG_(message)(Vg_UserMsg, "");
828 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
barta0b6b2c2008-07-07 06:49:24 +0000829 VG_(message)(Vg_UserMsg, " definitely lost: %'lu bytes in %'lu blocks.",
njn8df80b22009-03-02 05:11:06 +0000830 MC_(bytes_leaked), MC_(blocks_leaked) );
831 if (MC_(blocks_indirect) > 0)
barta0b6b2c2008-07-07 06:49:24 +0000832 VG_(message)(Vg_UserMsg, " indirectly lost: %'lu bytes in %'lu blocks.",
njn8df80b22009-03-02 05:11:06 +0000833 MC_(bytes_indirect), MC_(blocks_indirect) );
barta0b6b2c2008-07-07 06:49:24 +0000834 VG_(message)(Vg_UserMsg, " possibly lost: %'lu bytes in %'lu blocks.",
njn8df80b22009-03-02 05:11:06 +0000835 MC_(bytes_dubious), MC_(blocks_dubious) );
barta0b6b2c2008-07-07 06:49:24 +0000836 VG_(message)(Vg_UserMsg, " still reachable: %'lu bytes in %'lu blocks.",
njn8df80b22009-03-02 05:11:06 +0000837 MC_(bytes_reachable), MC_(blocks_reachable) );
barta0b6b2c2008-07-07 06:49:24 +0000838 VG_(message)(Vg_UserMsg, " suppressed: %'lu bytes in %'lu blocks.",
njn8df80b22009-03-02 05:11:06 +0000839 MC_(bytes_suppressed), MC_(blocks_suppressed) );
sewardjce4717e2006-12-06 18:05:54 +0000840 if (mode == LC_Summary
njn8df80b22009-03-02 05:11:06 +0000841 && (MC_(blocks_leaked) + MC_(blocks_indirect)
842 + MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
sewardjce4717e2006-12-06 18:05:54 +0000843 VG_(message)(Vg_UserMsg,
844 "Rerun with --leak-check=full to see details of leaked memory.");
845 }
njn8df80b22009-03-02 05:11:06 +0000846 if (MC_(blocks_reachable) > 0 && !MC_(clo_show_reachable) && mode == LC_Full) {
nethercote0f19bce2003-12-02 10:17:44 +0000847 VG_(message)(Vg_UserMsg,
848 "Reachable blocks (those to which a pointer was found) are not shown.");
849 VG_(message)(Vg_UserMsg,
sewardjce4717e2006-12-06 18:05:54 +0000850 "To see them, rerun with: --leak-check=full --show-reachable=yes");
nethercote0f19bce2003-12-02 10:17:44 +0000851 }
njn43c799e2003-04-08 00:08:52 +0000852 }
njn43c799e2003-04-08 00:08:52 +0000853
njn3e884182003-04-15 13:03:23 +0000854 VG_(free) ( lc_shadows );
sewardjb5f6f512005-03-10 23:59:00 +0000855 VG_(free) ( lc_markstack );
njn43c799e2003-04-08 00:08:52 +0000856}
857
858/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +0000859/*--- end ---*/
njn43c799e2003-04-08 00:08:52 +0000860/*--------------------------------------------------------------------*/
861