blob: d282e62c260052bfb9a1569a636871a39175c6ed [file] [log] [blame]
njn43c799e2003-04-08 00:08:52 +00001
2/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00003/*--- The leak checker. mc_leakcheck.c ---*/
njn43c799e2003-04-08 00:08:52 +00004/*--------------------------------------------------------------------*/
5
6/*
nethercote137bc552003-11-14 17:47:54 +00007 This file is part of MemCheck, a heavyweight Valgrind tool for
njn1d0825f2006-03-27 11:37:07 +00008 detecting memory errors.
njn43c799e2003-04-08 00:08:52 +00009
sewardje4b0bf02006-06-05 23:21:15 +000010 Copyright (C) 2000-2006 Julian Seward
njn43c799e2003-04-08 00:08:52 +000011 jseward@acm.org
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29*/
30
njnc7561b92005-06-19 01:24:32 +000031#include "pub_tool_basics.h"
sewardj4cfea4f2006-10-14 19:26:10 +000032#include "pub_tool_vki.h"
njn4802b382005-06-11 04:58:29 +000033#include "pub_tool_aspacemgr.h"
njn1d0825f2006-03-27 11:37:07 +000034#include "pub_tool_execontext.h"
35#include "pub_tool_hashtable.h"
njn97405b22005-06-02 03:39:33 +000036#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_tool_libcprint.h"
njnde62cbf2005-06-10 22:08:14 +000039#include "pub_tool_libcsignal.h"
njn6ace3ea2005-06-17 03:06:27 +000040#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000041#include "pub_tool_mallocfree.h"
42#include "pub_tool_options.h"
njn73c51342005-06-18 15:18:15 +000043#include "pub_tool_signals.h"
njn1d0825f2006-03-27 11:37:07 +000044#include "pub_tool_tooliface.h" // Needed for mc_include.h
njn43c799e2003-04-08 00:08:52 +000045
njn1d0825f2006-03-27 11:37:07 +000046#include "mc_include.h"
njnc7561b92005-06-19 01:24:32 +000047
48#include <setjmp.h> // For jmp_buf
49
50
/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

/* Escape hatch for the scanner: set up via __builtin_setjmp in
   lc_scan_memory_WRK, jumped to from scan_all_valid_memory_catcher
   when a scan touches an unmapped/unreadable address. */
static
jmp_buf memscan_jmpbuf;
62
63
64static
njn695c16e2005-03-27 03:40:28 +000065void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
njn43c799e2003-04-08 00:08:52 +000066{
sewardjb5f6f512005-03-10 23:59:00 +000067 if (0)
68 VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
69 if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
70 __builtin_longjmp(memscan_jmpbuf, 1);
njn43c799e2003-04-08 00:08:52 +000071}
72
sewardj45f4e7c2005-09-27 19:20:21 +000073
74/* TODO: GIVE THIS A PROPER HOME
njn1d0825f2006-03-27 11:37:07 +000075 TODO: MERGE THIS WITH DUPLICATE IN m_main.c and coredump-elf.c.
sewardj45f4e7c2005-09-27 19:20:21 +000076 Extract from aspacem a vector of the current segment start
77 addresses. The vector is dynamically allocated and should be freed
78 by the caller when done. REQUIRES m_mallocfree to be running.
79 Writes the number of addresses required into *n_acquired. */
80
81static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
82{
83 Addr* starts;
sewardjae986ca2005-10-12 12:53:20 +000084 Int n_starts, r = 0;
sewardj45f4e7c2005-09-27 19:20:21 +000085
86 n_starts = 1;
87 while (True) {
88 starts = VG_(malloc)( n_starts * sizeof(Addr) );
89 if (starts == NULL)
90 break;
91 r = VG_(am_get_segment_starts)( starts, n_starts );
92 if (r >= 0)
93 break;
94 VG_(free)(starts);
95 n_starts *= 2;
96 }
97
98 if (starts == NULL) {
99 *n_acquired = 0;
100 return NULL;
101 }
102
103 *n_acquired = r;
104 return starts;
105}
106
107
/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* A block is either
   -- Proper-ly reached; a pointer to its start has been found
   -- Interior-ly reached; only an interior pointer to it has been found
   -- Unreached; so far, no pointers to any part of it have been found.
   -- IndirectLeak; leaked, but referred to by another leaked block
*/
typedef
   enum {
      Unreached    =0,
      IndirectLeak =1,
      Interior     =2,
      Proper       =3
   }
   Reachedness;

/* An entry in the mark stack.  There is one MarkStack per shadow;
   the stack is threaded through the 'next' indices, with -1 meaning
   "not on the stack" / end of stack. */
typedef
   struct {
      Int   next:30;   /* Index of next in mark stack */
      UInt  state:2;   /* Reachedness (2 bits is enough for 4 values) */
      SizeT indirect;  /* if Unreached, how much is unreachable from here */
   }
   MarkStack;

/* A block record, used for generating err msgs.  Blocks with the same
   loss mode and allocation context are commoned up into one record. */
typedef
   struct _LossRecord {
      struct _LossRecord* next;
      /* Where these lost blocks were allocated. */
      ExeContext*  allocated_at;
      /* Their reachability. */
      Reachedness  loss_mode;
      /* Number of blocks and total # bytes involved. */
      SizeT        total_bytes;
      SizeT        indirect_bytes;
      UInt         num_blocks;
   }
   LossRecord;

/* The 'extra' struct for leak errors. */
typedef
   struct {
      UInt        n_this_record;
      UInt        n_total_records;
      LossRecord* lossRecord;
   }
   LeakExtra;
njn43c799e2003-04-08 00:08:52 +0000159
160/* Find the i such that ptr points at or inside the block described by
161 shadows[i]. Return -1 if none found. This assumes that shadows[]
162 has been sorted on the ->data field. */
163
sewardjb5f6f512005-03-10 23:59:00 +0000164#if VG_DEBUG_LEAKCHECK
njn43c799e2003-04-08 00:08:52 +0000165/* Used to sanity-check the fast binary-search mechanism. */
166static
njn1d0825f2006-03-27 11:37:07 +0000167Int find_shadow_for_OLD ( Addr ptr,
168 MC_Chunk** shadows,
169 Int n_shadows )
njn43c799e2003-04-08 00:08:52 +0000170
171{
172 Int i;
173 Addr a_lo, a_hi;
tom16ade0d2005-07-18 09:41:57 +0000174 PROF_EVENT(70, "find_shadow_for_OLD");
njn43c799e2003-04-08 00:08:52 +0000175 for (i = 0; i < n_shadows; i++) {
tom16ade0d2005-07-18 09:41:57 +0000176 PROF_EVENT(71, "find_shadow_for_OLD(loop)");
njn43c799e2003-04-08 00:08:52 +0000177 a_lo = shadows[i]->data;
sewardjb5f6f512005-03-10 23:59:00 +0000178 a_hi = ((Addr)shadows[i]->data) + shadows[i]->size;
sewardj3cf26a52006-07-27 23:48:53 +0000179 if (a_lo <= ptr && ptr < a_hi)
njn43c799e2003-04-08 00:08:52 +0000180 return i;
181 }
182 return -1;
183}
184#endif
185
186
187static
njn1d0825f2006-03-27 11:37:07 +0000188Int find_shadow_for ( Addr ptr,
189 MC_Chunk** shadows,
190 Int n_shadows )
njn43c799e2003-04-08 00:08:52 +0000191{
192 Addr a_mid_lo, a_mid_hi;
193 Int lo, mid, hi, retVal;
194 /* VG_(printf)("find shadow for %p = ", ptr); */
195 retVal = -1;
196 lo = 0;
197 hi = n_shadows-1;
198 while (True) {
njn3e884182003-04-15 13:03:23 +0000199 /* invariant: current unsearched space is from lo to hi, inclusive. */
njn43c799e2003-04-08 00:08:52 +0000200 if (lo > hi) break; /* not found */
201
202 mid = (lo + hi) / 2;
njn3e884182003-04-15 13:03:23 +0000203 a_mid_lo = shadows[mid]->data;
sewardjb5f6f512005-03-10 23:59:00 +0000204 a_mid_hi = shadows[mid]->data + shadows[mid]->size;
sewardj5bee4f82006-07-29 09:00:25 +0000205 /* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
206 Special-case zero-sized blocks - treat them as if they had
207 size 1. Not doing so causes them to not cover any address
208 range at all and so will never be identified as the target of
209 any pointer, which causes them to be incorrectly reported as
210 definitely leaked. */
211 if (shadows[mid]->size == 0)
212 a_mid_hi++;
njn43c799e2003-04-08 00:08:52 +0000213
214 if (ptr < a_mid_lo) {
215 hi = mid-1;
216 continue;
217 }
sewardj3cf26a52006-07-27 23:48:53 +0000218 if (ptr >= a_mid_hi) {
njn43c799e2003-04-08 00:08:52 +0000219 lo = mid+1;
220 continue;
221 }
sewardj3cf26a52006-07-27 23:48:53 +0000222 tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
njn43c799e2003-04-08 00:08:52 +0000223 retVal = mid;
224 break;
225 }
226
sewardjb5f6f512005-03-10 23:59:00 +0000227# if VG_DEBUG_LEAKCHECK
sewardj76754cf2005-03-14 00:14:04 +0000228 tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn43c799e2003-04-08 00:08:52 +0000229# endif
230 /* VG_(printf)("%d\n", retVal); */
231 return retVal;
232}
233
/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MC_Chunk** lc_shadows;          /* array of blocks under scan, sorted by ->data */
static Int        lc_n_shadows;        /* number of entries in lc_shadows */
static MarkStack* lc_markstack;        /* one entry per shadow */
static Int        lc_markstack_top;    /* index of mark-stack top; -1 if empty */
static Addr       lc_min_mallocd_addr; /* lowest mallocd address -- TODO confirm, set elsewhere */
static Addr       lc_max_mallocd_addr; /* highest mallocd address -- TODO confirm, set elsewhere */
static SizeT      lc_scanned;          /* #bytes actually examined during the scan */

/* Tool-supplied predicates used while scanning (see
   MC_(do_detect_memory_leaks) for where they are installed). */
static Bool (*lc_is_within_valid_secondary) (Addr addr);
static Bool (*lc_is_valid_aligned_word)     (Addr addr);
sewardjb5f6f512005-03-10 23:59:00 +0000245
sewardj71bc3cb2005-05-19 00:25:45 +0000246static const HChar* str_lossmode ( Reachedness lossmode )
njn43c799e2003-04-08 00:08:52 +0000247{
sewardj71bc3cb2005-05-19 00:25:45 +0000248 const HChar *loss = "?";
249 switch (lossmode) {
250 case Unreached: loss = "definitely lost"; break;
251 case IndirectLeak: loss = "indirectly lost"; break;
252 case Interior: loss = "possibly lost"; break;
253 case Proper: loss = "still reachable"; break;
njn43c799e2003-04-08 00:08:52 +0000254 }
sewardjb5f6f512005-03-10 23:59:00 +0000255 return loss;
njn43c799e2003-04-08 00:08:52 +0000256}
257
sewardj71bc3cb2005-05-19 00:25:45 +0000258static const HChar* xml_kind ( Reachedness lossmode )
259{
260 const HChar *loss = "?";
261 switch (lossmode) {
262 case Unreached: loss = "Leak_DefinitelyLost"; break;
263 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
264 case Interior: loss = "Leak_PossiblyLost"; break;
265 case Proper: loss = "Leak_StillReachable"; break;
266 }
267 return loss;
268}
269
270
njn43c799e2003-04-08 00:08:52 +0000271/* Used for printing leak errors, avoids exposing the LossRecord type (which
272 comes in as void*, requiring a cast. */
njn1d0825f2006-03-27 11:37:07 +0000273void MC_(pp_LeakError)(void* vextra)
njn43c799e2003-04-08 00:08:52 +0000274{
sewardj71bc3cb2005-05-19 00:25:45 +0000275 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
276 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
277
njn02977032005-05-17 04:00:11 +0000278 LeakExtra* extra = (LeakExtra*)vextra;
279 LossRecord* l = extra->lossRecord;
280 const Char *loss = str_lossmode(l->loss_mode);
njn43c799e2003-04-08 00:08:52 +0000281
sewardj71bc3cb2005-05-19 00:25:45 +0000282 if (VG_(clo_xml)) {
sewardj97f7e0c2005-07-19 15:00:25 +0000283 VG_(message)(Vg_UserMsg, " <kind>%t</kind>", xml_kind(l->loss_mode));
sewardj71bc3cb2005-05-19 00:25:45 +0000284 } else {
285 VG_(message)(Vg_UserMsg, "");
286 }
287
sewardjb5f6f512005-03-10 23:59:00 +0000288 if (l->indirect_bytes) {
289 VG_(message)(Vg_UserMsg,
njn0fd92f42005-10-06 03:32:42 +0000290 "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
291 " are %s in loss record %,u of %,u%s",
sewardj71bc3cb2005-05-19 00:25:45 +0000292 xpre,
293 l->total_bytes + l->indirect_bytes,
294 l->total_bytes, l->indirect_bytes, l->num_blocks,
295 loss, extra->n_this_record, extra->n_total_records,
296 xpost
297 );
298 if (VG_(clo_xml)) {
njn0fd92f42005-10-06 03:32:42 +0000299 // Nb: don't put commas in these XML numbers
300 VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
sewardj71bc3cb2005-05-19 00:25:45 +0000301 l->total_bytes + l->indirect_bytes);
njn0fd92f42005-10-06 03:32:42 +0000302 VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
sewardj71bc3cb2005-05-19 00:25:45 +0000303 l->num_blocks);
304 }
sewardjb5f6f512005-03-10 23:59:00 +0000305 } else {
sewardj71bc3cb2005-05-19 00:25:45 +0000306 VG_(message)(
307 Vg_UserMsg,
njn0fd92f42005-10-06 03:32:42 +0000308 "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
sewardj71bc3cb2005-05-19 00:25:45 +0000309 xpre,
310 l->total_bytes, l->num_blocks,
311 loss, extra->n_this_record, extra->n_total_records,
312 xpost
313 );
314 if (VG_(clo_xml)) {
315 VG_(message)(Vg_UserMsg, " <leakedbytes>%d</leakedbytes>",
316 l->total_bytes);
317 VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
318 l->num_blocks);
319 }
sewardjb5f6f512005-03-10 23:59:00 +0000320 }
njn43c799e2003-04-08 00:08:52 +0000321 VG_(pp_ExeContext)(l->allocated_at);
322}
323
/* Whole-run byte totals by loss category, exported to the rest of
   Memcheck for the final leak summary. */
SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;
njn47363ab2003-04-21 13:24:40 +0000329
njn06072ec2003-09-30 15:35:13 +0000330static Int lc_compar(void* n1, void* n2)
331{
njn1d0825f2006-03-27 11:37:07 +0000332 MC_Chunk* mc1 = *(MC_Chunk**)n1;
333 MC_Chunk* mc2 = *(MC_Chunk**)n2;
njn06072ec2003-09-30 15:35:13 +0000334 return (mc1->data < mc2->data ? -1 : 1);
335}
336
/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void lc_markstack_push_WRK(Addr ptr, Int clique)
{
   Int sh_no;

   /* quick filter: ignore values that aren't even client addresses */
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_NONE))
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   /* ptr doesn't fall inside any known block */
   if (sh_no == -1)
      return;

   /* Sanity: sh_no must denote a block whose extent (with zero-sized
      blocks widened to 1 byte, as in find_shadow_for) covers ptr. */
   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr >= lc_shadows[sh_no]->data);
   tl_assert(ptr < lc_shadows[sh_no]->data
             + lc_shadows[sh_no]->size
             + (lc_shadows[sh_no]->size==0 ? 1 : 0));

   /* First sighting of this block: thread it onto the mark stack. */
   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   tl_assert(clique >= -1 && clique < lc_n_shadows);

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %p is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %d\n",
                              sh_no, clique, lc_shadows[sh_no]->size);
            }

            /* Fold this block's direct and indirect sizes into the
               leader's indirect total. */
            lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0;  /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      /* Pointer to the block's start: fully (properly) reached. */
      lc_markstack[sh_no].state = Proper;
   } else {
      /* Interior pointer: only upgrade from Unreached, never downgrade
         a block already known to be Proper. */
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
412
413static void lc_markstack_push(Addr ptr)
414{
sewardj45f4e7c2005-09-27 19:20:21 +0000415 lc_markstack_push_WRK(ptr, -1);
sewardjb5f6f512005-03-10 23:59:00 +0000416}
417
418/* Return the top of the mark stack, if any. */
419static Int lc_markstack_pop(void)
420{
421 Int ret = lc_markstack_top;
422
423 if (ret != -1) {
424 lc_markstack_top = lc_markstack[ret].next;
425 lc_markstack[ret].next = -1;
426 }
427
428 return ret;
429}
430
sewardj45d94cc2005-04-20 14:44:11 +0000431
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void lc_scan_memory_WRK(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);
   /* Save the current signal mask and install the fault catcher so a
      faulting read longjmps back to the setjmp below instead of
      killing the process. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   // lc_scanned += end-ptr;

   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks (whole secondary-map-sized regions with no
         valid addresses) */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonble */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      /* Guarded read: if dereferencing ptr faults, control resumes in
         the 'else' arm via the catcher's longjmp. */
      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            lc_scanned += sizeof(Addr);
            addr = *(Addr *)ptr;
            /* Treat the word as a potential pointer. */
            lc_markstack_push_WRK(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     /* bad page - skip it */
      }
   }

   /* Restore original mask and remove the fault catcher. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
488
sewardj45d94cc2005-04-20 14:44:11 +0000489
sewardjb5f6f512005-03-10 23:59:00 +0000490static void lc_scan_memory(Addr start, SizeT len)
491{
sewardj45f4e7c2005-09-27 19:20:21 +0000492 lc_scan_memory_WRK(start, len, -1);
sewardjb5f6f512005-03-10 23:59:00 +0000493}
494
495/* Process the mark stack until empty. If mopup is true, then we're
496 actually gathering leaked blocks, so they should be marked
497 IndirectLeak. */
498static void lc_do_leakcheck(Int clique)
499{
500 Int top;
501
502 while((top = lc_markstack_pop()) != -1) {
sewardj76754cf2005-03-14 00:14:04 +0000503 tl_assert(top >= 0 && top < lc_n_shadows);
504 tl_assert(lc_markstack[top].state != Unreached);
sewardjb5f6f512005-03-10 23:59:00 +0000505
sewardj45f4e7c2005-09-27 19:20:21 +0000506 lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->size, clique);
sewardjb5f6f512005-03-10 23:59:00 +0000507 }
508}
509
/* Per-run block counts, paralleling the MC_(bytes_*) totals above. */
static SizeT blocks_leaked;
static SizeT blocks_indirect;
static SizeT blocks_dubious;
static SizeT blocks_reachable;
static SizeT blocks_suppressed;
sewardjb5f6f512005-03-10 23:59:00 +0000515
njnb8dca862005-03-14 02:42:44 +0000516static void full_report(ThreadId tid)
sewardjb5f6f512005-03-10 23:59:00 +0000517{
518 Int i;
519 Int n_lossrecords;
520 LossRecord* errlist;
521 LossRecord* p;
522 Bool is_suppressed;
njn02977032005-05-17 04:00:11 +0000523 LeakExtra leak_extra;
sewardjb5f6f512005-03-10 23:59:00 +0000524
525 /* Go through and group lost structures into cliques. For each
526 Unreached block, push it onto the mark stack, and find all the
527 blocks linked to it. These are marked IndirectLeak, and their
528 size is added to the clique leader's indirect size. If one of
529 the found blocks was itself a clique leader (from a previous
530 pass), then the cliques are merged. */
531 for (i = 0; i < lc_n_shadows; i++) {
532 if (VG_DEBUG_CLIQUE)
533 VG_(printf)("cliques: %d at %p -> %s\n",
njn02977032005-05-17 04:00:11 +0000534 i, lc_shadows[i]->data, str_lossmode(lc_markstack[i].state));
sewardjb5f6f512005-03-10 23:59:00 +0000535 if (lc_markstack[i].state != Unreached)
536 continue;
537
sewardj76754cf2005-03-14 00:14:04 +0000538 tl_assert(lc_markstack_top == -1);
sewardjb5f6f512005-03-10 23:59:00 +0000539
540 if (VG_DEBUG_CLIQUE)
541 VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
542
sewardj45f4e7c2005-09-27 19:20:21 +0000543 lc_markstack_push_WRK(lc_shadows[i]->data, i);
sewardjb5f6f512005-03-10 23:59:00 +0000544
545 lc_do_leakcheck(i);
546
sewardj76754cf2005-03-14 00:14:04 +0000547 tl_assert(lc_markstack_top == -1);
sewardj75d8eb92005-12-18 02:48:40 +0000548 tl_assert(lc_markstack[i].state == IndirectLeak
549 /* jrs 20051218: Ashley Pittman supplied a
550 custom-allocator test program which causes the ==
551 IndirectLeak condition to fail - it causes .state
552 to be Unreached. Since I have no idea how this
553 clique stuff works and no time to figure it out,
554 just allow that condition too. This could well be
555 a completely bogus fix. It doesn't seem unsafe
556 given that in any case the .state field is
557 immediately overwritten by the next statement. */
558 || lc_markstack[i].state == Unreached);
sewardjb5f6f512005-03-10 23:59:00 +0000559
560 lc_markstack[i].state = Unreached; /* Return to unreached state,
561 to indicate its a clique
562 leader */
563 }
564
565 /* Common up the lost blocks so we can print sensible error messages. */
566 n_lossrecords = 0;
567 errlist = NULL;
568 for (i = 0; i < lc_n_shadows; i++) {
569 ExeContext* where = lc_shadows[i]->where;
570
571 for (p = errlist; p != NULL; p = p->next) {
572 if (p->loss_mode == lc_markstack[i].state
njn1d0825f2006-03-27 11:37:07 +0000573 && VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
sewardjb5f6f512005-03-10 23:59:00 +0000574 p->allocated_at,
575 where) ) {
576 break;
577 }
578 }
579 if (p != NULL) {
580 p->num_blocks ++;
581 p->total_bytes += lc_shadows[i]->size;
582 p->indirect_bytes += lc_markstack[i].indirect;
583 } else {
584 n_lossrecords ++;
585 p = VG_(malloc)(sizeof(LossRecord));
586 p->loss_mode = lc_markstack[i].state;
587 p->allocated_at = where;
588 p->total_bytes = lc_shadows[i]->size;
589 p->indirect_bytes = lc_markstack[i].indirect;
590 p->num_blocks = 1;
591 p->next = errlist;
592 errlist = p;
593 }
594 }
595
596 /* Print out the commoned-up blocks and collect summary stats. */
597 for (i = 0; i < n_lossrecords; i++) {
598 Bool print_record;
599 LossRecord* p_min = NULL;
njn0fd92f42005-10-06 03:32:42 +0000600 SizeT n_min = ~(0x0L);
sewardjb5f6f512005-03-10 23:59:00 +0000601 for (p = errlist; p != NULL; p = p->next) {
602 if (p->num_blocks > 0 && p->total_bytes < n_min) {
603 n_min = p->total_bytes + p->indirect_bytes;
604 p_min = p;
605 }
606 }
sewardj76754cf2005-03-14 00:14:04 +0000607 tl_assert(p_min != NULL);
sewardjb5f6f512005-03-10 23:59:00 +0000608
609 /* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
610 we disallow that when --leak-check=yes.
611
njn02977032005-05-17 04:00:11 +0000612 Prints the error if not suppressed, unless it's reachable (Proper
613 or IndirectLeak) and --show-reachable=no */
sewardjb5f6f512005-03-10 23:59:00 +0000614
njn1d0825f2006-03-27 11:37:07 +0000615 print_record = ( MC_(clo_show_reachable) ||
njn02977032005-05-17 04:00:11 +0000616 Unreached == p_min->loss_mode ||
617 Interior == p_min->loss_mode );
618
619 // Nb: because VG_(unique_error) does all the error processing
620 // immediately, and doesn't save the error, leakExtra can be
621 // stack-allocated.
622 leak_extra.n_this_record = i+1;
623 leak_extra.n_total_records = n_lossrecords;
624 leak_extra.lossRecord = p_min;
sewardjb5f6f512005-03-10 23:59:00 +0000625 is_suppressed =
njn1d0825f2006-03-27 11:37:07 +0000626 MC_(record_leak_error) ( tid, &leak_extra, p_min->allocated_at,
627 print_record );
sewardjb5f6f512005-03-10 23:59:00 +0000628
629 if (is_suppressed) {
njn1d0825f2006-03-27 11:37:07 +0000630 blocks_suppressed += p_min->num_blocks;
631 MC_(bytes_suppressed) += p_min->total_bytes;
sewardjb5f6f512005-03-10 23:59:00 +0000632
njn1d0825f2006-03-27 11:37:07 +0000633 } else if (Unreached == p_min->loss_mode) {
634 blocks_leaked += p_min->num_blocks;
635 MC_(bytes_leaked) += p_min->total_bytes;
sewardjb5f6f512005-03-10 23:59:00 +0000636
njn1d0825f2006-03-27 11:37:07 +0000637 } else if (IndirectLeak == p_min->loss_mode) {
638 blocks_indirect += p_min->num_blocks;
639 MC_(bytes_indirect) += p_min->total_bytes;
sewardjb5f6f512005-03-10 23:59:00 +0000640
njn1d0825f2006-03-27 11:37:07 +0000641 } else if (Interior == p_min->loss_mode) {
642 blocks_dubious += p_min->num_blocks;
643 MC_(bytes_dubious) += p_min->total_bytes;
sewardjb5f6f512005-03-10 23:59:00 +0000644
njn1d0825f2006-03-27 11:37:07 +0000645 } else if (Proper == p_min->loss_mode) {
646 blocks_reachable += p_min->num_blocks;
647 MC_(bytes_reachable) += p_min->total_bytes;
sewardjb5f6f512005-03-10 23:59:00 +0000648
649 } else {
sewardj76754cf2005-03-14 00:14:04 +0000650 VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
sewardjb5f6f512005-03-10 23:59:00 +0000651 }
652 p_min->num_blocks = 0;
653 }
654}
655
656/* Compute a quick summary of the leak check. */
tom151a6392005-11-11 12:30:36 +0000657static void make_summary(void)
sewardjb5f6f512005-03-10 23:59:00 +0000658{
659 Int i;
660
661 for(i = 0; i < lc_n_shadows; i++) {
662 SizeT size = lc_shadows[i]->size;
663
664 switch(lc_markstack[i].state) {
665 case Unreached:
666 blocks_leaked++;
njn1d0825f2006-03-27 11:37:07 +0000667 MC_(bytes_leaked) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000668 break;
669
670 case Proper:
671 blocks_reachable++;
njn1d0825f2006-03-27 11:37:07 +0000672 MC_(bytes_reachable) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000673 break;
674
675 case Interior:
676 blocks_dubious++;
njn1d0825f2006-03-27 11:37:07 +0000677 MC_(bytes_dubious) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000678 break;
679
680 case IndirectLeak: /* shouldn't happen */
681 blocks_indirect++;
njn1d0825f2006-03-27 11:37:07 +0000682 MC_(bytes_indirect) += size;
sewardjb5f6f512005-03-10 23:59:00 +0000683 break;
684 }
685 }
686}
687
sewardj3cf26a52006-07-27 23:48:53 +0000688static MC_Chunk**
689find_active_shadows(UInt* n_shadows)
690{
691 /* Our goal is to construct a set of shadows that includes every
692 * mempool chunk, and every malloc region that *doesn't* contain a
693 * mempool chunk. We do this in several phases.
694 *
695 * First we collect all the malloc chunks into an array and sort it.
696 * We do this because we want to query the chunks by interior
697 * pointers, requiring binary search.
698 *
699 * Second we build an array containing a Bool for each malloc chunk,
700 * indicating whether it contains any mempools.
701 *
702 * Third we loop over the mempool tables. For each chunk in each
703 * pool, we set the entry in the Bool array corresponding to the
704 * malloc chunk containing the mempool chunk.
705 *
706 * Finally we copy the mempool chunks and the non-marked malloc
707 * chunks into a combined array of shadows, free our temporaries,
708 * and return the combined array.
709 */
710
711 MC_Mempool *mp;
712 MC_Chunk **mallocs, **shadows, *mc;
713 UInt n_mallocs, m, s;
714 Bool *malloc_chunk_holds_a_pool_chunk;
715
716 mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
717
718 if (n_mallocs == 0) {
719 tl_assert(mallocs == NULL);
720 *n_shadows = 0;
721 return NULL;
722 }
723
724 VG_(ssort)((void*)mallocs, n_mallocs,
725 sizeof(VgHashNode*), lc_compar);
726
727 malloc_chunk_holds_a_pool_chunk = VG_(calloc)( n_mallocs, sizeof(Bool) );
728
729 *n_shadows = n_mallocs;
730
731 VG_(HT_ResetIter)(MC_(mempool_list));
732 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
733 VG_(HT_ResetIter)(mp->chunks);
734 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
735
736 /* We'll need a shadow for this chunk. */
737 ++(*n_shadows);
738
739 /* Possibly invalidate the malloc holding the beginning of this chunk. */
740 m = find_shadow_for(mc->data, mallocs, n_mallocs);
741 if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
742 tl_assert(*n_shadows > 0);
743 --(*n_shadows);
744 malloc_chunk_holds_a_pool_chunk[m] = True;
745 }
746
747 /* Possibly invalidate the malloc holding the end of this chunk. */
748 if (mc->size > 1) {
749 m = find_shadow_for(mc->data + (mc->size - 1), mallocs, n_mallocs);
750 if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
751 tl_assert(*n_shadows > 0);
752 --(*n_shadows);
753 malloc_chunk_holds_a_pool_chunk[m] = True;
754 }
755 }
756 }
757 }
758
759 tl_assert(*n_shadows > 0);
760 shadows = VG_(malloc)(sizeof(VgHashNode*) * (*n_shadows));
761 s = 0;
762
763 /* Copy the mempool chunks into the final array. */
764 VG_(HT_ResetIter)(MC_(mempool_list));
765 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
766 VG_(HT_ResetIter)(mp->chunks);
767 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
768 tl_assert(s < *n_shadows);
769 shadows[s++] = mc;
770 }
771 }
772
773 /* Copy the malloc chunks into the final array. */
774 for (m = 0; m < n_mallocs; ++m) {
775 if (!malloc_chunk_holds_a_pool_chunk[m]) {
776 tl_assert(s < *n_shadows);
777 shadows[s++] = mallocs[m];
778 }
779 }
780
781 tl_assert(s == *n_shadows);
782 VG_(free)(mallocs);
783 VG_(free)(malloc_chunk_holds_a_pool_chunk);
784
785 return shadows;
786}
787
788
njn43c799e2003-04-08 00:08:52 +0000789/* Top level entry point to leak detector. Call here, passing in
790 suitable address-validating functions (see comment at top of
njn1d0825f2006-03-27 11:37:07 +0000791 scan_all_valid_memory above). These functions used to encapsulate the
792 differences between Memcheck and Addrcheck; they no longer do but it
793 doesn't hurt to keep them here.
njn43c799e2003-04-08 00:08:52 +0000794*/
njn1d0825f2006-03-27 11:37:07 +0000795void MC_(do_detect_memory_leaks) (
njnb8dca862005-03-14 02:42:44 +0000796 ThreadId tid, LeakCheckMode mode,
sewardj05fe85e2005-04-27 22:46:36 +0000797 Bool (*is_within_valid_secondary) ( Addr ),
798 Bool (*is_valid_aligned_word) ( Addr )
njn43c799e2003-04-08 00:08:52 +0000799)
800{
njnb8dca862005-03-14 02:42:44 +0000801 Int i;
njn43c799e2003-04-08 00:08:52 +0000802
sewardj76754cf2005-03-14 00:14:04 +0000803 tl_assert(mode != LC_Off);
njn43c799e2003-04-08 00:08:52 +0000804
sewardj3cf26a52006-07-27 23:48:53 +0000805 lc_shadows = find_active_shadows(&lc_n_shadows);
njn06072ec2003-09-30 15:35:13 +0000806
807 /* Sort the array. */
808 VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);
809
810 /* Sanity check; assert that the blocks are now in order */
811 for (i = 0; i < lc_n_shadows-1; i++) {
sewardj76754cf2005-03-14 00:14:04 +0000812 tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
njn06072ec2003-09-30 15:35:13 +0000813 }
njn3e884182003-04-15 13:03:23 +0000814
815 /* Sanity check -- make sure they don't overlap */
816 for (i = 0; i < lc_n_shadows-1; i++) {
sewardj76754cf2005-03-14 00:14:04 +0000817 tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
sewardj7e9ed712005-12-18 02:37:50 +0000818 <= lc_shadows[i+1]->data );
njn3e884182003-04-15 13:03:23 +0000819 }
820
821 if (lc_n_shadows == 0) {
sewardj76754cf2005-03-14 00:14:04 +0000822 tl_assert(lc_shadows == NULL);
sewardj71bc3cb2005-05-19 00:25:45 +0000823 if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
sewardj37d06f22003-09-17 21:48:26 +0000824 VG_(message)(Vg_UserMsg,
njn6b239582005-12-19 19:33:36 +0000825 "All heap blocks were freed -- no leaks are possible.");
sewardj37d06f22003-09-17 21:48:26 +0000826 }
njn43c799e2003-04-08 00:08:52 +0000827 return;
828 }
829
sewardj71bc3cb2005-05-19 00:25:45 +0000830 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
nethercote0f19bce2003-12-02 10:17:44 +0000831 VG_(message)(Vg_UserMsg,
njn0fd92f42005-10-06 03:32:42 +0000832 "searching for pointers to %,d not-freed blocks.",
nethercote0f19bce2003-12-02 10:17:44 +0000833 lc_n_shadows );
njn43c799e2003-04-08 00:08:52 +0000834
njn3e884182003-04-15 13:03:23 +0000835 lc_min_mallocd_addr = lc_shadows[0]->data;
836 lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
sewardjb5f6f512005-03-10 23:59:00 +0000837 + lc_shadows[lc_n_shadows-1]->size;
njn43c799e2003-04-08 00:08:52 +0000838
sewardjb5f6f512005-03-10 23:59:00 +0000839 lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
840 for (i = 0; i < lc_n_shadows; i++) {
841 lc_markstack[i].next = -1;
842 lc_markstack[i].state = Unreached;
843 lc_markstack[i].indirect = 0;
844 }
845 lc_markstack_top = -1;
njn43c799e2003-04-08 00:08:52 +0000846
sewardj05fe85e2005-04-27 22:46:36 +0000847 lc_is_within_valid_secondary = is_within_valid_secondary;
848 lc_is_valid_aligned_word = is_valid_aligned_word;
sewardjb5f6f512005-03-10 23:59:00 +0000849
850 lc_scanned = 0;
851
sewardj45f4e7c2005-09-27 19:20:21 +0000852 /* Push roots onto the mark stack. Roots are:
853 - the integer registers of all threads
854 - all mappings belonging to the client, including stacks
855 - .. but excluding any client heap segments.
856 Client heap segments are excluded because we wish to differentiate
857 client heap blocks which are referenced only from inside the heap
858 from those outside. This facilitates the indirect vs direct loss
859 categorisation, which [if the users ever manage to understand it]
860 is really useful for detecting lost cycles.
861 */
862 { NSegment* seg;
863 Addr* seg_starts;
864 Int n_seg_starts;
865 seg_starts = get_seg_starts( &n_seg_starts );
866 tl_assert(seg_starts && n_seg_starts > 0);
867 /* VG_(am_show_nsegments)( 0,"leakcheck"); */
868 for (i = 0; i < n_seg_starts; i++) {
869 seg = VG_(am_find_nsegment)( seg_starts[i] );
870 tl_assert(seg);
871 if (seg->kind != SkFileC && seg->kind != SkAnonC)
872 continue;
873 if (!(seg->hasR && seg->hasW))
874 continue;
875 if (seg->isCH)
876 continue;
sewardjde3ad732006-07-27 23:12:17 +0000877
878 /* Don't poke around in device segments as this may cause
879 hangs. Exclude /dev/zero just in case someone allocated
880 memory by explicitly mapping /dev/zero. */
881 if (seg->kind == SkFileC
882 && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
883 HChar* dev_name = VG_(am_get_filename)( seg );
884 if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
885 /* don't skip /dev/zero */
886 } else {
887 /* skip this device mapping */
888 continue;
889 }
890 }
891
sewardj45f4e7c2005-09-27 19:20:21 +0000892 if (0)
893 VG_(printf)("ACCEPT %2d %p %p\n", i, seg->start, seg->end);
894 lc_scan_memory(seg->start, seg->end+1 - seg->start);
895 }
896 }
sewardjb5f6f512005-03-10 23:59:00 +0000897
898 /* Push registers onto mark stack */
njn6ace3ea2005-06-17 03:06:27 +0000899 VG_(apply_to_GP_regs)(lc_markstack_push);
sewardjb5f6f512005-03-10 23:59:00 +0000900
901 /* Keep walking the heap until everything is found */
902 lc_do_leakcheck(-1);
njn43c799e2003-04-08 00:08:52 +0000903
sewardj71bc3cb2005-05-19 00:25:45 +0000904 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
njn0fd92f42005-10-06 03:32:42 +0000905 VG_(message)(Vg_UserMsg, "checked %,lu bytes.", lc_scanned);
njn43c799e2003-04-08 00:08:52 +0000906
njn1d0825f2006-03-27 11:37:07 +0000907 blocks_leaked = MC_(bytes_leaked) = 0;
908 blocks_indirect = MC_(bytes_indirect) = 0;
909 blocks_dubious = MC_(bytes_dubious) = 0;
910 blocks_reachable = MC_(bytes_reachable) = 0;
911 blocks_suppressed = MC_(bytes_suppressed) = 0;
njn43c799e2003-04-08 00:08:52 +0000912
sewardjb5f6f512005-03-10 23:59:00 +0000913 if (mode == LC_Full)
njnb8dca862005-03-14 02:42:44 +0000914 full_report(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000915 else
916 make_summary();
njn43c799e2003-04-08 00:08:52 +0000917
sewardj71bc3cb2005-05-19 00:25:45 +0000918 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
nethercote0f19bce2003-12-02 10:17:44 +0000919 VG_(message)(Vg_UserMsg, "");
920 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
njn0fd92f42005-10-06 03:32:42 +0000921 VG_(message)(Vg_UserMsg, " definitely lost: %,lu bytes in %,lu blocks.",
njn1d0825f2006-03-27 11:37:07 +0000922 MC_(bytes_leaked), blocks_leaked );
sewardjb5f6f512005-03-10 23:59:00 +0000923 if (blocks_indirect > 0)
njn0fd92f42005-10-06 03:32:42 +0000924 VG_(message)(Vg_UserMsg, " indirectly lost: %,lu bytes in %,lu blocks.",
njn1d0825f2006-03-27 11:37:07 +0000925 MC_(bytes_indirect), blocks_indirect );
njn0fd92f42005-10-06 03:32:42 +0000926 VG_(message)(Vg_UserMsg, " possibly lost: %,lu bytes in %,lu blocks.",
njn1d0825f2006-03-27 11:37:07 +0000927 MC_(bytes_dubious), blocks_dubious );
njn0fd92f42005-10-06 03:32:42 +0000928 VG_(message)(Vg_UserMsg, " still reachable: %,lu bytes in %,lu blocks.",
njn1d0825f2006-03-27 11:37:07 +0000929 MC_(bytes_reachable), blocks_reachable );
njn0fd92f42005-10-06 03:32:42 +0000930 VG_(message)(Vg_UserMsg, " suppressed: %,lu bytes in %,lu blocks.",
njn1d0825f2006-03-27 11:37:07 +0000931 MC_(bytes_suppressed), blocks_suppressed );
njn6a329422005-03-12 20:38:13 +0000932 if (mode == LC_Summary && blocks_leaked > 0)
sewardjb5f6f512005-03-10 23:59:00 +0000933 VG_(message)(Vg_UserMsg,
934 "Use --leak-check=full to see details of leaked memory.");
njn1d0825f2006-03-27 11:37:07 +0000935 else if (!MC_(clo_show_reachable)) {
nethercote0f19bce2003-12-02 10:17:44 +0000936 VG_(message)(Vg_UserMsg,
937 "Reachable blocks (those to which a pointer was found) are not shown.");
938 VG_(message)(Vg_UserMsg,
939 "To see them, rerun with: --show-reachable=yes");
940 }
njn43c799e2003-04-08 00:08:52 +0000941 }
njn43c799e2003-04-08 00:08:52 +0000942
njn3e884182003-04-15 13:03:23 +0000943 VG_(free) ( lc_shadows );
sewardjb5f6f512005-03-10 23:59:00 +0000944 VG_(free) ( lc_markstack );
njn43c799e2003-04-08 00:08:52 +0000945}
946
947/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +0000948/*--- end ---*/
njn43c799e2003-04-08 00:08:52 +0000949/*--------------------------------------------------------------------*/
950