/*--------------------------------------------------------------------*/
/*--- The leak checker, shared between Memcheck and Addrcheck.    ---*/
/*---                                             mac_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors, and AddrCheck, a lightweight Valgrind tool
   for detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_errormgr.h"      // For mac_shared.h
#include "pub_tool_execontext.h"    // For mac_shared.h
#include "pub_tool_hashtable.h"     // For mac_shared.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_profile.h"       // For mac_shared.h
#include "pub_tool_signals.h"

#include "mac_shared.h"

#include <setjmp.h>                 // For jmp_buf

/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

static
jmp_buf memscan_jmpbuf;

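/* Fault catcher installed while scanning memory: on SIGSEGV or SIGBUS,
   longjmp back into _lc_scan_memory below, which then skips past the
   offending page. */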
static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}

/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* A block is either
   -- Proper-ly reached; a pointer to its start has been found
   -- Interior-ly reached; only an interior pointer to it has been found
   -- Unreached; so far, no pointers to any part of it have been found.
   -- IndirectLeak; leaked, but referred to by another leaked block
*/
typedef enum {
   Unreached,
   IndirectLeak,
   Interior,
   Proper
} Reachedness;

/* An entry in the mark stack */
typedef struct {
   Int   next:30;       /* Index of next in mark stack */
   UInt  state:2;       /* Reachedness */
   SizeT indirect;      /* if Unreached, how much is unreachable from here */
} MarkStack;

/* A block record, used for generating err msgs. */
typedef
   struct _LossRecord {
      struct _LossRecord* next;
      /* Where these lost blocks were allocated. */
      ExeContext*  allocated_at;
      /* Their reachability. */
      Reachedness  loss_mode;
      /* Number of blocks and total # bytes involved. */
      UInt         total_bytes;
      UInt         indirect_bytes;
      UInt         num_blocks;
   }
   LossRecord;

/* The 'extra' struct for leak errors. */
typedef struct {
   UInt        n_this_record;
   UInt        n_total_records;
   LossRecord* lossRecord;
} LeakExtra;

/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr        ptr,
                          MAC_Chunk** shadows,
                          Int         n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70);
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71);
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->size;
      if (a_lo <= ptr && ptr <= a_hi)
         return i;
   }
   return -1;
}
#endif

static
Int find_shadow_for ( Addr        ptr,
                      MAC_Chunk** shadows,
                      Int         n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int  lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo     = 0;
   hi     = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->size;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr > a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}

/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MAC_Chunk** lc_shadows;
static Int         lc_n_shadows;
static MarkStack*  lc_markstack;
static Int         lc_markstack_top;
static Addr        lc_min_mallocd_addr;
static Addr        lc_max_mallocd_addr;
static SizeT       lc_scanned;

static Bool (*lc_is_within_valid_secondary) (Addr addr);
static Bool (*lc_is_valid_aligned_word)     (Addr addr);

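/* Describe a Reachedness value in the words used in the text leak report. */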
static const HChar* str_lossmode ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "definitely lost"; break;
      case IndirectLeak: loss = "indirectly lost"; break;
      case Interior:     loss = "possibly lost";   break;
      case Proper:       loss = "still reachable"; break;
   }
   return loss;
}

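/* Give the <kind> element text used for a Reachedness value in XML output. */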
static const HChar* xml_kind ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "Leak_DefinitelyLost"; break;
      case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
      case Interior:     loss = "Leak_PossiblyLost";   break;
      case Proper:       loss = "Leak_StillReachable"; break;
   }
   return loss;
}

/* Used for printing leak errors, avoids exposing the LossRecord type (which
   comes in as void*, requiring a cast). */
void MAC_(pp_LeakError)(void* vextra)
{
   HChar* xpre  = VG_(clo_xml) ? "  <what>" : "";
   HChar* xpost = VG_(clo_xml) ? "</what>"  : "";

   LeakExtra* extra = (LeakExtra*)vextra;
   LossRecord* l    = extra->lossRecord;
   const Char *loss = str_lossmode(l->loss_mode);

   if (VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "  <kind>%s</kind>", xml_kind(l->loss_mode));
   } else {
      VG_(message)(Vg_UserMsg, "");
   }

   if (l->indirect_bytes) {
      VG_(message)(Vg_UserMsg,
         "%s%d (%d direct, %d indirect) bytes in %d blocks"
         " are %s in loss record %d of %d%s",
         xpre,
         l->total_bytes + l->indirect_bytes,
         l->total_bytes, l->indirect_bytes, l->num_blocks,
         loss, extra->n_this_record, extra->n_total_records,
         xpost
      );
      if (VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, "  <leakedbytes>%d</leakedbytes>",
                                  l->total_bytes + l->indirect_bytes);
         VG_(message)(Vg_UserMsg, "  <leakedblocks>%d</leakedblocks>",
                                  l->num_blocks);
      }
   } else {
      VG_(message)(
         Vg_UserMsg,
         "%s%d bytes in %d blocks are %s in loss record %d of %d%s",
         xpre,
         l->total_bytes, l->num_blocks,
         loss, extra->n_this_record, extra->n_total_records,
         xpost
      );
      if (VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, "  <leakedbytes>%d</leakedbytes>",
                                  l->total_bytes);
         VG_(message)(Vg_UserMsg, "  <leakedblocks>%d</leakedblocks>",
                                  l->num_blocks);
      }
   }
   VG_(pp_ExeContext)(l->allocated_at);
}

Int MAC_(bytes_leaked)     = 0;
Int MAC_(bytes_indirect)   = 0;
Int MAC_(bytes_dubious)    = 0;
Int MAC_(bytes_reachable)  = 0;
Int MAC_(bytes_suppressed) = 0;

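/* Compare two chunks by start address; used with VG_(ssort) below so
   that lc_shadows is ordered for find_shadow_for's binary search. */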
static Int lc_compar(void* n1, void* n2)
{
   MAC_Chunk* mc1 = *(MAC_Chunk**)n1;
   MAC_Chunk* mc2 = *(MAC_Chunk**)n2;
   return (mc1->data < mc2->data ? -1 : 1);
}

/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void _lc_markstack_push(Addr ptr, Int clique)
{
   Int sh_no;

   if (!VG_(is_client_addr)(ptr))   /* quick filter */
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr <= lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %p is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %d\n",
                              sh_no, clique, lc_shadows[sh_no]->size);
            }

            lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0;   /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      lc_markstack[sh_no].state = Proper;
   } else {
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}

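/* Push with no clique gathering; used when scanning the guest
   general-purpose registers as roots. */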
static void lc_markstack_push(Addr ptr)
{
   _lc_markstack_push(ptr, -1);
}

/* Return the top of the mark stack, if any. */
static Int lc_markstack_pop(void)
{
   Int ret = lc_markstack_top;

   if (ret != -1) {
      lc_markstack_top = lc_markstack[ret].next;
      lc_markstack[ret].next = -1;
   }

   return ret;
}


/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SECONDARY_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(is_client_addr)(ptr) ||
             !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            addr = *(Addr *)ptr;
            _lc_markstack_push(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}


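/* Scan a root range with no clique gathering; this is the callback
   handed to VG_(find_root_memory) below. */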
static void lc_scan_memory(Addr start, SizeT len)
{
   _lc_scan_memory(start, len, -1);
}

/* Process the mark stack until empty.  If clique != -1, we're
   gathering leaked blocks into that clique, so newly found blocks
   get marked IndirectLeak (see _lc_markstack_push). */
static void lc_do_leakcheck(Int clique)
{
   Int top;

   while ((top = lc_markstack_pop()) != -1) {
      tl_assert(top >= 0 && top < lc_n_shadows);
      tl_assert(lc_markstack[top].state != Unreached);

      _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
   }
}

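/* Block counts for the final LEAK SUMMARY, one per Reachedness category;
   the corresponding byte totals are the MAC_(bytes_*) globals above. */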
static Int blocks_leaked;
static Int blocks_indirect;
static Int blocks_dubious;
static Int blocks_reachable;
static Int blocks_suppressed;

static void full_report(ThreadId tid)
{
   Int  i;
   Int  n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool is_suppressed;
   LeakExtra leak_extra;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %p -> %s\n",
                     i, lc_shadows[i]->data, str_lossmode(lc_markstack[i].state));
      if (lc_markstack[i].state != Unreached)
         continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);

      _lc_markstack_push(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak);

      lc_markstack[i].state = Unreached; /* Return to unreached state,
                                            to indicate that it's a
                                            clique leader */
   }

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks     ++;
         p->total_bytes    += lc_shadows[i]->size;
         p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode      = lc_markstack[i].state;
         p->allocated_at   = where;
         p->total_bytes    = lc_shadows[i]->size;
         p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks     = 1;
         p->next           = errlist;
         errlist           = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      UInt        n_min = 0xFFFFFFFF;
      for (p = errlist; p != NULL; p = p->next) {
         /* Pick the unprinted record with the smallest (direct + indirect)
            byte count, so records are reported in increasing size order. */
         if (p->num_blocks > 0 && p->total_bytes + p->indirect_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no */

      print_record = ( MAC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Interior  == p_min->loss_mode );

      // Nb: because VG_(unique_error) does all the error processing
      // immediately, and doesn't save the error, leakExtra can be
      // stack-allocated.
      leak_extra.n_this_record   = i+1;
      leak_extra.n_total_records = n_lossrecords;
      leak_extra.lossRecord      = p_min;
      is_suppressed =
         VG_(unique_error) ( tid, LeakErr, /*Addr*/0, /*s*/NULL,
                             /*extra*/&leak_extra,
                             /*where*/p_min->allocated_at, print_record,
                             /*allow_GDB_attach*/False, /*count_error*/False );

      if (is_suppressed) {
         blocks_suppressed       += p_min->num_blocks;
         MAC_(bytes_suppressed)  += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         blocks_leaked       += p_min->num_blocks;
         MAC_(bytes_leaked)  += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         blocks_indirect       += p_min->num_blocks;
         MAC_(bytes_indirect)  += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         blocks_dubious       += p_min->num_blocks;
         MAC_(bytes_dubious)  += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         blocks_reachable       += p_min->num_blocks;
         MAC_(bytes_reachable)  += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;
   }
}

/* Compute a quick summary of the leak check. */
static void make_summary(void)
{
   Int i;

   for (i = 0; i < lc_n_shadows; i++) {
      SizeT size = lc_shadows[i]->size;

      switch (lc_markstack[i].state) {
      case Unreached:
         blocks_leaked++;
         MAC_(bytes_leaked) += size;
         break;

      case Proper:
         blocks_reachable++;
         MAC_(bytes_reachable) += size;
         break;

      case Interior:
         blocks_dubious++;
         MAC_(bytes_dubious) += size;
         break;

      case IndirectLeak:  /* shouldn't happen */
         blocks_indirect++;
         MAC_(bytes_indirect) += size;
         break;
      }
   }
}

/* Top level entry point to the leak detector.  Call here, passing in
   suitable address-validating functions (they are used by
   _lc_scan_memory above).  All this is to avoid duplication of the
   leak-detection code for Memcheck and Addrcheck.  The leak-check
   mode is passed in; the resolution wanted for distinguishing
   different allocation points and whether or not reachable blocks
   should be shown are taken from MAC_(clo_leak_resolution) and
   MAC_(clo_show_reachable).
*/
void MAC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;

   tl_assert(mode != LC_Off);

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg,
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size;

   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next     = -1;
      lc_markstack[i].state    = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Do the scan of memory, pushing any pointers onto the mark stack */
   VG_(find_root_memory)(lc_scan_memory);

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);

   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_indirect   = MAC_(bytes_indirect)   = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.",
                               MAC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %d bytes in %d blocks.",
                                  MAC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %d bytes in %d blocks.",
                               MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.",
                               MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.",
                               MAC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary && blocks_leaked > 0)
         VG_(message)(Vg_UserMsg,
            "Use --leak-check=full to see details of leaked memory.");
      else if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg,
            "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                         mac_leakcheck.c ---*/
/*--------------------------------------------------------------------*/