blob: fd5542225408fbc842fba76be044d6e6f796e68d [file] [log] [blame]
njn43c799e2003-04-08 00:08:52 +00001
2/*--------------------------------------------------------------------*/
3/*--- The leak checker, shared between Memcheck and Addrcheck. ---*/
4/*--- mac_leakcheck.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
nethercote137bc552003-11-14 17:47:54 +00008 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors, and AddrCheck, a lightweight Valgrind tool
njn43c799e2003-04-08 00:08:52 +000010 for detecting memory errors.
11
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn43c799e2003-04-08 00:08:52 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjb5f6f512005-03-10 23:59:00 +000033#include <setjmp.h>
njn43c799e2003-04-08 00:08:52 +000034#include "mac_shared.h"
35
/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/* Round p down (resp. up) to a multiple of a; a must be a power of
   two for the mask trick to be valid. */
#define ROUNDDN(p, a)	((Addr)(p) & ~((a)-1))
#define ROUNDUP(p, a)	ROUNDDN((p)+(a)-1, (a))
/* Page-granularity rounding, used when skipping unreadable pages. */
#define PGROUNDDN(p)	ROUNDDN(p, VKI_PAGE_SIZE)
#define PGROUNDUP(p)	ROUNDUP(p, VKI_PAGE_SIZE)
njn43c799e2003-04-08 00:08:52 +000044
45/*------------------------------------------------------------*/
46/*--- Low-level address-space scanning, for the leak ---*/
47/*--- detector. ---*/
48/*------------------------------------------------------------*/
49
/* Jump target used to bail out of a memory scan when the fault
   catcher below fires on an unreadable address. */
static
jmp_buf memscan_jmpbuf;


/* Fault catcher installed for the duration of a scan: on SIGSEGV or
   SIGBUS, longjmp back into _lc_scan_memory so a bad page aborts only
   the current read, not the whole leak check. */
static
void vg_scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}
62
63/*------------------------------------------------------------*/
64/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
65/*------------------------------------------------------------*/
66
/* A block is either
   -- Proper-ly reached; a pointer to its start has been found
   -- Interior-ly reached; only an interior pointer to it has been found
   -- Unreached; so far, no pointers to any part of it have been found.
   -- IndirectLeak; leaked, but referred to by another leaked block
*/
typedef enum {
   Unreached,
   IndirectLeak,
   Interior,
   Proper
 } Reachedness;

/* An entry in the mark stack; parallel to lc_shadows[], linked
   together through 'next' indices rather than pointers. */
typedef struct {
   Int   next:30;	/* Index of next in mark stack */
   UInt  state:2;	/* Reachedness; 4 values fit in 2 bits */
   SizeT indirect;	/* if Unreached, how much is unreachable from here */
} MarkStack;

/* A block record, used for generating err msgs. */
typedef
   struct _LossRecord {
      struct _LossRecord* next;
      /* Where these lost blocks were allocated. */
      ExeContext*  allocated_at;
      /* Their reachability. */
      Reachedness  loss_mode;
      /* Number of blocks and total # bytes involved. */
      UInt total_bytes;
      UInt indirect_bytes;	/* bytes reachable only via these blocks */
      UInt num_blocks;
   }
   LossRecord;
101
102
/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism.  Simple
   linear scan; only compiled in debug builds. */
static
Int find_shadow_for_OLD ( Addr          ptr,
                          MAC_Chunk**   shadows,
                          Int           n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70);
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71);
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->size;
      /* Note the range is inclusive of the one-past-the-end address
         (ptr <= a_hi), matching the binary-search version. */
      if (a_lo <= ptr && ptr <= a_hi)
         return i;
   }
   return -1;
}
#endif
128
129
130static
njn3e884182003-04-15 13:03:23 +0000131Int find_shadow_for ( Addr ptr,
132 MAC_Chunk** shadows,
133 Int n_shadows )
njn43c799e2003-04-08 00:08:52 +0000134{
135 Addr a_mid_lo, a_mid_hi;
136 Int lo, mid, hi, retVal;
137 /* VG_(printf)("find shadow for %p = ", ptr); */
138 retVal = -1;
139 lo = 0;
140 hi = n_shadows-1;
141 while (True) {
njn3e884182003-04-15 13:03:23 +0000142 /* invariant: current unsearched space is from lo to hi, inclusive. */
njn43c799e2003-04-08 00:08:52 +0000143 if (lo > hi) break; /* not found */
144
145 mid = (lo + hi) / 2;
njn3e884182003-04-15 13:03:23 +0000146 a_mid_lo = shadows[mid]->data;
sewardjb5f6f512005-03-10 23:59:00 +0000147 a_mid_hi = shadows[mid]->data + shadows[mid]->size;
njn43c799e2003-04-08 00:08:52 +0000148
149 if (ptr < a_mid_lo) {
150 hi = mid-1;
151 continue;
152 }
153 if (ptr > a_mid_hi) {
154 lo = mid+1;
155 continue;
156 }
sewardj76754cf2005-03-14 00:14:04 +0000157 tl_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
njn43c799e2003-04-08 00:08:52 +0000158 retVal = mid;
159 break;
160 }
161
sewardjb5f6f512005-03-10 23:59:00 +0000162# if VG_DEBUG_LEAKCHECK
sewardj76754cf2005-03-14 00:14:04 +0000163 tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn43c799e2003-04-08 00:08:52 +0000164# endif
165 /* VG_(printf)("%d\n", retVal); */
166 return retVal;
167}
168
/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MAC_Chunk** lc_shadows;          /* sorted array of not-freed blocks */
static Int         lc_n_shadows;        /* number of entries in lc_shadows */
static MarkStack*  lc_markstack;        /* one entry per lc_shadows entry */
static Int         lc_markstack_top;    /* index of stack top; -1 if empty */
static Addr        lc_min_mallocd_addr; /* start of lowest malloc'd block */
static Addr        lc_max_mallocd_addr; /* one past end of highest block */
static SizeT       lc_scanned;          /* bytes of memory scanned */

/* Tool-supplied predicates: is this 64k chunk / this address worth
   dereferencing during the scan? */
static Bool        (*lc_is_valid_chunk)  (UInt chunk);
static Bool        (*lc_is_valid_address)(Addr addr);
180
181static const Char *pp_lossmode(Reachedness lossmode)
njn43c799e2003-04-08 00:08:52 +0000182{
sewardjb5f6f512005-03-10 23:59:00 +0000183 const Char *loss = "?";
njn43c799e2003-04-08 00:08:52 +0000184
sewardjb5f6f512005-03-10 23:59:00 +0000185 switch(lossmode) {
186 case Unreached: loss = "definitely lost"; break;
187 case IndirectLeak: loss = "indirectly lost"; break;
188 case Interior: loss = "possibly lost"; break;
189 case Proper: loss = "still reachable"; break;
njn43c799e2003-04-08 00:08:52 +0000190 }
sewardjb5f6f512005-03-10 23:59:00 +0000191
192 return loss;
njn43c799e2003-04-08 00:08:52 +0000193}
194
/* Used for printing leak errors, avoids exposing the LossRecord type (which
   comes in as void*, requiring a cast.  n_this_record/n_total_records
   give the record's position in the sorted report. */
void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
{
   LossRecord* l = (LossRecord*)vl;
   const Char *loss = pp_lossmode(l->loss_mode);

   VG_(message)(Vg_UserMsg, "");
   if (l->indirect_bytes) {
      /* Clique leaders: report direct and indirect bytes separately. */
      VG_(message)(Vg_UserMsg,
         "%d (%d direct, %d indirect) bytes in %d blocks are %s in loss record %d of %d",
         l->total_bytes + l->indirect_bytes,
         l->total_bytes, l->indirect_bytes, l->num_blocks,
         loss, n_this_record, n_total_records);
   } else {
      VG_(message)(Vg_UserMsg,
         "%d bytes in %d blocks are %s in loss record %d of %d",
         l->total_bytes, l->num_blocks,
         loss, n_this_record, n_total_records);
   }
   VG_(pp_ExeContext)(l->allocated_at);
}
217
/* Per-category byte totals, reset and recomputed by each leak check
   (see full_report() and make_summary()). */
Int MAC_(bytes_leaked)     = 0;  /* Unreached */
Int MAC_(bytes_indirect)   = 0;  /* IndirectLeak */
Int MAC_(bytes_dubious)    = 0;  /* Interior */
Int MAC_(bytes_reachable)  = 0;  /* Proper */
Int MAC_(bytes_suppressed) = 0;  /* matched by a suppression */
njn47363ab2003-04-21 13:24:40 +0000223
njn06072ec2003-09-30 15:35:13 +0000224static Int lc_compar(void* n1, void* n2)
225{
226 MAC_Chunk* mc1 = *(MAC_Chunk**)n1;
227 MAC_Chunk* mc2 = *(MAC_Chunk**)n2;
228 return (mc1->data < mc2->data ? -1 : 1);
229}
230
/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void _lc_markstack_push(Addr ptr, Int clique)
{
   Int sh_no;

   if (!VG_(is_client_addr)(ptr)) /* quick filter */
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr <= lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

   /* First sighting of an Unreached block: queue it for scanning. */
   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %p is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)(" clique %d joining clique %d adding %d+%d bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
               else
                  VG_(printf)(" %d joining %d adding %d\n",
                              sh_no, clique, lc_shadows[sh_no]->size);
            }

            /* Fold this block's direct and (former) indirect sizes
               into the leader's indirect total. */
            lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      /* Start pointer found: block is properly reachable. */
      lc_markstack[sh_no].state = Proper;
   } else {
      /* Interior pointer: only upgrade from Unreached, never demote
         a block already known Proper. */
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
300
/* Push ptr with no clique gathering (clique == -1); used when marking
   from roots and registers. */
static void lc_markstack_push(Addr ptr)
{
   _lc_markstack_push(ptr, -1);
}
305
306/* Return the top of the mark stack, if any. */
307static Int lc_markstack_pop(void)
308{
309 Int ret = lc_markstack_top;
310
311 if (ret != -1) {
312 lc_markstack_top = lc_markstack[ret].next;
313 lc_markstack[ret].next = -1;
314 }
315
316 return ret;
317}
318
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessable, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   /* Only scan aligned words; trim the range to word boundaries. */
   Addr ptr = ROUNDUP(start, sizeof(Addr));
   Addr end = ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      /* NOTE(review): len is printed with %p here — debug-only, but
         looks like it was meant to be start+len or %d. */
      VG_(printf)("scan %p-%p\n", start, len);
   /* Save the signal mask so it can be restored after a longjmp out
      of the fault catcher. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(vg_scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = PGROUNDUP(ptr+1);	/* first page bad */

   while(ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_valid_chunk)(PM_IDX(ptr))) {
         ptr = ROUNDUP(ptr+1, SECONDARY_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonble */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(is_client_addr)(ptr) ||
             !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      /* Read one word under setjmp protection; a fault longjmps to
         the else branch below. */
      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_address)(ptr)) {
            addr = *(Addr *)ptr;
            _lc_markstack_push(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = PGROUNDUP(ptr+1); /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
376
/* Non-clique wrapper, in the shape required by VG_(find_root_memory). */
static void lc_scan_memory(Addr start, SizeT len)
{
   _lc_scan_memory(start, len, -1);
}
381
/* Process the mark stack until empty: scan each popped block's
   contents, which may push further blocks.  'clique' is the index of
   the current clique leader, or -1 when not gathering cliques (in
   which case newly found blocks are marked Proper/Interior rather
   than IndirectLeak). */
static void lc_do_leakcheck(Int clique)
{
   Int top;

   while((top = lc_markstack_pop()) != -1) {
      tl_assert(top >= 0 && top < lc_n_shadows);
      /* Anything on the stack has been reached somehow. */
      tl_assert(lc_markstack[top].state != Unreached);

      _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
   }
}
396
/* Per-category block counts, accumulated alongside the MAC_(bytes_*)
   totals; reset at the start of each leak check. */
static Int blocks_leaked;
static Int blocks_indirect;
static Int blocks_dubious;
static Int blocks_reachable;
static Int blocks_suppressed;
402
njnb8dca862005-03-14 02:42:44 +0000403static void full_report(ThreadId tid)
sewardjb5f6f512005-03-10 23:59:00 +0000404{
405 Int i;
406 Int n_lossrecords;
407 LossRecord* errlist;
408 LossRecord* p;
409 Bool is_suppressed;
410
411 /* Go through and group lost structures into cliques. For each
412 Unreached block, push it onto the mark stack, and find all the
413 blocks linked to it. These are marked IndirectLeak, and their
414 size is added to the clique leader's indirect size. If one of
415 the found blocks was itself a clique leader (from a previous
416 pass), then the cliques are merged. */
417 for (i = 0; i < lc_n_shadows; i++) {
418 if (VG_DEBUG_CLIQUE)
419 VG_(printf)("cliques: %d at %p -> %s\n",
420 i, lc_shadows[i]->data, pp_lossmode(lc_markstack[i].state));
421 if (lc_markstack[i].state != Unreached)
422 continue;
423
sewardj76754cf2005-03-14 00:14:04 +0000424 tl_assert(lc_markstack_top == -1);
sewardjb5f6f512005-03-10 23:59:00 +0000425
426 if (VG_DEBUG_CLIQUE)
427 VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
428
429 _lc_markstack_push(lc_shadows[i]->data, i);
430
431 lc_do_leakcheck(i);
432
sewardj76754cf2005-03-14 00:14:04 +0000433 tl_assert(lc_markstack_top == -1);
434 tl_assert(lc_markstack[i].state == IndirectLeak);
sewardjb5f6f512005-03-10 23:59:00 +0000435
436 lc_markstack[i].state = Unreached; /* Return to unreached state,
437 to indicate its a clique
438 leader */
439 }
440
441 /* Common up the lost blocks so we can print sensible error messages. */
442 n_lossrecords = 0;
443 errlist = NULL;
444 for (i = 0; i < lc_n_shadows; i++) {
445 ExeContext* where = lc_shadows[i]->where;
446
447 for (p = errlist; p != NULL; p = p->next) {
448 if (p->loss_mode == lc_markstack[i].state
449 && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
450 p->allocated_at,
451 where) ) {
452 break;
453 }
454 }
455 if (p != NULL) {
456 p->num_blocks ++;
457 p->total_bytes += lc_shadows[i]->size;
458 p->indirect_bytes += lc_markstack[i].indirect;
459 } else {
460 n_lossrecords ++;
461 p = VG_(malloc)(sizeof(LossRecord));
462 p->loss_mode = lc_markstack[i].state;
463 p->allocated_at = where;
464 p->total_bytes = lc_shadows[i]->size;
465 p->indirect_bytes = lc_markstack[i].indirect;
466 p->num_blocks = 1;
467 p->next = errlist;
468 errlist = p;
469 }
470 }
471
472 /* Print out the commoned-up blocks and collect summary stats. */
473 for (i = 0; i < n_lossrecords; i++) {
474 Bool print_record;
475 LossRecord* p_min = NULL;
476 UInt n_min = 0xFFFFFFFF;
477 for (p = errlist; p != NULL; p = p->next) {
478 if (p->num_blocks > 0 && p->total_bytes < n_min) {
479 n_min = p->total_bytes + p->indirect_bytes;
480 p_min = p;
481 }
482 }
sewardj76754cf2005-03-14 00:14:04 +0000483 tl_assert(p_min != NULL);
sewardjb5f6f512005-03-10 23:59:00 +0000484
485 /* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
486 we disallow that when --leak-check=yes.
487
488 Prints the error if not suppressed, unless it's reachable (Proper or IndirectLeak)
489 and --show-reachable=no */
490
491 print_record = ( MAC_(clo_show_reachable) ||
492 Unreached == p_min->loss_mode || Interior == p_min->loss_mode );
493 is_suppressed =
njnb8dca862005-03-14 02:42:44 +0000494 VG_(unique_error) ( tid, LeakErr, (UInt)i+1,
sewardjb5f6f512005-03-10 23:59:00 +0000495 (Char*)n_lossrecords, (void*) p_min,
496 p_min->allocated_at, print_record,
497 /*allow_GDB_attach*/False, /*count_error*/False );
498
499 if (is_suppressed) {
500 blocks_suppressed += p_min->num_blocks;
501 MAC_(bytes_suppressed) += p_min->total_bytes;
502
503 } else if (Unreached == p_min->loss_mode) {
504 blocks_leaked += p_min->num_blocks;
505 MAC_(bytes_leaked) += p_min->total_bytes;
506
507 } else if (IndirectLeak == p_min->loss_mode) {
508 blocks_indirect += p_min->num_blocks;
509 MAC_(bytes_indirect)+= p_min->total_bytes;
510
511 } else if (Interior == p_min->loss_mode) {
512 blocks_dubious += p_min->num_blocks;
513 MAC_(bytes_dubious) += p_min->total_bytes;
514
515 } else if (Proper == p_min->loss_mode) {
516 blocks_reachable += p_min->num_blocks;
517 MAC_(bytes_reachable) += p_min->total_bytes;
518
519 } else {
sewardj76754cf2005-03-14 00:14:04 +0000520 VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
sewardjb5f6f512005-03-10 23:59:00 +0000521 }
522 p_min->num_blocks = 0;
523 }
524}
525
526/* Compute a quick summary of the leak check. */
527static void make_summary()
528{
529 Int i;
530
531 for(i = 0; i < lc_n_shadows; i++) {
532 SizeT size = lc_shadows[i]->size;
533
534 switch(lc_markstack[i].state) {
535 case Unreached:
536 blocks_leaked++;
537 MAC_(bytes_leaked) += size;
538 break;
539
540 case Proper:
541 blocks_reachable++;
542 MAC_(bytes_reachable) += size;
543 break;
544
545 case Interior:
546 blocks_dubious++;
547 MAC_(bytes_dubious) += size;
548 break;
549
550 case IndirectLeak: /* shouldn't happen */
551 blocks_indirect++;
552 MAC_(bytes_indirect) += size;
553 break;
554 }
555 }
556}
557
/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   vg_scan_all_valid_memory above).  All this is to avoid duplication
   of the leak-detection code for Memcheck and Addrcheck.
   Also pass in a tool-specific function to extract the .where field
   for allocated blocks, an indication of the resolution wanted for
   distinguishing different allocation points, and whether or not
   reachable blocks should be shown.
*/
void MAC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_valid_64k_chunk) ( UInt ),
   Bool (*is_valid_address) ( Addr )
)
{
   Int i;

   tl_assert(mode != LC_Off);

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap.  Strict '<': even
      abutting blocks would make find_shadow_for's inclusive
      one-past-end ranges ambiguous. */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1) {
         VG_(message)(Vg_UserMsg,
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size;

   /* One mark-stack entry per block, all initially Unreached. */
   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_valid_chunk   = is_valid_64k_chunk;
   lc_is_valid_address = is_valid_address;

   lc_scanned = 0;

   /* Do the scan of memory, pushing any pointers onto the mark stack */
   VG_(find_root_memory)(lc_scan_memory);

   /* Push registers onto mark stack */
   VG_(mark_from_registers)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);

   /* Reset all counters before tallying this run's results. */
   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_indirect   = MAC_(bytes_indirect)   = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
                               MAC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, " indirectly lost: %d bytes in %d blocks.",
                      MAC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
                   MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
                   MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, " suppressed: %d bytes in %d blocks.",
                   MAC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary && blocks_leaked > 0)
         VG_(message)(Vg_UserMsg,
                      "Use --leak-check=full to see details of leaked memory.");
      else if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg,
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}
677
678/*--------------------------------------------------------------------*/
679/*--- end mac_leakcheck.c ---*/
680/*--------------------------------------------------------------------*/
681