/*--------------------------------------------------------------------*/
/*--- The leak checker, shared between Memcheck and Addrcheck.     ---*/
/*---                                              mac_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors, and AddrCheck, a lightweight Valgrind tool
   for detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include <setjmp.h>
#include "mac_shared.h"

/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

#define ROUNDDN(p, a)   ((Addr)(p) & ~((a)-1))
#define ROUNDUP(p, a)   ROUNDDN((p)+(a)-1, (a))
#define PGROUNDDN(p)    ROUNDDN(p, VKI_PAGE_SIZE)
#define PGROUNDUP(p)    ROUNDUP(p, VKI_PAGE_SIZE)

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

static
jmp_buf memscan_jmpbuf;

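/* Fault catcher installed while scanning.  If the scan touches an
   unmapped or unreadable address, the resulting SIGSEGV/SIGBUS lands
   here and we longjmp back out so the scan can skip the bad page. */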
static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}

/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* A block is either
   -- Proper-ly reached; a pointer to its start has been found
   -- Interior-ly reached; only an interior pointer to it has been found
   -- Unreached; so far, no pointers to any part of it have been found.
   -- IndirectLeak; leaked, but referred to by another leaked block
*/
typedef enum {
   Unreached,
   IndirectLeak,
   Interior,
   Proper
} Reachedness;

/* An entry in the mark stack */
typedef struct {
   Int   next:30;     /* Index of next in mark stack */
   UInt  state:2;     /* Reachedness */
   SizeT indirect;    /* if Unreached, how much is unreachable from here */
} MarkStack;

/* A block record, used for generating err msgs. */
typedef
   struct _LossRecord {
      struct _LossRecord* next;
      /* Where these lost blocks were allocated. */
      ExeContext*  allocated_at;
      /* Their reachability. */
      Reachedness  loss_mode;
      /* Number of blocks and total # bytes involved. */
      UInt         total_bytes;
      UInt         indirect_bytes;
      UInt         num_blocks;
   }
   LossRecord;

/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr          ptr,
                          MAC_Chunk**   shadows,
                          Int           n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70);
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71);
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->size;
      if (a_lo <= ptr && ptr <= a_hi)
         return i;
   }
   return -1;
}
#endif

static
Int find_shadow_for ( Addr          ptr,
                      MAC_Chunk**   shadows,
                      Int           n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int  lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo = 0;
   hi = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break;   /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->size;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr > a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}

/* Globals shared by the leak-check routines below; they are set up by
   MAC_(do_detect_memory_leaks). */
static MAC_Chunk** lc_shadows;
static Int         lc_n_shadows;
static MarkStack*  lc_markstack;
static Int         lc_markstack_top;
static Addr        lc_min_mallocd_addr;
static Addr        lc_max_mallocd_addr;
static SizeT       lc_scanned;

static Bool        (*lc_is_within_valid_secondary) (Addr addr);
static Bool        (*lc_is_valid_aligned_word)     (Addr addr);

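/* Return the user-visible description of a Reachedness value, as it
   appears in the leak report ("definitely lost", etc). */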
static const Char *pp_lossmode(Reachedness lossmode)
{
   const Char *loss = "?";

   switch(lossmode) {
   case Unreached:    loss = "definitely lost"; break;
   case IndirectLeak: loss = "indirectly lost"; break;
   case Interior:     loss = "possibly lost";   break;
   case Proper:       loss = "still reachable"; break;
   }

   return loss;
}

/* Used for printing leak errors; avoids exposing the LossRecord type
   (which comes in as void*, requiring a cast). */
void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
{
   LossRecord* l    = (LossRecord*)vl;
   const Char *loss = pp_lossmode(l->loss_mode);

   VG_(message)(Vg_UserMsg, "");
   if (l->indirect_bytes) {
      VG_(message)(Vg_UserMsg,
         "%d (%d direct, %d indirect) bytes in %d blocks are %s in loss record %d of %d",
         l->total_bytes + l->indirect_bytes,
         l->total_bytes, l->indirect_bytes, l->num_blocks,
         loss, n_this_record, n_total_records);
   } else {
      VG_(message)(Vg_UserMsg,
         "%d bytes in %d blocks are %s in loss record %d of %d",
         l->total_bytes, l->num_blocks,
         loss, n_this_record, n_total_records);
   }
   VG_(pp_ExeContext)(l->allocated_at);
}

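/* Running byte totals for each leak category; filled in by
   full_report() or make_summary() and printed in the final summary. */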
Int MAC_(bytes_leaked)     = 0;
Int MAC_(bytes_indirect)   = 0;
Int MAC_(bytes_dubious)    = 0;
Int MAC_(bytes_reachable)  = 0;
Int MAC_(bytes_suppressed) = 0;

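/* Comparator used with VG_(ssort) to order chunks by start address,
   so that find_shadow_for() can binary-search the sorted array. */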
static Int lc_compar(void* n1, void* n2)
{
   MAC_Chunk* mc1 = *(MAC_Chunk**)n1;
   MAC_Chunk* mc2 = *(MAC_Chunk**)n2;
   return (mc1->data < mc2->data ? -1 : 1);
}

/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void _lc_markstack_push(Addr ptr, Int clique)
{
   Int sh_no;

   if (!VG_(is_client_addr)(ptr))   /* quick filter */
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr <= lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %p is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %d\n",
                              sh_no, clique, lc_shadows[sh_no]->size);
            }

            lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect   = 0;   /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      lc_markstack[sh_no].state = Proper;
   } else {
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}

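/* Push a pointer with no clique in force; this is the callback handed
   to VG_(mark_from_registers) below. */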
static void lc_markstack_push(Addr ptr)
{
   _lc_markstack_push(ptr, -1);
}

/* Return the top of the mark stack, if any. */
static Int lc_markstack_pop(void)
{
   Int ret = lc_markstack_top;

   if (ret != -1) {
      lc_markstack_top = lc_markstack[ret].next;
      lc_markstack[ret].next = -1;
   }

   return ret;
}

/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   Addr ptr = ROUNDUP(start,     sizeof(Addr));
   Addr end = ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = PGROUNDUP(ptr+1);   /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = ROUNDUP(ptr+1, SECONDARY_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(is_client_addr)(ptr) ||
             !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE;   /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            addr = *(Addr *)ptr;
            _lc_markstack_push(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = PGROUNDUP(ptr+1);   /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}

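/* Scan a root memory area with no clique in force; this is the
   callback passed to VG_(find_root_memory) below. */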
static void lc_scan_memory(Addr start, SizeT len)
{
   _lc_scan_memory(start, len, -1);
}

/* Process the mark stack until empty.  If clique != -1, we're
   gathering leaked blocks into a clique, so blocks reached from here
   get marked IndirectLeak (see _lc_markstack_push). */
static void lc_do_leakcheck(Int clique)
{
   Int top;

   while((top = lc_markstack_pop()) != -1) {
      tl_assert(top >= 0 && top < lc_n_shadows);
      tl_assert(lc_markstack[top].state != Unreached);

      _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
   }
}

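/* Block counts for each leak category, collected alongside the
   MAC_(bytes_*) totals above. */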
static Int blocks_leaked;
static Int blocks_indirect;
static Int blocks_dubious;
static Int blocks_reachable;
static Int blocks_suppressed;

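/* Full (--leak-check=full) reporting: gather Unreached blocks into
   cliques, common up blocks with the same allocation point and loss
   mode into loss records, then print the records and update the
   summary counters. */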
static void full_report(ThreadId tid)
{
   Int i;
   Int n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool is_suppressed;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %p -> %s\n",
                     i, lc_shadows[i]->data, pp_lossmode(lc_markstack[i].state));
      if (lc_markstack[i].state != Unreached)
         continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);

      _lc_markstack_push(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak);

      lc_markstack[i].state = Unreached;   /* Return to unreached state,
                                              to indicate it's a clique
                                              leader */
   }

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks++;
         p->total_bytes    += lc_shadows[i]->size;
         p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         n_lossrecords++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode      = lc_markstack[i].state;
         p->allocated_at   = where;
         p->total_bytes    = lc_shadows[i]->size;
         p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks     = 1;
         p->next           = errlist;
         errlist           = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool print_record;
      LossRecord* p_min = NULL;
      UInt        n_min = 0xFFFFFFFF;
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0 && p->total_bytes + p->indirect_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no */

      print_record = ( MAC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Interior  == p_min->loss_mode );
      is_suppressed =
         VG_(unique_error) ( tid, LeakErr, (UInt)i+1,
                             (Char*)n_lossrecords, (void*) p_min,
                             p_min->allocated_at, print_record,
                             /*allow_GDB_attach*/False, /*count_error*/False );

      if (is_suppressed) {
         blocks_suppressed      += p_min->num_blocks;
         MAC_(bytes_suppressed) += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         blocks_leaked          += p_min->num_blocks;
         MAC_(bytes_leaked)     += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         blocks_indirect        += p_min->num_blocks;
         MAC_(bytes_indirect)   += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         blocks_dubious         += p_min->num_blocks;
         MAC_(bytes_dubious)    += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         blocks_reachable       += p_min->num_blocks;
         MAC_(bytes_reachable)  += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;
   }
}

/* Compute a quick summary of the leak check. */
static void make_summary(void)
{
   Int i;

   for(i = 0; i < lc_n_shadows; i++) {
      SizeT size = lc_shadows[i]->size;

      switch(lc_markstack[i].state) {
      case Unreached:
         blocks_leaked++;
         MAC_(bytes_leaked) += size;
         break;

      case Proper:
         blocks_reachable++;
         MAC_(bytes_reachable) += size;
         break;

      case Interior:
         blocks_dubious++;
         MAC_(bytes_dubious) += size;
         break;

      case IndirectLeak:   /* shouldn't happen */
         blocks_indirect++;
         MAC_(bytes_indirect) += size;
         break;
      }
   }
}

/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validity predicates (is_within_valid_secondary,
   is_valid_aligned_word).  All this is to avoid duplication of the
   leak-detection code for Memcheck and Addrcheck.  The resolution
   used to distinguish allocation points and whether reachable blocks
   are shown are controlled by MAC_(clo_leak_resolution) and
   MAC_(clo_show_reachable) respectively.
*/
void MAC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;

   tl_assert(mode != LC_Off);

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1) {
         VG_(message)(Vg_UserMsg,
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size;

   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next     = -1;
      lc_markstack[i].state    = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Do the scan of memory, pushing any pointers onto the mark stack */
   VG_(find_root_memory)(lc_scan_memory);

   /* Push registers onto mark stack */
   VG_(mark_from_registers)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);

   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_indirect   = MAC_(bytes_indirect)   = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.",
                               MAC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %d bytes in %d blocks.",
                                  MAC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %d bytes in %d blocks.",
                               MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.",
                               MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.",
                               MAC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary && blocks_leaked > 0)
         VG_(message)(Vg_UserMsg,
            "Use --leak-check=full to see details of leaked memory.");
      else if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg,
            "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                          mac_leakcheck.c ---*/
/*--------------------------------------------------------------------*/