blob: bee38b25fb4fe715f86f42b1563de588d401b671 [file] [log] [blame]
njn43c799e2003-04-08 00:08:52 +00001
2/*--------------------------------------------------------------------*/
3/*--- The leak checker, shared between Memcheck and Addrcheck. ---*/
4/*--- mac_leakcheck.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
nethercote137bc552003-11-14 17:47:54 +00008 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors, and AddrCheck, a lightweight Valgrind tool
njn43c799e2003-04-08 00:08:52 +000010 for detecting memory errors.
11
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn43c799e2003-04-08 00:08:52 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjb5f6f512005-03-10 23:59:00 +000033#include <setjmp.h>
njn43c799e2003-04-08 00:08:52 +000034#include "mac_shared.h"
35
36/* Define to debug the memory-leak-detector. */
sewardjb5f6f512005-03-10 23:59:00 +000037#define VG_DEBUG_LEAKCHECK 0
38#define VG_DEBUG_CLIQUE 0
39
40#define ROUNDDN(p, a) ((Addr)(p) & ~((a)-1))
41#define ROUNDUP(p, a) ROUNDDN((p)+(a)-1, (a))
42#define PGROUNDDN(p) ROUNDDN(p, VKI_PAGE_SIZE)
43#define PGROUNDUP(p) ROUNDUP(p, VKI_PAGE_SIZE)
njn43c799e2003-04-08 00:08:52 +000044
45/*------------------------------------------------------------*/
46/*--- Low-level address-space scanning, for the leak ---*/
47/*--- detector. ---*/
48/*------------------------------------------------------------*/
49
50static
51jmp_buf memscan_jmpbuf;
52
53
54static
njn695c16e2005-03-27 03:40:28 +000055void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
njn43c799e2003-04-08 00:08:52 +000056{
sewardjb5f6f512005-03-10 23:59:00 +000057 if (0)
58 VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
59 if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
60 __builtin_longjmp(memscan_jmpbuf, 1);
njn43c799e2003-04-08 00:08:52 +000061}
62
63/*------------------------------------------------------------*/
64/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
65/*------------------------------------------------------------*/
66
67/* A block is either
68 -- Proper-ly reached; a pointer to its start has been found
69 -- Interior-ly reached; only an interior pointer to it has been found
70 -- Unreached; so far, no pointers to any part of it have been found.
sewardjb5f6f512005-03-10 23:59:00 +000071 -- IndirectLeak; leaked, but referred to by another leaked block
njn43c799e2003-04-08 00:08:52 +000072*/
sewardjb5f6f512005-03-10 23:59:00 +000073typedef enum {
74 Unreached,
75 IndirectLeak,
76 Interior,
77 Proper
78 } Reachedness;
79
80/* An entry in the mark stack */
81typedef struct {
82 Int next:30; /* Index of next in mark stack */
83 UInt state:2; /* Reachedness */
84 SizeT indirect; /* if Unreached, how much is unreachable from here */
85} MarkStack;
njn43c799e2003-04-08 00:08:52 +000086
87/* A block record, used for generating err msgs. */
88typedef
89 struct _LossRecord {
90 struct _LossRecord* next;
91 /* Where these lost blocks were allocated. */
92 ExeContext* allocated_at;
93 /* Their reachability. */
94 Reachedness loss_mode;
95 /* Number of blocks and total # bytes involved. */
96 UInt total_bytes;
sewardjb5f6f512005-03-10 23:59:00 +000097 UInt indirect_bytes;
njn43c799e2003-04-08 00:08:52 +000098 UInt num_blocks;
99 }
100 LossRecord;
101
102
103/* Find the i such that ptr points at or inside the block described by
104 shadows[i]. Return -1 if none found. This assumes that shadows[]
105 has been sorted on the ->data field. */
106
#if VG_DEBUG_LEAKCHECK
/* Reference implementation: simple linear scan over shadows[], used
   only to cross-check the binary search in find_shadow_for below.
   A block matches if ptr lies in [data, data+size] inclusive (the
   one-past-the-end address counts as "inside"). */
static
Int find_shadow_for_OLD ( Addr ptr, 
                          MAC_Chunk** shadows,
                          Int n_shadows )

{
   Int i;
   PROF_EVENT(70);
   for (i = 0; i < n_shadows; i++) {
      Addr lo, hi;
      PROF_EVENT(71);
      lo = shadows[i]->data;
      hi = ((Addr)shadows[i]->data) + shadows[i]->size;
      if (lo <= ptr && ptr <= hi)
         return i;
   }
   return -1;
}
#endif
128
129
130static
njn3e884182003-04-15 13:03:23 +0000131Int find_shadow_for ( Addr ptr,
132 MAC_Chunk** shadows,
133 Int n_shadows )
njn43c799e2003-04-08 00:08:52 +0000134{
135 Addr a_mid_lo, a_mid_hi;
136 Int lo, mid, hi, retVal;
137 /* VG_(printf)("find shadow for %p = ", ptr); */
138 retVal = -1;
139 lo = 0;
140 hi = n_shadows-1;
141 while (True) {
njn3e884182003-04-15 13:03:23 +0000142 /* invariant: current unsearched space is from lo to hi, inclusive. */
njn43c799e2003-04-08 00:08:52 +0000143 if (lo > hi) break; /* not found */
144
145 mid = (lo + hi) / 2;
njn3e884182003-04-15 13:03:23 +0000146 a_mid_lo = shadows[mid]->data;
sewardjb5f6f512005-03-10 23:59:00 +0000147 a_mid_hi = shadows[mid]->data + shadows[mid]->size;
njn43c799e2003-04-08 00:08:52 +0000148
149 if (ptr < a_mid_lo) {
150 hi = mid-1;
151 continue;
152 }
153 if (ptr > a_mid_hi) {
154 lo = mid+1;
155 continue;
156 }
sewardj76754cf2005-03-14 00:14:04 +0000157 tl_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
njn43c799e2003-04-08 00:08:52 +0000158 retVal = mid;
159 break;
160 }
161
sewardjb5f6f512005-03-10 23:59:00 +0000162# if VG_DEBUG_LEAKCHECK
sewardj76754cf2005-03-14 00:14:04 +0000163 tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn43c799e2003-04-08 00:08:52 +0000164# endif
165 /* VG_(printf)("%d\n", retVal); */
166 return retVal;
167}
168
169/* Globals, for the following callback used by VG_(detect_memory_leaks). */
njn3e884182003-04-15 13:03:23 +0000170static MAC_Chunk** lc_shadows;
171static Int lc_n_shadows;
sewardjb5f6f512005-03-10 23:59:00 +0000172static MarkStack* lc_markstack;
173static Int lc_markstack_top;
njn3e884182003-04-15 13:03:23 +0000174static Addr lc_min_mallocd_addr;
175static Addr lc_max_mallocd_addr;
sewardjb5f6f512005-03-10 23:59:00 +0000176static SizeT lc_scanned;
njn43c799e2003-04-08 00:08:52 +0000177
sewardjb5f6f512005-03-10 23:59:00 +0000178static Bool (*lc_is_valid_chunk) (UInt chunk);
179static Bool (*lc_is_valid_address)(Addr addr);
180
181static const Char *pp_lossmode(Reachedness lossmode)
njn43c799e2003-04-08 00:08:52 +0000182{
sewardjb5f6f512005-03-10 23:59:00 +0000183 const Char *loss = "?";
njn43c799e2003-04-08 00:08:52 +0000184
sewardjb5f6f512005-03-10 23:59:00 +0000185 switch(lossmode) {
186 case Unreached: loss = "definitely lost"; break;
187 case IndirectLeak: loss = "indirectly lost"; break;
188 case Interior: loss = "possibly lost"; break;
189 case Proper: loss = "still reachable"; break;
njn43c799e2003-04-08 00:08:52 +0000190 }
sewardjb5f6f512005-03-10 23:59:00 +0000191
192 return loss;
njn43c799e2003-04-08 00:08:52 +0000193}
194
/* Used for printing leak errors, avoids exposing the LossRecord type (which
   comes in as void*, requiring a cast.

   n_this_record / n_total_records position this record within the full
   set of loss records being reported. */
void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
{
   LossRecord* l = (LossRecord*)vl;
   const Char *loss = pp_lossmode(l->loss_mode);

   VG_(message)(Vg_UserMsg, "");
   if (l->indirect_bytes) {
      /* Clique leader: report direct and indirect byte counts separately. */
      VG_(message)(Vg_UserMsg, 
         "%d (%d direct, %d indirect) bytes in %d blocks are %s in loss record %d of %d",
         l->total_bytes + l->indirect_bytes, 
         l->total_bytes, l->indirect_bytes, l->num_blocks,
         loss, n_this_record, n_total_records);
   } else {
      VG_(message)(Vg_UserMsg, 
         "%d bytes in %d blocks are %s in loss record %d of %d",
         l->total_bytes, l->num_blocks,
         loss, n_this_record, n_total_records);
   }
   /* Show where the blocks in this record were allocated. */
   VG_(pp_ExeContext)(l->allocated_at);
}
217
njne8b5c052003-07-22 22:03:58 +0000218Int MAC_(bytes_leaked) = 0;
sewardjb5f6f512005-03-10 23:59:00 +0000219Int MAC_(bytes_indirect) = 0;
njne8b5c052003-07-22 22:03:58 +0000220Int MAC_(bytes_dubious) = 0;
221Int MAC_(bytes_reachable) = 0;
222Int MAC_(bytes_suppressed) = 0;
njn47363ab2003-04-21 13:24:40 +0000223
njn06072ec2003-09-30 15:35:13 +0000224static Int lc_compar(void* n1, void* n2)
225{
226 MAC_Chunk* mc1 = *(MAC_Chunk**)n1;
227 MAC_Chunk* mc2 = *(MAC_Chunk**)n2;
228 return (mc1->data < mc2->data ? -1 : 1);
229}
230
/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void _lc_markstack_push(Addr ptr, Int clique)
{
   Int sh_no;

   if (!VG_(is_client_addr)(ptr))      /* quick filter */
      return;

   /* Locate the malloc'd block (if any) that ptr points at or into. */
   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr <= lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

   /* First sighting of this block: push it so its payload is scanned
      for further pointers by lc_do_leakcheck. */
   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data, 
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %p is %d\n", 
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n", 
                              sh_no, clique, 
                              lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %d\n", 
                              sh_no, clique, lc_shadows[sh_no]->size);
            }

            /* Fold this block's direct size and any indirect total it
               accumulated as a former leader into the current leader. */
            lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      /* Pointer to the very start of the block: properly reached. */
      lc_markstack[sh_no].state = Proper;
   } else {
      /* Interior pointer: only "possibly lost" -- but never downgrade
         a block already marked Proper. */
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
300
/* Push ptr with no clique in effect (normal root-scanning mode). */
static void lc_markstack_push(Addr ptr)
{
   _lc_markstack_push(ptr, -1);
}
305
306/* Return the top of the mark stack, if any. */
307static Int lc_markstack_pop(void)
308{
309 Int ret = lc_markstack_top;
310
311 if (ret != -1) {
312 lc_markstack_top = lc_markstack[ret].next;
313 lc_markstack[ret].next = -1;
314 }
315
316 return ret;
317}
318
sewardj45d94cc2005-04-20 14:44:11 +0000319
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessable, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader.

   NOTE(review): the entire body is currently compiled out with
   "#if 0", so in this revision the function is a no-op and nothing is
   actually scanned -- presumably a temporary state during an
   address-space-manager rework; confirm before trusting leak results
   from this tree. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
#if 0
   /* Walk the range word-by-word, treating each aligned word as a
      potential pointer and pushing it via _lc_markstack_push. */
   Addr ptr = ROUNDUP(start, sizeof(Addr));
   Addr end = ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, len);
   /* Save the current signal mask and install a fault catcher, so a
      SIGSEGV/SIGBUS during the scan longjmps back here. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = PGROUNDUP(ptr+1);	/* first page bad */

   while(ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_valid_chunk)(PM_IDX(ptr))) {
	 ptr = ROUNDUP(ptr+1, SECONDARY_SIZE);
	 continue;
      }

      /* Look to see if this page seems reasonble */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
	 if (!VG_(is_client_addr)(ptr) ||
	     !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
	    ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
	 if ((*lc_is_valid_address)(ptr)) {
	    addr = *(Addr *)ptr;
	    /* candidate pointer found: mark/push the block it hits */
	    _lc_markstack_push(addr, clique);
	 } else if (0 && VG_DEBUG_LEAKCHECK)
	    VG_(printf)("%p not valid\n", ptr);
	 ptr += sizeof(Addr);
      } else {
	 /* We need to restore the signal mask, because we were
	    longjmped out of a signal handler. */
	 VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

	 ptr = PGROUNDUP(ptr+1); /* bad page - skip it */
      }
   }

   /* Restore the original mask and remove the fault catcher. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
#endif
}
379
sewardj45d94cc2005-04-20 14:44:11 +0000380
/* Scan [start, start+len) in non-clique mode; used as the callback
   for VG_(find_root_memory). */
static void lc_scan_memory(Addr start, SizeT len)
{
   _lc_scan_memory(start, len, -1);
}
385
386/* Process the mark stack until empty. If mopup is true, then we're
387 actually gathering leaked blocks, so they should be marked
388 IndirectLeak. */
389static void lc_do_leakcheck(Int clique)
390{
391 Int top;
392
393 while((top = lc_markstack_pop()) != -1) {
sewardj76754cf2005-03-14 00:14:04 +0000394 tl_assert(top >= 0 && top < lc_n_shadows);
395 tl_assert(lc_markstack[top].state != Unreached);
sewardjb5f6f512005-03-10 23:59:00 +0000396
397 _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
398 }
399}
400
401static Int blocks_leaked;
402static Int blocks_indirect;
403static Int blocks_dubious;
404static Int blocks_reachable;
405static Int blocks_suppressed;
406
njnb8dca862005-03-14 02:42:44 +0000407static void full_report(ThreadId tid)
sewardjb5f6f512005-03-10 23:59:00 +0000408{
409 Int i;
410 Int n_lossrecords;
411 LossRecord* errlist;
412 LossRecord* p;
413 Bool is_suppressed;
414
415 /* Go through and group lost structures into cliques. For each
416 Unreached block, push it onto the mark stack, and find all the
417 blocks linked to it. These are marked IndirectLeak, and their
418 size is added to the clique leader's indirect size. If one of
419 the found blocks was itself a clique leader (from a previous
420 pass), then the cliques are merged. */
421 for (i = 0; i < lc_n_shadows; i++) {
422 if (VG_DEBUG_CLIQUE)
423 VG_(printf)("cliques: %d at %p -> %s\n",
424 i, lc_shadows[i]->data, pp_lossmode(lc_markstack[i].state));
425 if (lc_markstack[i].state != Unreached)
426 continue;
427
sewardj76754cf2005-03-14 00:14:04 +0000428 tl_assert(lc_markstack_top == -1);
sewardjb5f6f512005-03-10 23:59:00 +0000429
430 if (VG_DEBUG_CLIQUE)
431 VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
432
433 _lc_markstack_push(lc_shadows[i]->data, i);
434
435 lc_do_leakcheck(i);
436
sewardj76754cf2005-03-14 00:14:04 +0000437 tl_assert(lc_markstack_top == -1);
438 tl_assert(lc_markstack[i].state == IndirectLeak);
sewardjb5f6f512005-03-10 23:59:00 +0000439
440 lc_markstack[i].state = Unreached; /* Return to unreached state,
441 to indicate its a clique
442 leader */
443 }
444
445 /* Common up the lost blocks so we can print sensible error messages. */
446 n_lossrecords = 0;
447 errlist = NULL;
448 for (i = 0; i < lc_n_shadows; i++) {
449 ExeContext* where = lc_shadows[i]->where;
450
451 for (p = errlist; p != NULL; p = p->next) {
452 if (p->loss_mode == lc_markstack[i].state
453 && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
454 p->allocated_at,
455 where) ) {
456 break;
457 }
458 }
459 if (p != NULL) {
460 p->num_blocks ++;
461 p->total_bytes += lc_shadows[i]->size;
462 p->indirect_bytes += lc_markstack[i].indirect;
463 } else {
464 n_lossrecords ++;
465 p = VG_(malloc)(sizeof(LossRecord));
466 p->loss_mode = lc_markstack[i].state;
467 p->allocated_at = where;
468 p->total_bytes = lc_shadows[i]->size;
469 p->indirect_bytes = lc_markstack[i].indirect;
470 p->num_blocks = 1;
471 p->next = errlist;
472 errlist = p;
473 }
474 }
475
476 /* Print out the commoned-up blocks and collect summary stats. */
477 for (i = 0; i < n_lossrecords; i++) {
478 Bool print_record;
479 LossRecord* p_min = NULL;
480 UInt n_min = 0xFFFFFFFF;
481 for (p = errlist; p != NULL; p = p->next) {
482 if (p->num_blocks > 0 && p->total_bytes < n_min) {
483 n_min = p->total_bytes + p->indirect_bytes;
484 p_min = p;
485 }
486 }
sewardj76754cf2005-03-14 00:14:04 +0000487 tl_assert(p_min != NULL);
sewardjb5f6f512005-03-10 23:59:00 +0000488
489 /* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
490 we disallow that when --leak-check=yes.
491
492 Prints the error if not suppressed, unless it's reachable (Proper or IndirectLeak)
493 and --show-reachable=no */
494
495 print_record = ( MAC_(clo_show_reachable) ||
496 Unreached == p_min->loss_mode || Interior == p_min->loss_mode );
497 is_suppressed =
njnb8dca862005-03-14 02:42:44 +0000498 VG_(unique_error) ( tid, LeakErr, (UInt)i+1,
sewardjb5f6f512005-03-10 23:59:00 +0000499 (Char*)n_lossrecords, (void*) p_min,
500 p_min->allocated_at, print_record,
501 /*allow_GDB_attach*/False, /*count_error*/False );
502
503 if (is_suppressed) {
504 blocks_suppressed += p_min->num_blocks;
505 MAC_(bytes_suppressed) += p_min->total_bytes;
506
507 } else if (Unreached == p_min->loss_mode) {
508 blocks_leaked += p_min->num_blocks;
509 MAC_(bytes_leaked) += p_min->total_bytes;
510
511 } else if (IndirectLeak == p_min->loss_mode) {
512 blocks_indirect += p_min->num_blocks;
513 MAC_(bytes_indirect)+= p_min->total_bytes;
514
515 } else if (Interior == p_min->loss_mode) {
516 blocks_dubious += p_min->num_blocks;
517 MAC_(bytes_dubious) += p_min->total_bytes;
518
519 } else if (Proper == p_min->loss_mode) {
520 blocks_reachable += p_min->num_blocks;
521 MAC_(bytes_reachable) += p_min->total_bytes;
522
523 } else {
sewardj76754cf2005-03-14 00:14:04 +0000524 VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
sewardjb5f6f512005-03-10 23:59:00 +0000525 }
526 p_min->num_blocks = 0;
527 }
528}
529
530/* Compute a quick summary of the leak check. */
531static void make_summary()
532{
533 Int i;
534
535 for(i = 0; i < lc_n_shadows; i++) {
536 SizeT size = lc_shadows[i]->size;
537
538 switch(lc_markstack[i].state) {
539 case Unreached:
540 blocks_leaked++;
541 MAC_(bytes_leaked) += size;
542 break;
543
544 case Proper:
545 blocks_reachable++;
546 MAC_(bytes_reachable) += size;
547 break;
548
549 case Interior:
550 blocks_dubious++;
551 MAC_(bytes_dubious) += size;
552 break;
553
554 case IndirectLeak: /* shouldn't happen */
555 blocks_indirect++;
556 MAC_(bytes_indirect) += size;
557 break;
558 }
559 }
560}
561
/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   scan_all_valid_memory above).  All this is to avoid duplication
   of the leak-detection code for Memcheck and Addrcheck.
   Also pass in a tool-specific function to extract the .where field
   for allocated blocks, an indication of the resolution wanted for
   distinguishing different allocation points, and whether or not
   reachable blocks should be shown.
*/
void MAC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_valid_64k_chunk) ( UInt ),
   Bool (*is_valid_address) ( Addr )
)
{
   Int i;
 
   tl_assert(mode != LC_Off);

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      /* no heap blocks at all: nothing to do */
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1) {
         VG_(message)(Vg_UserMsg, 
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg, 
                   "searching for pointers to %d not-freed blocks.", 
                   lc_n_shadows );

   /* Bounds of the whole malloc'd address range (shadows is sorted). */
   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size;

   /* One mark-stack entry per block, all initially Unreached and
      off-stack (next == -1). */
   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_valid_chunk   = is_valid_64k_chunk;
   lc_is_valid_address = is_valid_address;

   lc_scanned = 0;

   /* Do the scan of memory, pushing any pointers onto the mark stack */
   VG_(find_root_memory)(lc_scan_memory);

   /* Push registers onto mark stack */
   VG_(mark_from_registers)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);

   /* Reset the global tallies; filled in by full_report/make_summary. */
   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_indirect   = MAC_(bytes_indirect)   = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.", 
                               MAC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %d bytes in %d blocks.", 
                      MAC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %d bytes in %d blocks.", 
                               MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.", 
                               MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.", 
                               MAC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary && blocks_leaked > 0)
         VG_(message)(Vg_UserMsg,
                      "Use --leak-check=full to see details of leaked memory.");
      else if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg, 
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg, 
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   /* Free the per-check working storage. */
   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}
681
682/*--------------------------------------------------------------------*/
683/*--- end mac_leakcheck.c ---*/
684/*--------------------------------------------------------------------*/
685