Merge in changes from the 2.4.0 line. This basically brings in the
overhaul of the thread support. Many things are now probably broken,
but at least with --tool=none, simple and not-so-simple threaded and
non-threaded programs work.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3265 a5019735-40e9-0310-863c-91ae7b9d1cf9
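The memcheck half of this merge also lands the reworked leak checker: the old
page-by-page notify_word scan is replaced by a mark-stack reachability pass
that groups leaked blocks into cliques, so a block reachable only from other
leaked blocks is reported as "indirectly lost" rather than inflating the
"still reachable" count. As a quick illustration (this demo program and its
block sizes are mine, not part of the commit), losing the head of a linked
structure now splits the loss into direct and indirect bytes:

   /* Demo: losing the only pointer to the head of a list leaks the
      whole chain.  The head block becomes the clique leader
      ("definitely lost"); the node reachable only through it is
      counted as "indirectly lost". */
   #include <stdlib.h>

   struct node { struct node *next; char payload[100]; };

   int main(void)
   {
      struct node *head = malloc(sizeof *head);
      head->next       = malloc(sizeof *head->next);
      head->next->next = NULL;
      head = NULL;              /* last pointer to the chain is gone */
      return 0;
   }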
diff --git a/memcheck/mac_leakcheck.c b/memcheck/mac_leakcheck.c
index edae5a8..f2c8565 100644
--- a/memcheck/mac_leakcheck.c
+++ b/memcheck/mac_leakcheck.c
@@ -30,10 +30,17 @@
The GNU General Public License is contained in the file COPYING.
*/
+#include <setjmp.h>
#include "mac_shared.h"
/* Define to debug the memory-leak-detector. */
-/* #define VG_DEBUG_LEAKCHECK */
+#define VG_DEBUG_LEAKCHECK 0
+#define VG_DEBUG_CLIQUE 0
+
+#define ROUNDDN(p, a) ((Addr)(p) & ~((a)-1))
+#define ROUNDUP(p, a) ROUNDDN((p)+(a)-1, (a))
+#define PGROUNDDN(p) ROUNDDN(p, VKI_PAGE_SIZE)
+#define PGROUNDUP(p) ROUNDUP(p, VKI_PAGE_SIZE)
/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak ---*/
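A standalone sanity check of the new rounding macros (my sketch, not part of
the diff; it assumes a 4096-byte page and typedefs Addr locally so it
compiles outside the Valgrind tree):

   #include <assert.h>

   typedef unsigned long Addr;          /* stand-in for Valgrind's Addr */
   #define ROUNDDN(p, a) ((Addr)(p) & ~((a)-1))
   #define ROUNDUP(p, a) ROUNDDN((p)+(a)-1, (a))
   #define PAGE 4096UL                  /* stand-in for VKI_PAGE_SIZE */

   int main(void)
   {
      assert(ROUNDDN(4097, PAGE) == 4096);
      assert(ROUNDUP(4097, PAGE) == 8192);
      assert(ROUNDUP(4096, PAGE) == 4096);  /* aligned input is unchanged */
      return 0;
   }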
@@ -45,148 +52,12 @@
static
-void vg_scan_all_valid_memory_sighandler ( Int sigNo )
+void vg_scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
- __builtin_longjmp(memscan_jmpbuf, 1);
-}
-
-
-/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
- space and pass the addresses and values of all addressable,
- defined, aligned words to notify_word. This is the basis for the
- leak detector. Returns the number of calls made to notify_word.
-
- Addresses are validated 3 ways. First we enquire whether (addr >>
- 16) denotes a 64k chunk in use, by asking is_valid_64k_chunk(). If
- so, we decide for ourselves whether each x86-level (4 K) page in
- the chunk is safe to inspect. If yes, we enquire with
- is_valid_address() whether or not each of the 1024 word-locations
- on the page is valid. Only if so are that address and its contents
- passed to notify_word.
-
- This is all to avoid duplication of this machinery between
- Memcheck and Addrcheck.
-*/
-static
-UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ),
- Bool is_valid_address ( Addr ),
- void (*notify_word)( Addr, UInt ) )
-{
- /* All volatile, because some gccs seem paranoid about longjmp(). */
- volatile Bool anyValid;
- volatile Addr pageBase, addr;
- volatile UInt res, numPages, page, primaryMapNo;
- volatile UInt page_first_word, nWordsNotified;
-
- struct vki_sigaction sigbus_saved;
- struct vki_sigaction sigbus_new;
- struct vki_sigaction sigsegv_saved;
- struct vki_sigaction sigsegv_new;
- vki_sigset_t blockmask_saved;
- vki_sigset_t unblockmask_new;
-
- /* Temporarily install a new sigsegv and sigbus handler, and make
- sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
- first two can never be blocked anyway?) */
-
- sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
- sigbus_new.sa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
- sigbus_new.sa_restorer = NULL;
- res = VG_(sigemptyset)( &sigbus_new.sa_mask );
- tl_assert(res == 0);
-
- sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
- sigsegv_new.sa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
- sigsegv_new.sa_restorer = NULL;
- res = VG_(sigemptyset)( &sigsegv_new.sa_mask );
-   tl_assert(res == 0);
-
- res = VG_(sigemptyset)( &unblockmask_new );
- res |= VG_(sigaddset)( &unblockmask_new, VKI_SIGBUS );
- res |= VG_(sigaddset)( &unblockmask_new, VKI_SIGSEGV );
- res |= VG_(sigaddset)( &unblockmask_new, VKI_SIGTERM );
-   tl_assert(res == 0);
-
- res = VG_(sigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
-   tl_assert(res == 0);
-
- res = VG_(sigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
-   tl_assert(res == 0);
-
- res = VG_(sigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
-   tl_assert(res == 0);
-
- /* The signal handlers are installed. Actually do the memory scan. */
- numPages = 1 << (32-VKI_PAGE_SHIFT);
- tl_assert(numPages == 1048576);
- tl_assert(4096 == (1 << VKI_PAGE_SHIFT));
-
- nWordsNotified = 0;
-
- for (page = 0; page < numPages; page++) {
-
- /* Base address of this 4k page. */
- pageBase = page << VKI_PAGE_SHIFT;
-
- /* Skip if this page is in an unused 64k chunk. */
- primaryMapNo = pageBase >> 16;
- if (!is_valid_64k_chunk(primaryMapNo))
- continue;
-
- /* Next, establish whether or not we want to consider any
- locations on this page. We need to do so before actually
- prodding it, because prodding it when in fact it is not
- needed can cause a page fault which under some rare
- circumstances can cause the kernel to extend the stack
- segment all the way down to here, which is seriously bad.
- Hence: */
- anyValid = False;
- for (addr = pageBase; addr < pageBase+VKI_PAGE_SIZE; addr += 4) {
- if (is_valid_address(addr)) {
- anyValid = True;
- break;
- }
- }
-
- if (!anyValid)
- continue; /* nothing interesting here .. move to the next page */
-
- /* Ok, we have to prod cautiously at the page and see if it
- explodes or not. */
- if (__builtin_setjmp(memscan_jmpbuf) == 0) {
- /* try this ... */
- page_first_word = * (volatile UInt*)pageBase;
- /* we get here if we didn't get a fault */
- /* Scan the page */
- for (addr = pageBase; addr < pageBase+VKI_PAGE_SIZE; addr += 4) {
- if (is_valid_address(addr)) {
- nWordsNotified++;
- notify_word ( addr, *(UInt*)addr );
- }
- }
- } else {
- /* We get here if reading the first word of the page caused a
- fault, which in turn caused the signal handler to longjmp.
- Ignore this page. */
- if (0)
- VG_(printf)(
- "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
- (void*)pageBase
- );
- }
- }
-
- /* Restore signal state to whatever it was before. */
- res = VG_(sigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
-   tl_assert(res == 0);
-
- res = VG_(sigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
-   tl_assert(res == 0);
-
- res = VG_(sigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
-   tl_assert(res == 0);
-
- return nWordsNotified;
+ if (0)
+ VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
+ if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
+ __builtin_longjmp(memscan_jmpbuf, 1);
}
/*------------------------------------------------------------*/
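The catcher above hangs off Valgrind's internal VG_(set_fault_catcher) hook
plus __builtin_setjmp. For readers outside the Valgrind tree, here is a rough
equivalent of the probe pattern in plain POSIX terms (sketch only;
probe_catcher and safe_read_word are invented names, and sigsetjmp with a
nonzero savemask handles the mask restoration that the real code does by
hand):

   #include <setjmp.h>
   #include <signal.h>
   #include <string.h>

   static sigjmp_buf probe_jmpbuf;

   static void probe_catcher(int sig)
   {
      (void)sig;
      siglongjmp(probe_jmpbuf, 1);     /* unwind out of the faulting read */
   }

   /* Returns 1 and stores *p into *out if p is readable, else 0. */
   static int safe_read_word(const unsigned long *p, unsigned long *out)
   {
      struct sigaction sa, saved_segv, saved_bus;
      int ok = 0;

      memset(&sa, 0, sizeof sa);
      sa.sa_handler = probe_catcher;
      sigemptyset(&sa.sa_mask);
      sigaction(SIGSEGV, &sa, &saved_segv);
      sigaction(SIGBUS,  &sa, &saved_bus);

      if (sigsetjmp(probe_jmpbuf, 1) == 0) {
         *out = *(const volatile unsigned long *)p;   /* may fault */
         ok = 1;
      }
      sigaction(SIGSEGV, &saved_segv, NULL);
      sigaction(SIGBUS,  &saved_bus,  NULL);
      return ok;
   }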
@@ -197,10 +68,21 @@
-- Proper-ly reached; a pointer to its start has been found
-- Interior-ly reached; only an interior pointer to it has been found
-- Unreached; so far, no pointers to any part of it have been found.
+ -- IndirectLeak; leaked, but referred to by another leaked block
*/
-typedef
- enum { Unreached, Interior, Proper }
- Reachedness;
+typedef enum {
+ Unreached,
+ IndirectLeak,
+ Interior,
+ Proper
+ } Reachedness;
+
+/* An entry in the mark stack */
+typedef struct {
+ Int next:30; /* Index of next in mark stack */
+ UInt state:2; /* Reachedness */
+ SizeT indirect; /* if Unreached, how much is unreachable from here */
+} MarkStack;
/* A block record, used for generating err msgs. */
typedef
@@ -212,6 +94,7 @@
Reachedness loss_mode;
/* Number of blocks and total # bytes involved. */
UInt total_bytes;
+ UInt indirect_bytes;
UInt num_blocks;
}
LossRecord;
@@ -221,7 +104,7 @@
shadows[i]. Return -1 if none found. This assumes that shadows[]
has been sorted on the ->data field. */
-#ifdef VG_DEBUG_LEAKCHECK
+#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr ptr,
@@ -235,7 +118,7 @@
for (i = 0; i < n_shadows; i++) {
PROF_EVENT(71);
a_lo = shadows[i]->data;
- a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
+ a_hi = ((Addr)shadows[i]->data) + shadows[i]->size;
if (a_lo <= ptr && ptr <= a_hi)
return i;
}
@@ -261,7 +144,7 @@
mid = (lo + hi) / 2;
a_mid_lo = shadows[mid]->data;
- a_mid_hi = shadows[mid]->data + shadows[mid]->size - 1;
+ a_mid_hi = shadows[mid]->data + shadows[mid]->size;
if (ptr < a_mid_lo) {
hi = mid-1;
@@ -271,13 +154,13 @@
lo = mid+1;
continue;
}
- tl_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
+ sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
retVal = mid;
break;
}
-# ifdef VG_DEBUG_LEAKCHECK
- tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
+# if VG_DEBUG_LEAKCHECK
+ sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
# endif
/* VG_(printf)("%d\n", retVal); */
return retVal;
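For reference, the search the hunk above adjusts can be written as a
self-contained function (my sketch; Block and find_block are invented
stand-ins for MAC_Chunk and find_shadow_for). Note the inclusive upper bound
data+size, matching the removal of the "- 1" above, so a pointer one past the
end of a block still finds it:

   #include <stddef.h>

   typedef unsigned long Addr;          /* stand-in for Valgrind's Addr */
   typedef struct { Addr data; size_t size; } Block;

   /* Blocks must be sorted by ->data and non-overlapping.  A pointer
      matches anywhere in [data, data+size]: start, interior, or
      one-past-the-end. */
   static int find_block(Addr ptr, const Block *blocks, int n)
   {
      int lo = 0, hi = n - 1;
      while (lo <= hi) {
         int mid = lo + (hi - lo) / 2;
         if (ptr < blocks[mid].data)
            hi = mid - 1;
         else if (ptr > blocks[mid].data + blocks[mid].size)
            lo = mid + 1;
         else
            return mid;
      }
      return -1;
   }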
@@ -286,53 +169,27 @@
/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MAC_Chunk** lc_shadows;
static Int lc_n_shadows;
-static Reachedness* lc_reachedness;
+static MarkStack* lc_markstack;
+static Int lc_markstack_top;
static Addr lc_min_mallocd_addr;
static Addr lc_max_mallocd_addr;
+static SizeT lc_scanned;
-static
-void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
+static Bool (*lc_is_valid_chunk) (UInt chunk);
+static Bool (*lc_is_valid_address)(Addr addr);
+
+static const Char *pp_lossmode(Reachedness lossmode)
{
- Int sh_no;
- Addr ptr;
+ const Char *loss = "?";
- /* Rule out some known causes of bogus pointers. Mostly these do
- not cause much trouble because only a few false pointers can
- ever lurk in these places. This mainly stops it reporting that
- blocks are still reachable in stupid test programs like this
-
- int main (void) { char* a = malloc(100); return 0; }
-
- which people seem inordinately fond of writing, for some reason.
-
- Note that this is a complete kludge. It would be better to
- ignore any addresses corresponding to valgrind.so's .bss and
- .data segments, but I cannot think of a reliable way to identify
- where the .bss segment has been put. If you can, drop me a
- line.
- */
- if (!VG_(is_client_addr)(a)) return;
-
- /* OK, let's get on and do something Useful for a change. */
-
- ptr = (Addr)word_at_a;
- if (ptr >= lc_min_mallocd_addr && ptr <= lc_max_mallocd_addr) {
- /* Might be legitimate; we'll have to investigate further. */
- sh_no = find_shadow_for ( ptr, lc_shadows, lc_n_shadows );
- if (sh_no != -1) {
- /* Found a block at/into which ptr points. */
- tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
- tl_assert(ptr < lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
- /* Decide whether Proper-ly or Interior-ly reached. */
- if (ptr == lc_shadows[sh_no]->data) {
- if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
- lc_reachedness[sh_no] = Proper;
- } else {
- if (lc_reachedness[sh_no] == Unreached)
- lc_reachedness[sh_no] = Interior;
- }
- }
+ switch(lossmode) {
+ case Unreached: loss = "definitely lost"; break;
+ case IndirectLeak: loss = "indirectly lost"; break;
+ case Interior: loss = "possibly lost"; break;
+ case Proper: loss = "still reachable"; break;
}
+
+ return loss;
}
/* Used for printing leak errors, avoids exposing the LossRecord type (which
@@ -340,20 +197,26 @@
void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
{
LossRecord* l = (LossRecord*)vl;
+ const Char *loss = pp_lossmode(l->loss_mode);
VG_(message)(Vg_UserMsg, "");
- VG_(message)(Vg_UserMsg,
- "%d bytes in %d blocks are %s in loss record %d of %d",
- l->total_bytes, l->num_blocks,
- l->loss_mode==Unreached ? "definitely lost"
- : (l->loss_mode==Interior ? "possibly lost"
- : "still reachable"),
- n_this_record, n_total_records
- );
+ if (l->indirect_bytes) {
+ VG_(message)(Vg_UserMsg,
+ "%d (%d direct, %d indirect) bytes in %d blocks are %s in loss record %d of %d",
+ l->total_bytes + l->indirect_bytes,
+ l->total_bytes, l->indirect_bytes, l->num_blocks,
+ loss, n_this_record, n_total_records);
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "%d bytes in %d blocks are %s in loss record %d of %d",
+ l->total_bytes, l->num_blocks,
+ loss, n_this_record, n_total_records);
+ }
VG_(pp_ExeContext)(l->allocated_at);
}
Int MAC_(bytes_leaked) = 0;
+Int MAC_(bytes_indirect) = 0;
Int MAC_(bytes_dubious) = 0;
Int MAC_(bytes_reachable) = 0;
Int MAC_(bytes_suppressed) = 0;
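For illustration, the two message shapes emitted above render like this (all
numbers invented):

   204 (104 direct, 100 indirect) bytes in 1 blocks are definitely lost in loss record 1 of 2
   50 bytes in 2 blocks are still reachable in loss record 2 of 2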
@@ -365,6 +228,333 @@
return (mc1->data < mc2->data ? -1 : 1);
}
+/* If ptr is pointing to a heap-allocated block which hasn't been seen
+ before, push it onto the mark stack. Clique is the index of the
+ clique leader; -1 if none. */
+static void _lc_markstack_push(Addr ptr, Int clique)
+{
+ Int sh_no;
+
+ if (!VG_(is_client_addr)(ptr)) /* quick filter */
+ return;
+
+ sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);
+
+ if (VG_DEBUG_LEAKCHECK)
+ VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);
+
+ if (sh_no == -1)
+ return;
+
+ sk_assert(sh_no >= 0 && sh_no < lc_n_shadows);
+ sk_assert(ptr <= lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
+
+ if (lc_markstack[sh_no].state == Unreached) {
+ if (0)
+ VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
+ lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
+
+ sk_assert(lc_markstack[sh_no].next == -1);
+ lc_markstack[sh_no].next = lc_markstack_top;
+ lc_markstack_top = sh_no;
+ }
+
+ if (clique != -1) {
+ if (0)
+ VG_(printf)("mopup: %d: %p is %d\n",
+ sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);
+
+ /* An unmarked block - add it to the clique. Add its size to
+ the clique-leader's indirect size. If the new block was
+ itself a clique leader, it isn't any more, so add its
+ indirect to the new clique leader.
+
+ If this block *is* the clique leader, it means this is a
+ cyclic structure, so none of this applies. */
+ if (lc_markstack[sh_no].state == Unreached) {
+ lc_markstack[sh_no].state = IndirectLeak;
+
+ if (sh_no != clique) {
+ if (VG_DEBUG_CLIQUE) {
+ if (lc_markstack[sh_no].indirect)
+ VG_(printf)(" clique %d joining clique %d adding %d+%d bytes\n",
+ sh_no, clique,
+ lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
+ else
+ VG_(printf)(" %d joining %d adding %d\n",
+ sh_no, clique, lc_shadows[sh_no]->size);
+ }
+
+ lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
+ lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
+ lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
+ }
+ }
+ } else if (ptr == lc_shadows[sh_no]->data) {
+ lc_markstack[sh_no].state = Proper;
+ } else {
+ if (lc_markstack[sh_no].state == Unreached)
+ lc_markstack[sh_no].state = Interior;
+ }
+}
+
+static void lc_markstack_push(Addr ptr)
+{
+ _lc_markstack_push(ptr, -1);
+}
+
+/* Return the top of the mark stack, if any. */
+static Int lc_markstack_pop(void)
+{
+ Int ret = lc_markstack_top;
+
+ if (ret != -1) {
+ lc_markstack_top = lc_markstack[ret].next;
+ lc_markstack[ret].next = -1;
+ }
+
+ return ret;
+}
+
+/* Scan a block of memory between [start, start+len). This range may
+ be bogus, inaccessible, or otherwise strange; we deal with it.
+
+ If clique != -1, it means we're gathering leaked memory into
+ cliques, and clique is the index of the current clique leader. */
+static void _lc_scan_memory(Addr start, SizeT len, Int clique)
+{
+ Addr ptr = ROUNDUP(start, sizeof(Addr));
+ Addr end = ROUNDDN(start+len, sizeof(Addr));
+ vki_sigset_t sigmask;
+
+ if (VG_DEBUG_LEAKCHECK)
+ VG_(printf)("scan %p-%p\n", start, len);
+ VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
+ VG_(set_fault_catcher)(vg_scan_all_valid_memory_catcher);
+
+ lc_scanned += end-ptr;
+
+ if (!VG_(is_client_addr)(ptr) ||
+ !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
+ ptr = PGROUNDUP(ptr+1); /* first page bad */
+
+ while(ptr < end) {
+ Addr addr;
+
+ /* Skip invalid chunks */
+ if (!(*lc_is_valid_chunk)(PM_IDX(ptr))) {
+ ptr = ROUNDUP(ptr+1, SECONDARY_SIZE);
+ continue;
+ }
+
+ /* Look to see if this page seems reasonable */
+ if ((ptr % VKI_PAGE_SIZE) == 0) {
+ if (!VG_(is_client_addr)(ptr) ||
+ !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
+ ptr += VKI_PAGE_SIZE; /* bad page - skip it */
+ }
+
+ if (__builtin_setjmp(memscan_jmpbuf) == 0) {
+ if ((*lc_is_valid_address)(ptr)) {
+ addr = *(Addr *)ptr;
+ _lc_markstack_push(addr, clique);
+ } else if (0 && VG_DEBUG_LEAKCHECK)
+ VG_(printf)("%p not valid\n", ptr);
+ ptr += sizeof(Addr);
+ } else {
+ /* We need to restore the signal mask, because we were
+ longjmped out of a signal handler. */
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
+
+ ptr = PGROUNDUP(ptr+1); /* bad page - skip it */
+ }
+ }
+
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
+ VG_(set_fault_catcher)(NULL);
+}
+
+static void lc_scan_memory(Addr start, SizeT len)
+{
+ _lc_scan_memory(start, len, -1);
+}
+
+/* Process the mark stack until empty. If clique != -1, then we're
+ actually gathering leaked blocks into that clique, so they should be
+ marked IndirectLeak. */
+static void lc_do_leakcheck(Int clique)
+{
+ Int top;
+
+ while((top = lc_markstack_pop()) != -1) {
+ sk_assert(top >= 0 && top < lc_n_shadows);
+ sk_assert(lc_markstack[top].state != Unreached);
+
+ _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
+ }
+}
+
+static Int blocks_leaked;
+static Int blocks_indirect;
+static Int blocks_dubious;
+static Int blocks_reachable;
+static Int blocks_suppressed;
+
+static void full_report()
+{
+ Int i;
+ Int n_lossrecords;
+ LossRecord* errlist;
+ LossRecord* p;
+ Bool is_suppressed;
+
+ /* Go through and group lost structures into cliques. For each
+ Unreached block, push it onto the mark stack, and find all the
+ blocks linked to it. These are marked IndirectLeak, and their
+ size is added to the clique leader's indirect size. If one of
+ the found blocks was itself a clique leader (from a previous
+ pass), then the cliques are merged. */
+ for (i = 0; i < lc_n_shadows; i++) {
+ if (VG_DEBUG_CLIQUE)
+ VG_(printf)("cliques: %d at %p -> %s\n",
+ i, lc_shadows[i]->data, pp_lossmode(lc_markstack[i].state));
+ if (lc_markstack[i].state != Unreached)
+ continue;
+
+ sk_assert(lc_markstack_top == -1);
+
+ if (VG_DEBUG_CLIQUE)
+ VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
+
+ _lc_markstack_push(lc_shadows[i]->data, i);
+
+ lc_do_leakcheck(i);
+
+ sk_assert(lc_markstack_top == -1);
+ sk_assert(lc_markstack[i].state == IndirectLeak);
+
+ lc_markstack[i].state = Unreached; /* Return to unreached state,
+ to indicate it's a clique
+ leader */
+ }
+
+ /* Common up the lost blocks so we can print sensible error messages. */
+ n_lossrecords = 0;
+ errlist = NULL;
+ for (i = 0; i < lc_n_shadows; i++) {
+ ExeContext* where = lc_shadows[i]->where;
+
+ for (p = errlist; p != NULL; p = p->next) {
+ if (p->loss_mode == lc_markstack[i].state
+ && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
+ p->allocated_at,
+ where) ) {
+ break;
+ }
+ }
+ if (p != NULL) {
+ p->num_blocks ++;
+ p->total_bytes += lc_shadows[i]->size;
+ p->indirect_bytes += lc_markstack[i].indirect;
+ } else {
+ n_lossrecords ++;
+ p = VG_(malloc)(sizeof(LossRecord));
+ p->loss_mode = lc_markstack[i].state;
+ p->allocated_at = where;
+ p->total_bytes = lc_shadows[i]->size;
+ p->indirect_bytes = lc_markstack[i].indirect;
+ p->num_blocks = 1;
+ p->next = errlist;
+ errlist = p;
+ }
+ }
+
+ /* Print out the commoned-up blocks and collect summary stats. */
+ for (i = 0; i < n_lossrecords; i++) {
+ Bool print_record;
+ LossRecord* p_min = NULL;
+ UInt n_min = 0xFFFFFFFF;
+ for (p = errlist; p != NULL; p = p->next) {
+ if (p->num_blocks > 0 && p->total_bytes < n_min) {
+ n_min = p->total_bytes + p->indirect_bytes;
+ p_min = p;
+ }
+ }
+ sk_assert(p_min != NULL);
+
+ /* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
+ we disallow that when --leak-check=yes.
+
+ Prints the error if not suppressed, unless it's reachable (Proper or IndirectLeak)
+ and --show-reachable=no */
+
+ print_record = ( MAC_(clo_show_reachable) ||
+ Unreached == p_min->loss_mode || Interior == p_min->loss_mode );
+ is_suppressed =
+ VG_(unique_error) ( VG_(get_VCPU_tid)(), LeakErr, (UInt)i+1,
+ (Char*)n_lossrecords, (void*) p_min,
+ p_min->allocated_at, print_record,
+ /*allow_GDB_attach*/False, /*count_error*/False );
+
+ if (is_suppressed) {
+ blocks_suppressed += p_min->num_blocks;
+ MAC_(bytes_suppressed) += p_min->total_bytes;
+
+ } else if (Unreached == p_min->loss_mode) {
+ blocks_leaked += p_min->num_blocks;
+ MAC_(bytes_leaked) += p_min->total_bytes;
+
+ } else if (IndirectLeak == p_min->loss_mode) {
+ blocks_indirect += p_min->num_blocks;
+ MAC_(bytes_indirect)+= p_min->total_bytes;
+
+ } else if (Interior == p_min->loss_mode) {
+ blocks_dubious += p_min->num_blocks;
+ MAC_(bytes_dubious) += p_min->total_bytes;
+
+ } else if (Proper == p_min->loss_mode) {
+ blocks_reachable += p_min->num_blocks;
+ MAC_(bytes_reachable) += p_min->total_bytes;
+
+ } else {
+ VG_(skin_panic)("generic_detect_memory_leaks: unknown loss mode");
+ }
+ p_min->num_blocks = 0;
+ }
+}
+
+/* Compute a quick summary of the leak check. */
+static void make_summary()
+{
+ Int i;
+
+ for(i = 0; i < lc_n_shadows; i++) {
+ SizeT size = lc_shadows[i]->size;
+
+ switch(lc_markstack[i].state) {
+ case Unreached:
+ blocks_leaked++;
+ MAC_(bytes_leaked) += size;
+ break;
+
+ case Proper:
+ blocks_reachable++;
+ MAC_(bytes_reachable) += size;
+ break;
+
+ case Interior:
+ blocks_dubious++;
+ MAC_(bytes_dubious) += size;
+ break;
+
+ case IndirectLeak: /* shouldn't happen */
+ blocks_indirect++;
+ MAC_(bytes_indirect) += size;
+ break;
+ }
+ }
+}
+
/* Top level entry point to leak detector. Call here, passing in
suitable address-validating functions (see comment at top of
vg_scan_all_valid_memory above). All this is to avoid duplication
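To make the clique bookkeeping in _lc_markstack_push() and full_report()
concrete, here is a toy model of the pass (my sketch with invented sizes, not
part of the diff): two leaked blocks that point at each other collapse into
one clique whose leader carries the indirect bytes, mirroring the
cyclic-structure note in the comments above.

   #include <assert.h>

   /* Block 0 points to block 1, which points back to block 0; neither
      is reachable from a root.  Indexes stand in for the shadow array. */
   enum { NBLOCKS = 2 };
   static int next_of[NBLOCKS]  = { 1, 0 };  /* the only inter-block pointers */
   static int size_of[NBLOCKS]  = { 16, 32 };
   static int state[NBLOCKS];                /* 0 = Unreached, 1 = IndirectLeak */
   static int indirect[NBLOCKS];

   int main(void)
   {
      for (int i = 0; i < NBLOCKS; i++) {
         if (state[i] != 0)                  /* already in some clique */
            continue;
         /* i becomes the clique leader; walk everything it references */
         int b = next_of[i];
         while (state[b] == 0) {
            state[b] = 1;                    /* mark IndirectLeak */
            if (b != i)
               indirect[i] += size_of[b];    /* credit size to the leader */
            b = next_of[b];
         }
         state[i] = 0;                       /* leader returns to Unreached */
      }
      /* Block 0 leads the clique: 16 direct + 32 indirect bytes lost. */
      assert(indirect[0] == 32 && indirect[1] == 0);
      return 0;
   }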
@@ -375,22 +565,14 @@
reachable blocks should be shown.
*/
void MAC_(do_detect_memory_leaks) (
- ThreadId tid,
- Bool is_valid_64k_chunk ( UInt ),
- Bool is_valid_address ( Addr )
+ LeakCheckMode mode,
+ Bool (*is_valid_64k_chunk) ( UInt ),
+ Bool (*is_valid_address) ( Addr )
)
{
Int i;
- Int blocks_leaked;
- Int blocks_dubious;
- Int blocks_reachable;
- Int blocks_suppressed;
- Int n_lossrecords;
- UInt bytes_notified;
- Bool is_suppressed;
- LossRecord* errlist;
- LossRecord* p;
+ sk_assert(mode != LC_Off);
/* VG_(HT_to_array) allocates storage for shadows */
lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
@@ -401,17 +583,17 @@
/* Sanity check; assert that the blocks are now in order */
for (i = 0; i < lc_n_shadows-1; i++) {
- tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
+ sk_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
}
/* Sanity check -- make sure they don't overlap */
for (i = 0; i < lc_n_shadows-1; i++) {
- tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
+ sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
< lc_shadows[i+1]->data );
}
if (lc_n_shadows == 0) {
- tl_assert(lc_shadows == NULL);
+ sk_assert(lc_shadows == NULL);
if (VG_(clo_verbosity) >= 1) {
VG_(message)(Vg_UserMsg,
"No malloc'd blocks -- no leaks are possible.");
@@ -426,119 +608,62 @@
lc_min_mallocd_addr = lc_shadows[0]->data;
lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
- + lc_shadows[lc_n_shadows-1]->size - 1;
+ + lc_shadows[lc_n_shadows-1]->size;
- lc_reachedness = VG_(malloc)( lc_n_shadows * sizeof(Reachedness) );
- for (i = 0; i < lc_n_shadows; i++)
- lc_reachedness[i] = Unreached;
+ lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
+ for (i = 0; i < lc_n_shadows; i++) {
+ lc_markstack[i].next = -1;
+ lc_markstack[i].state = Unreached;
+ lc_markstack[i].indirect = 0;
+ }
+ lc_markstack_top = -1;
- /* Do the scan of memory. */
- bytes_notified
- = sizeof(UWord)
- * vg_scan_all_valid_memory (
- is_valid_64k_chunk,
- is_valid_address,
- &vg_detect_memory_leaks_notify_addr
- );
+ lc_is_valid_chunk = is_valid_64k_chunk;
+ lc_is_valid_address = is_valid_address;
+
+ lc_scanned = 0;
+
+ /* Do the scan of memory, pushing any pointers onto the mark stack */
+ VG_(find_root_memory)(lc_scan_memory);
+
+ /* Push registers onto mark stack */
+ VG_(mark_from_registers)(lc_markstack_push);
+
+ /* Keep walking the heap until everything is found */
+ lc_do_leakcheck(-1);
if (VG_(clo_verbosity) > 0)
- VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
+ VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);
- /* Common up the lost blocks so we can print sensible error messages. */
- n_lossrecords = 0;
- errlist = NULL;
- for (i = 0; i < lc_n_shadows; i++) {
-
- ExeContext* where = lc_shadows[i]->where;
-
- for (p = errlist; p != NULL; p = p->next) {
- if (p->loss_mode == lc_reachedness[i]
- && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
- p->allocated_at,
- where) ) {
- break;
- }
- }
- if (p != NULL) {
- p->num_blocks ++;
- p->total_bytes += lc_shadows[i]->size;
- } else {
- n_lossrecords ++;
- p = VG_(malloc)(sizeof(LossRecord));
- p->loss_mode = lc_reachedness[i];
- p->allocated_at = where;
- p->total_bytes = lc_shadows[i]->size;
- p->num_blocks = 1;
- p->next = errlist;
- errlist = p;
- }
- }
-
- /* Print out the commoned-up blocks and collect summary stats. */
blocks_leaked = MAC_(bytes_leaked) = 0;
+ blocks_indirect = MAC_(bytes_indirect) = 0;
blocks_dubious = MAC_(bytes_dubious) = 0;
blocks_reachable = MAC_(bytes_reachable) = 0;
blocks_suppressed = MAC_(bytes_suppressed) = 0;
- for (i = 0; i < n_lossrecords; i++) {
- Bool print_record;
- LossRecord* p_min = NULL;
- UInt n_min = 0xFFFFFFFF;
- for (p = errlist; p != NULL; p = p->next) {
- if (p->num_blocks > 0 && p->total_bytes < n_min) {
- n_min = p->total_bytes;
- p_min = p;
- }
- }
- tl_assert(p_min != NULL);
-
- /* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
- we disallow that when --leak-check=yes.
-
- Prints the error if not suppressed, unless it's reachable (Proper)
- and --show-reachable=no */
-
- print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
- is_suppressed =
- VG_(unique_error) ( tid, LeakErr, (UInt)i+1,
- (Char*)(UWord)n_lossrecords, (void*) p_min,
- p_min->allocated_at, print_record,
- /*allow_GDB_attach*/False, /*count_error*/False );
-
- if (is_suppressed) {
- blocks_suppressed += p_min->num_blocks;
- MAC_(bytes_suppressed) += p_min->total_bytes;
-
- } else if (Unreached == p_min->loss_mode) {
- blocks_leaked += p_min->num_blocks;
- MAC_(bytes_leaked) += p_min->total_bytes;
-
- } else if (Interior == p_min->loss_mode) {
- blocks_dubious += p_min->num_blocks;
- MAC_(bytes_dubious) += p_min->total_bytes;
-
- } else if (Proper == p_min->loss_mode) {
- blocks_reachable += p_min->num_blocks;
- MAC_(bytes_reachable) += p_min->total_bytes;
-
- } else {
- VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
- }
- p_min->num_blocks = 0;
- }
+ if (mode == LC_Full)
+ full_report();
+ else
+ make_summary();
if (VG_(clo_verbosity) > 0) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
MAC_(bytes_leaked), blocks_leaked );
- VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
+ if (blocks_indirect > 0)
+ VG_(message)(Vg_UserMsg, " indirectly lost: %d bytes in %d blocks.",
+ MAC_(bytes_indirect), blocks_indirect );
+ VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
MAC_(bytes_dubious), blocks_dubious );
VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
MAC_(bytes_reachable), blocks_reachable );
VG_(message)(Vg_UserMsg, " suppressed: %d bytes in %d blocks.",
MAC_(bytes_suppressed), blocks_suppressed );
- if (!MAC_(clo_show_reachable)) {
+ if (mode == LC_Summary)
+ VG_(message)(Vg_UserMsg,
+ "Use --leak-check=full to see details of leaked memory.");
+ else if (!MAC_(clo_show_reachable)) {
VG_(message)(Vg_UserMsg,
"Reachable blocks (those to which a pointer was found) are not shown.");
VG_(message)(Vg_UserMsg,
@@ -547,7 +672,7 @@
}
VG_(free) ( lc_shadows );
- VG_(free) ( lc_reachedness );
+ VG_(free) ( lc_markstack );
}
/*--------------------------------------------------------------------*/
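Usage note (not part of the diff): with this change the mode passed to
MAC_(do_detect_memory_leaks) is driven by the leak-check option, so something
like

   valgrind --tool=memcheck --leak-check=full ./myprog

produces the per-record reports from full_report(), while a summary-only run
(assuming the option value maps onto LC_Summary in the obvious way) prints
just the totals from make_summary() plus the "Use --leak-check=full" hint.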