Add heuristics to reduce false "possible leaks" in C++ code.

The option --leak-check-heuristics=heur1,heur2,... can activate
various heuristics to decrease the number of false positive
"possible leaks" for C++ code. The available heuristics detect
valid interior pointers to std::string objects, to new[]-allocated
arrays whose elements have destructors, and to the inner parts of
C++ objects using multiple inheritance.
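As an illustration, the hypothetical program below (a sketch assuming
the GCC/libstdc++ object layouts targeted by the heuristics) creates
one interior pointer of each kind. When run with e.g.
--leak-check=full --leak-check-heuristics=stdstring,newarray,multipleinheritance
the backing blocks should be reported as still reachable rather than
possibly lost.

    #include <string>

    struct A { virtual ~A() {} int a; };
    struct B { virtual ~B() {} int b; };
    struct C : A, B {};     // multiple inheritance: two vtable ptrs

    int main() {
        // stdstring: the (refcounted libstdc++) string keeps only a
        // pointer to its character array, which starts 3 words after
        // the beginning of the heap block (length, capacity, refcount).
        std::string *s = new std::string("interior");

        // newarray: new[] of a type with a destructor stores the
        // element count in the first word; the returned pointer is
        // one word inside the allocated block.
        A *arr = new A[3];

        // multipleinheritance: converting to the second base yields
        // a pointer past the A subobject, i.e. inside the block.
        B *b = new C;

        (void)s; (void)arr; (void)b;
        return 0;
    }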

This fixes bug 280271 ("Valgrind reports possible memory leaks on
still-reachable std::string").

This has been tested on x86/amd64/ppc32/ppc64.

First performance measurements seem to show a negligible impact on
the leak search.

More feedback is welcome on both performance and functional aspects
(decrease of the false positive 'possibly leaked' rate and/or
increase of the false negative 'possibly leaked' rate).

Note that the heuristics do not check that the memory was allocated
with "new" or "new[]", as it is expected that in some cases specific
allocation functions are used for C++ objects instead of the standard
new/new[]. If needed, an option could be added to check that the
allocation function is new/new[].
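For example (a minimal sketch with a hypothetical Widget class), a
class-specific allocator can route the allocation through malloc, so
the block behind a new[] expression need not come from the standard
new[] at all; the newarray heuristic is still wanted for it:

    #include <cstddef>
    #include <cstdlib>

    struct Widget {
        ~Widget() {}        // non-trivial dtor forces the new[] cookie
        static void *operator new[](std::size_t n)
            { return std::malloc(n); }
        static void operator delete[](void *p)
            { std::free(p); }
    };

    int main() {
        Widget *w = new Widget[4];  // interior ptr into a malloc'd block
        (void)w;
        return 0;
    }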



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@13582 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/memcheck/mc_leakcheck.c b/memcheck/mc_leakcheck.c
index 98addd7..4590a1a 100644
--- a/memcheck/mc_leakcheck.c
+++ b/memcheck/mc_leakcheck.c
@@ -427,12 +427,17 @@
 typedef 
    struct {
       UInt  state:2;    // Reachedness.
-      UInt  pending:1;  // Scan pending.  
+      UInt  pending:1;  // Scan pending.
+      UInt  heuristic: (sizeof(UInt)*8)-3;
+      // Heuristic with which this block was considered reachable.
+      // LchNone if state != Reachable or no heuristic needed to
+      // consider it reachable.
+
       union {
-         SizeT indirect_szB : (sizeof(SizeT)*8)-3; // If Unreached, how many bytes
-                                                   //   are unreachable from here.
-         SizeT  clique :  (sizeof(SizeT)*8)-3;      // if IndirectLeak, clique leader
-                                                   // to which it belongs.
+         SizeT indirect_szB;
+         // If Unreached, how many bytes are unreachable from here.
+         SizeT  clique;
+         // if IndirectLeak, clique leader to which it belongs.
       } IorC;
    } 
    LC_Extra;
@@ -461,9 +466,11 @@
 // Array of sorted loss record (produced during last leak search).
 static LossRecord** lr_array;
 
+// Value of the heuristics parameter used in the current (or last) leak check.
+static UInt detect_memory_leaks_last_heuristics;
 
 // DeltaMode used the last time we called detect_memory_leaks.
-// The recorded leak errors must be output using a logic based on this delta_mode.
+// The recorded leak errors are output using a logic based on this delta_mode.
 // The below avoids replicating the delta_mode in each LossRecord.
 LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);
 
@@ -495,6 +502,13 @@
 SizeT MC_(blocks_reachable)  = 0;
 SizeT MC_(blocks_suppressed) = 0;
 
+// Subset of MC_(bytes_reachable) and MC_(blocks_reachable) which
+// are considered reachable due to the corresponding heuristic.
+static SizeT MC_(bytes_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
+                                               = {0,0,0,0};
+static SizeT MC_(blocks_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
+                                                = {0,0,0,0};
+
 // Determines if a pointer is to a chunk.  Returns the chunk number et al
 // via call-by-reference.
 static Bool
@@ -568,6 +582,181 @@
    }
 }
 
+static const HChar* pp_heuristic(LeakCheckHeuristic h)
+{
+   switch(h) {
+   case LchNone:                return "none";
+   case LchStdString:           return "stdstring";
+   case LchNewArray:            return "newarray";
+   case LchMultipleInheritance: return "multipleinheritance";
+   default:                     return "???invalid heuristic???";
+   }
+}
+
+// True if ptr looks like the address of a vtable, i.e. if ptr
+// points to an array of pointers to functions.
+// It is assumed the only caller of this function is heuristic_reachedness
+// which must check that ptr is aligned and above page 0.
+// Checking that ptr is above page 0 is an optimisation: it is assumed
+// that no vtable is located in page 0. So, all small integer values
+// encountered during the scan will not incur the cost of calling this
+// function.
+static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
+{
+   // ??? If performance problem:
+   // ??? maybe implement a cache (array indexed by ptr % primenr)
+   // ??? of "I am a vtable ptr" ???
+
+   // ??? Maybe the debug info could (efficiently?) be used to detect vtables ?
+   
+   // We consider ptr as a vtable ptr if it points to a table
+   // where we find only NULL pointers or pointers pointing at an
+   // executable region. We must find at least 2 non-NULL pointers
+   // before considering ptr as a vtable pointer.
+   // We scan a maximum of VTABLE_MAX_CHECK words for these 2 non-NULL
+   // pointers.
+#define VTABLE_MAX_CHECK 20
+
+   NSegment const *seg;
+   UInt nr_fn_ptrs = 0;
+   Addr scan;
+   Addr scan_max;
+
+   // First verify ptr points inside a client mapped file section.
+   // ??? is a vtable always in a file mapped readable section ?
+   seg = VG_(am_find_nsegment) (ptr);
+   if (seg == NULL
+       || seg->kind != SkFileC
+       || !seg->hasR)
+      return False;
+
+   // Check potential function pointers, up to a maximum of VTABLE_MAX_CHECK.
+   scan_max = ptr + VTABLE_MAX_CHECK*sizeof(Addr);
+   // If ptr is near the end of seg, avoid scan_max exceeding the end of seg:
+   if (scan_max > seg->end - sizeof(Addr))
+      scan_max = seg->end - sizeof(Addr);
+   for (scan = ptr; scan <= scan_max; scan+=sizeof(Addr)) {
+      Addr pot_fn = *((Addr *)scan);
+      if (pot_fn == 0)
+         continue; // NULL fn pointer. Seems it can happen in a vtable.
+      seg = VG_(am_find_nsegment) (pot_fn);
+#if defined(VGA_ppc64)
+      // ppc64 uses a thunk table. So, we have one more level of indirection
+      // to follow.
+      if (seg == NULL
+          || seg->kind != SkFileC
+          || !seg->hasR
+          || !seg->hasW)
+         return False; // ptr to nowhere, or not a ptr to thunks.
+      pot_fn = *((Addr *)pot_fn);
+      if (pot_fn == 0)
+         continue; // NULL fn pointer. Seems it can happen in a vtable.
+      seg = VG_(am_find_nsegment) (pot_fn);
+#endif
+      if (seg == NULL
+          || seg->kind != SkFileC
+          || !seg->hasT)
+         return False; // ptr to nowhere, or not a fn ptr.
+      nr_fn_ptrs++;
+      if (nr_fn_ptrs == 2)
+         return True;
+   }
+
+   return False;
+}
+
+// If ch is heuristically reachable via a heuristic in heur_set,
+// returns this heuristic.
+// If ch cannot be considered reachable using one of these heuristics,
+// return LchNone.
+// This should only be called when ptr is an interior ptr to ch.
+// The StdString/NewArray/MultipleInheritance heuristics are directly
+// inspired by DrMemory:
+//  see http://www.burningcutlery.com/derek/docs/drmem-CGO11.pdf [section VI,C]
+//  and bug 280271.
+static LeakCheckHeuristic heuristic_reachedness (Addr ptr,
+                                                 MC_Chunk *ch, LC_Extra *ex,
+                                                 UInt heur_set)
+{
+   if (HiS(LchStdString, heur_set)) {
+      // Detects inner pointers to std::string for the layout
+      //     length capacity refcount char_array[] \0
+      // where ptr points to the beginning of the char_array.
+      if ( ptr == ch->data + 3 * sizeof(SizeT)) {
+         const SizeT length = *((SizeT*)ch->data);
+         const SizeT capacity = *((SizeT*)ch->data+1);
+         if (length <= capacity
+             && (3 * sizeof(SizeT) + capacity + 1 == ch->szB)) {
+            // ??? could check there is no null byte from ptr to ptr+length-1
+            // ???    and that there is a null byte at ptr+length.
+            // ???
+            // ??? could check that ch->allockind is MC_AllocNew ???
+            // ??? probably not a good idea, as I guess stdstring
+            // ??? allocator can be done via custom allocator
+            // ??? or even a call to malloc ????
+            return LchStdString;
+         }
+      }
+   }
+
+   if (HiS(LchNewArray, heur_set)) {
+      // Detects inner pointers at the second word of a new[] array,
+      // following a plausible nr of elements stored in the first word.
+      // Such inner pointers are used for arrays of elements
+      // having a destructor, as the delete[] of the array must know
+      // how many elements to destroy.
+      //
+      // We have a strange/wrong case for 'ptr = new MyClass[0];' :
+      // For such a case, the returned ptr points just outside the
+      // allocated chunk. This chunk is then seen as a definite
+      // leak by Valgrind, as it is not considered an interior pointer.
+      // It is the c++ equivalent of bug 99923 (malloc(0) wrongly considered
+      // as definitely leaked). See the trick in find_chunk_for handling
+      // 0-sized block. This trick does not work for 'new MyClass[0]'
+      // because a word-sized chunk is allocated to store the (0) nr
+      // of elements.
+      if ( ptr == ch->data + sizeof(SizeT)) {
+         const SizeT nr_elts = *((SizeT*)ch->data);
+         if (nr_elts > 0 && (ch->szB - sizeof(SizeT)) % nr_elts == 0) {
+            // ??? could check that ch->allockind is MC_AllocNewVec ???
+            return LchNewArray;
+         }
+      }
+   }
+
+   if (HiS(LchMultipleInheritance, heur_set)) {
+      // Detect inner pointer used for multiple inheritance.
+      // Assumption is that the vtable pointers are before the object.
+      if (VG_IS_WORD_ALIGNED(ptr)) {
+         Addr first_addr;
+         Addr inner_addr;
+
+         // Avoid the call to is_vtable_addr when the addr is not
+         // aligned or points into page 0, as it is unlikely
+         // a vtable is located in this page. This last optimisation
+         // avoids calling aligned_ptr_above_page0_is_vtable_addr
+         // for all small integers.
+         // Note: we could possibly also avoid calling this function
+         // for small negative integers, as no vtable should be located
+         // in the last page.
+         inner_addr = *((Addr*)ptr);
+         if (VG_IS_WORD_ALIGNED(inner_addr) 
+             && inner_addr >= (Addr)VKI_PAGE_SIZE) {
+            first_addr = *((Addr*)ch->data);
+            if (VG_IS_WORD_ALIGNED(first_addr)
+                && first_addr >= (Addr)VKI_PAGE_SIZE
+                && aligned_ptr_above_page0_is_vtable_addr(inner_addr)
+                && aligned_ptr_above_page0_is_vtable_addr(first_addr)) {
+               // ??? could check that ch->allockind is MC_AllocNew ???
+               return LchMultipleInheritance;
+            }
+         }
+      }
+   }
+
+   return LchNone;
+}
+
 
 // If 'ptr' is pointing to a heap-allocated block which hasn't been seen
 // before, push it onto the mark stack.
@@ -577,16 +766,45 @@
    Int ch_no;
    MC_Chunk* ch;
    LC_Extra* ex;
+   Reachedness ch_via_ptr; // Is ch reachable via ptr, and how?
 
    if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
       return;
+
+   if (ex->state == Reachable) {
+      // If the block was considered reachable via a heuristic,
+      // and it is now directly reachable via ptr, clear the
+      // heuristic.
+      if (ex->heuristic && ptr == ch->data) {
+         // ch was up to now considered reachable due to
+         // ex->heuristic. We have a direct ptr now => clear
+         // the heuristic field.
+         ex->heuristic = LchNone;
+      }
+      return;
+   }
    
    // Possibly upgrade the state, ie. one of:
    // - Unreached --> Possible
    // - Unreached --> Reachable 
    // - Possible  --> Reachable
-   if (ptr == ch->data && is_prior_definite && ex->state != Reachable) {
-      // 'ptr' points to the start of the block, and the prior node is
+
+   if (ptr == ch->data)
+      ch_via_ptr = Reachable;
+   else if (detect_memory_leaks_last_heuristics) {
+      ex->heuristic 
+         = heuristic_reachedness (ptr, ch, ex,
+                                  detect_memory_leaks_last_heuristics);
+      if (ex->heuristic)
+         ch_via_ptr = Reachable;
+      else
+         ch_via_ptr = Possible;
+   } else
+      ch_via_ptr = Possible;
+         
+   if (ch_via_ptr == Reachable && is_prior_definite) {
+      // 'ptr' points to the start of the block or is to be considered as
+      // pointing to the start of the block, and the prior node is
       // definite, which means that this block is definitely reachable.
       ex->state = Reachable;
 
@@ -657,7 +875,8 @@
 }
 
 static void
-lc_push_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique, Bool is_prior_definite)
+lc_push_if_a_chunk_ptr(Addr ptr,
+                       Int clique, Int cur_clique, Bool is_prior_definite)
 {
    if (-1 == clique) 
       lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
@@ -703,11 +922,12 @@
 // 2. Search ptr mode (searched != 0).
 // -----------------------------------
 // In this mode, searches for pointers to a specific address range 
-// In such a case, lc_scan_memory just scans [start..start+len[ for pointers to searched
-// and outputs the places where searched is found. It does not recursively scans the
-// found memory.
+// In such a case, lc_scan_memory just scans [start..start+len[ for pointers
+// to searched and outputs the places where searched is found.
+// It does not recursively scan the found memory.
 static void
-lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique, Int cur_clique,
+lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
+               Int clique, Int cur_clique,
                Addr searched, SizeT szB)
 {
    /* memory scan is based on the assumption that valid pointers are aligned
@@ -795,12 +1015,28 @@
          // let's see if its contents point to a chunk.
          if (UNLIKELY(searched)) {
             if (addr >= searched && addr < searched + szB) {
-               if (addr == searched)
+               if (addr == searched) {
                   VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
-               else
+                  MC_(pp_describe_addr) (ptr);
+               } else {
+                  Int ch_no;
+                  MC_Chunk *ch;
+                  LC_Extra *ex;
                   VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
                             ptr, (long unsigned) addr - searched, searched);
-               MC_(pp_describe_addr) (ptr);
+                  MC_(pp_describe_addr) (ptr);
+                  if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
+                     Int h;
+                     for (h = LchStdString; h <= LchMultipleInheritance; h++) {
+                        if (heuristic_reachedness(addr, ch, ex, H2S(h)) == h) {
+                           VG_(umsg)("block at %#lx considered reachable "
+                                     "by ptr %#lx using %s heuristic\n",
+                                     ch->data, addr, pp_heuristic(h));
+                        }
+                     }
+                     tl_assert (h == N_LEAK_CHECK_HEURISTICS);
+                  }
+               }
             }
          } else {
             lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
@@ -947,7 +1183,8 @@
    Int          i, n_lossrecords, start_lr_output_scan;
    LossRecord*  lr;
    Bool         is_suppressed;
-   SizeT        old_bytes_leaked      = MC_(bytes_leaked); /* to report delta in summary */
+   /* old_* variables are used to report delta in summary.  */
+   SizeT        old_bytes_leaked      = MC_(bytes_leaked);
    SizeT        old_bytes_indirect    = MC_(bytes_indirect); 
    SizeT        old_bytes_dubious     = MC_(bytes_dubious); 
    SizeT        old_bytes_reachable   = MC_(bytes_reachable); 
@@ -958,6 +1195,18 @@
    SizeT        old_blocks_reachable  = MC_(blocks_reachable);
    SizeT        old_blocks_suppressed = MC_(blocks_suppressed);
 
+   SizeT old_bytes_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
+   SizeT old_blocks_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
+
+   for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++) {
+      old_bytes_heuristically_reachable[i]   
+         =  MC_(bytes_heuristically_reachable)[i];
+      MC_(bytes_heuristically_reachable)[i] = 0;
+      old_blocks_heuristically_reachable[i]  
+         =  MC_(blocks_heuristically_reachable)[i];
+      MC_(blocks_heuristically_reachable)[i] = 0;
+   }
+
    if (lr_table == NULL)
       // Create the lr_table, which holds the loss records.
       // If the lr_table already exists, it means it contains
@@ -1003,6 +1252,15 @@
       lrkey.state        = ex->state;
       lrkey.allocated_at = MC_(allocated_at)(ch);
 
+      if (ex->heuristic) {
+         MC_(bytes_heuristically_reachable)[ex->heuristic] += ch->szB;
+         MC_(blocks_heuristically_reachable)[ex->heuristic]++;
+         if (VG_DEBUG_LEAKCHECK)
+            VG_(printf)("heuristic %s %#lx len %lu\n",
+                        pp_heuristic(ex->heuristic),
+                        ch->data, (unsigned long)ch->szB);
+      }
+
       old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
       if (old_lr) {
          // We found an existing loss record matching this chunk.  Update the
@@ -1108,45 +1366,68 @@
    if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
       HChar d_bytes[20];
       HChar d_blocks[20];
+#     define DBY(new,old) \
+      MC_(snprintf_delta) (d_bytes, 20, (new), (old), lcp->deltamode)
+#     define DBL(new,old) \
+      MC_(snprintf_delta) (d_blocks, 20, (new), (old), lcp->deltamode)
 
       VG_(umsg)("LEAK SUMMARY:\n");
       VG_(umsg)("   definitely lost: %'lu%s bytes in %'lu%s blocks\n",
                 MC_(bytes_leaked), 
-                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_leaked), old_bytes_leaked, lcp->deltamode),
+                DBY (MC_(bytes_leaked), old_bytes_leaked),
                 MC_(blocks_leaked),
-                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_leaked), old_blocks_leaked, lcp->deltamode));
+                DBL (MC_(blocks_leaked), old_blocks_leaked));
       VG_(umsg)("   indirectly lost: %'lu%s bytes in %'lu%s blocks\n",
                 MC_(bytes_indirect), 
-                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_indirect), old_bytes_indirect, lcp->deltamode),
+                DBY (MC_(bytes_indirect), old_bytes_indirect),
                 MC_(blocks_indirect),
-                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_indirect), old_blocks_indirect, lcp->deltamode) );
+                DBL (MC_(blocks_indirect), old_blocks_indirect));
       VG_(umsg)("     possibly lost: %'lu%s bytes in %'lu%s blocks\n",
                 MC_(bytes_dubious), 
-                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_dubious), old_bytes_dubious, lcp->deltamode), 
+                DBY (MC_(bytes_dubious), old_bytes_dubious), 
                 MC_(blocks_dubious),
-                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_dubious), old_blocks_dubious, lcp->deltamode) );
+                DBL (MC_(blocks_dubious), old_blocks_dubious));
       VG_(umsg)("   still reachable: %'lu%s bytes in %'lu%s blocks\n",
                 MC_(bytes_reachable), 
-                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_reachable), old_bytes_reachable, lcp->deltamode), 
+                DBY (MC_(bytes_reachable), old_bytes_reachable), 
                 MC_(blocks_reachable),
-                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_reachable), old_blocks_reachable, lcp->deltamode) );
+                DBL (MC_(blocks_reachable), old_blocks_reachable));
+      for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
+         if (old_blocks_heuristically_reachable[i] > 0 
+             || MC_(blocks_heuristically_reachable)[i] > 0) {
+            VG_(umsg)("                      of which "
+                      "reachable via heuristic:\n");
+            break;
+         }
+      for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
+         if (old_blocks_heuristically_reachable[i] > 0 
+             || MC_(blocks_heuristically_reachable)[i] > 0)
+            VG_(umsg)("                        %19s: "
+                      "%'lu%s bytes in %'lu%s blocks\n",
+                      pp_heuristic(i),
+                      MC_(bytes_heuristically_reachable)[i], 
+                      DBY (MC_(bytes_heuristically_reachable)[i],
+                           old_bytes_heuristically_reachable[i]), 
+                      MC_(blocks_heuristically_reachable)[i],
+                      DBL (MC_(blocks_heuristically_reachable)[i],
+                           old_blocks_heuristically_reachable[i]));
       VG_(umsg)("        suppressed: %'lu%s bytes in %'lu%s blocks\n",
                 MC_(bytes_suppressed), 
-                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_suppressed), old_bytes_suppressed, lcp->deltamode), 
+                DBY (MC_(bytes_suppressed), old_bytes_suppressed), 
                 MC_(blocks_suppressed),
-                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_suppressed), old_blocks_suppressed, lcp->deltamode) );
+                DBL (MC_(blocks_suppressed), old_blocks_suppressed));
       if (lcp->mode != LC_Full &&
           (MC_(blocks_leaked) + MC_(blocks_indirect) +
            MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
          if (lcp->requested_by_monitor_command)
-            VG_(umsg)("To see details of leaked memory, give 'full' arg to leak_check\n");
+            VG_(umsg)("To see details of leaked memory, "
+                      "give 'full' arg to leak_check\n");
          else
             VG_(umsg)("Rerun with --leak-check=full to see details "
                       "of leaked memory\n");
       }
       if (lcp->mode == LC_Full &&
-          MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds))
-      {
+          MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds)) {
          VG_(umsg)("Reachable blocks (those to which a pointer "
                    "was found) are not shown.\n");
          if (lcp->requested_by_monitor_command)
@@ -1156,6 +1437,8 @@
                       "--show-leak-kinds=all\n");
       }
       VG_(umsg)("\n");
+      #undef DBL
+      #undef DBY
    }
 }
 
@@ -1169,7 +1452,8 @@
 
    for (ind = 0; ind < lc_n_chunks; ind++) {
       LC_Extra*     ind_ex = &(lc_extras)[ind];
-      if (ind_ex->state == IndirectLeak && ind_ex->IorC.clique == (SizeT) clique) {
+      if (ind_ex->state == IndirectLeak 
+          && ind_ex->IorC.clique == (SizeT) clique) {
          MC_Chunk*    ind_ch = lc_chunks[ind];
          LossRecord*  ind_lr;
          LossRecordKey ind_lrkey;
@@ -1232,7 +1516,7 @@
       old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
       if (old_lr) {
          // We found an existing loss record matching this chunk.
-         // If this is the loss record we are looking for, then output the pointer.
+         // If this is the loss record we are looking for, output the pointer.
          if (old_lr == lr_array[loss_record_nr]) {
             VG_(umsg)("%p[%lu]\n",
                       (void *)ch->data, (unsigned long) ch->szB);
@@ -1257,8 +1541,8 @@
 
 // If searched = 0, scan memory root set, pushing onto the mark stack the blocks
 // encountered.
-// Otherwise (searched != 0), scan the memory root set searching for ptr pointing
-// inside [searched, searched+szB[.
+// Otherwise (searched != 0), scan the memory root set searching for ptr
+// pointing inside [searched, searched+szB[.
 static void scan_memory_root_set(Addr searched, SizeT szB)
 {
    Int   i;
@@ -1332,6 +1616,7 @@
 
 
    MC_(detect_memory_leaks_last_delta_mode) = lcp->deltamode;
+   detect_memory_leaks_last_heuristics = lcp->heuristics;
 
    // Get the chunks, stop if there were none.
    if (lc_chunks) {
@@ -1438,6 +1723,7 @@
    for (i = 0; i < lc_n_chunks; i++) {
       lc_extras[i].state        = Unreached;
       lc_extras[i].pending      = False;
+      lc_extras[i].heuristic    = LchNone;
       lc_extras[i].IorC.indirect_szB = 0;
    }
 
@@ -1540,11 +1826,12 @@
       VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
                  szB, address);
 
+   chunks = find_active_chunks(&n_chunks);
+
    // Scan memory root-set, searching for ptr pointing in address[szB]
    scan_memory_root_set(address, szB);
 
    // Scan active malloc-ed chunks
-   chunks = find_active_chunks(&n_chunks);
    for (i = 0; i < n_chunks; i++) {
       lc_scan_memory(chunks[i]->data, chunks[i]->szB,
                      /*is_prior_definite*/True,