Allow regression tests to be run in an outer/inner setup.

A three-line how-to:
   perl tests/vg_regtest --outer-valgrind=../trunk_untouched/install/bin/valgrind --all
           (the outer results for a test xxx are in xxx.outer.log)
   To run with another tool (e.g. drd), add the argument --outer-tool=drd
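   For example, combining the two arguments shown above:
      perl tests/vg_regtest --outer-valgrind=../trunk_untouched/install/bin/valgrind --outer-tool=drd --all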


Still to do/things to improve:

* Most (inner) tests are successful when running under an outer
  memcheck. The reasons for the remaining failures still need to be analysed.

* The memcheck annotations in m_mallocfree.c can be improved:
  - A superblock is marked 'undefined'; it should rather be marked
    'no access'.
  - When a free block is split, the remaining free block is
    not made 'no access'. Instead, it is made 'undefined'.
      => this decreases the chance of finding bugs.
      => this is not very efficient (e.g. the rest of a superblock
         is often marked undefined repeatedly).
    Similarly, the free block created by VG_(arena_memalign)
    is marked 'undefined'. 'No access' would be preferable.
  - mkInuseBlock marks the new block as undefined. This is probably
    not needed, as VALGRIND_MALLOCLIKE_BLOCK will do it already.
  - VG_(arena_malloc) should give the requested size to
    VALGRIND_MALLOCLIKE_BLOCK, not the malloc usable size,
    as using the usable size decreases the chance of finding
    buffer overrun bugs. But giving the requested size is
    tricky (see comments in the code).
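
  As an illustration of the 'no access' vs 'undefined' distinction above,
  here is a standalone sketch (a toy function with made-up names, using the
  public memcheck client requests; it is not the actual m_mallocfree.c code,
  which would wrap such calls in INNER_REQUEST):

     #include <stddef.h>
     #include <valgrind/memcheck.h>  /* VALGRIND_MAKE_MEM_* client requests */

     /* Toy allocator split: the part handed to the caller becomes
        'undefined' (addressable, contents unknown), while the leftover
        free part is made 'no access', so that any stray access to
        free-list memory is reported by an (outer) memcheck. */
     static void split_free_chunk(unsigned char* chunk, size_t total,
                                  size_t used)
     {
        VALGRIND_MAKE_MEM_UNDEFINED(chunk, used);
        VALGRIND_MAKE_MEM_NOACCESS(chunk + used, total - used);
     }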

* Memcheck annotations are needed in m_poolalloc.c,
  so as to allow leak checking of pool-allocated elements.
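
  As a sketch of what this could look like, here is the generic memcheck
  mempool annotation pattern (standalone, with made-up names Pool, pool_new
  and pool_alloc; the real m_poolalloc.c code would presumably wrap the
  requests in INNER_REQUEST, as m_mallocfree.c now does):

     #include <stdlib.h>
     #include <valgrind/valgrind.h>  /* VALGRIND_*_MEMPOOL client requests */

     typedef struct { unsigned char* mem; size_t used, size; } Pool;

     static Pool* pool_new(size_t size)
     {
        Pool* p = malloc(sizeof(Pool));
        p->mem  = malloc(size);
        p->used = 0;
        p->size = size;
        /* Declare the pool; its address identifies the pool. */
        VALGRIND_CREATE_MEMPOOL(p, /*rzB*/0, /*is_zeroed*/0);
        return p;
     }

     static void* pool_alloc(Pool* p, size_t n)
     {
        void* elt = p->mem + p->used;
        p->used += n;
        /* Register the element so that leak checking and accessibility
           tracking work for pool-allocated elements. */
        VALGRIND_MEMPOOL_ALLOC(p, elt, n);
        return elt;
     }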

* vg_regtest.in
  - should analyse the outer results and produce a separate
    result for the tests for which the outer detects an error
    or a memory leak or ...


Changes done:
   README_DEVELOPERS: document the new outer/inner features.
   manual-core.xml: document the new sim-hint no-inner-prefix.
   tests/outer_inner.supp: new file, containing the suppressions for the inner.
   vg_regtest.in: implement the new args --outer-valgrind, --outer-tool, --outer-args.
   m_mallocfree.c: annotations for memcheck.
   m_libcprint.c: handle the new sim-hint no-inner-prefix.
   m_main.c: do an (early) parse of --sim-hints.
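   For instance, an inner valgrind run in such a setup would typically be
   started with something like the following (illustrative invocation only;
   ./some_test is a placeholder and further options are elided):
      valgrind --tool=memcheck --sim-hints=no-inner-prefix ./some_test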




git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12441 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/m_libcprint.c b/coregrind/m_libcprint.c
index b9b24f6..3657da4 100644
--- a/coregrind/m_libcprint.c
+++ b/coregrind/m_libcprint.c
@@ -396,11 +396,16 @@
 
       // Print one '>' in front of the messages for each level of
       // self-hosting being performed.
+      // Do not print such '>' if the sim hint "no-inner-prefix" is given
+      // (useful when running regression tests in an outer/inner setup,
+      // to avoid diffs failing due to these unexpected '>').
       depth = RUNNING_ON_VALGRIND;
-      if (depth > 10)
-         depth = 10; // ?!?!
-      for (i = 0; i < depth; i++) {
-         b->buf[b->buf_used++] = '>';
+      if (depth > 0 && !VG_(strstr)(VG_(clo_sim_hints), "no-inner-prefix")) {
+         if (depth > 10)
+            depth = 10; // ?!?!
+         for (i = 0; i < depth; i++) {
+            b->buf[b->buf_used++] = '>';
+         }
       }
 
       if (Vg_FailMsg == b->kind) {
diff --git a/coregrind/m_main.c b/coregrind/m_main.c
index 7bdb5bf..094e884 100644
--- a/coregrind/m_main.c
+++ b/coregrind/m_main.c
@@ -294,6 +294,7 @@
    - get the toolname (--tool=)
    - set VG_(clo_max_stackframe) (--max-stackframe=)
    - set VG_(clo_main_stacksize) (--main-stacksize=)
+   - set VG_(clo_sim_hints) (--sim-hints=)
 
    That's all it does.  The main command line processing is done below
    by main_process_cmd_line_options.  Note that
@@ -334,6 +335,11 @@
       // before main_process_cmd_line_options().
       else if VG_INT_CLO(str, "--max-stackframe", VG_(clo_max_stackframe)) {}
       else if VG_INT_CLO(str, "--main-stacksize", VG_(clo_main_stacksize)) {}
+
+      // Set up VG_(clo_sim_hints). This is needed, among other things,
+      // for an inner running in an outer, to have "no-inner-prefix"
+      // enabled as early as possible.
+      else if VG_STR_CLO (str, "--sim-hints",     VG_(clo_sim_hints)) {}
    }
 }
 
@@ -451,6 +457,7 @@
       else if VG_STREQ(     arg, "-d")                   {}
       else if VG_STREQN(16, arg, "--max-stackframe")     {}
       else if VG_STREQN(16, arg, "--main-stacksize")     {}
+      else if VG_STREQN(11, arg,  "--sim-hints")         {}
       else if VG_STREQN(14, arg, "--profile-heap")       {}
 
       // These options are new.
@@ -514,7 +521,6 @@
       else if VG_BOOL_CLO(arg, "--trace-syscalls",   VG_(clo_trace_syscalls)) {}
       else if VG_BOOL_CLO(arg, "--wait-for-gdb",     VG_(clo_wait_for_gdb)) {}
       else if VG_STR_CLO (arg, "--db-command",       VG_(clo_db_command)) {}
-      else if VG_STR_CLO (arg, "--sim-hints",        VG_(clo_sim_hints)) {}
       else if VG_BOOL_CLO(arg, "--sym-offsets",      VG_(clo_sym_offsets)) {}
       else if VG_BOOL_CLO(arg, "--read-var-info",    VG_(clo_read_var_info)) {}
 
diff --git a/coregrind/m_mallocfree.c b/coregrind/m_mallocfree.c
index 010c4c9..40c12f6 100644
--- a/coregrind/m_mallocfree.c
+++ b/coregrind/m_mallocfree.c
@@ -42,9 +42,11 @@
 #include "pub_core_threadstate.h"   // For VG_INVALID_THREADID
 #include "pub_core_transtab.h"
 #include "pub_core_tooliface.h"
-#include "valgrind.h"
 
-//zz#include "memcheck/memcheck.h"
+#include "pub_tool_inner.h"
+#if defined(ENABLE_INNER_CLIENT_REQUEST)
+#include "memcheck/memcheck.h"
+#endif
 
 // #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
 // #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery
@@ -776,7 +778,7 @@
       }
    }
    vg_assert(NULL != sb);
-   //zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
+   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(sb, cszB));
    vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
    sb->n_payload_bytes = cszB - sizeof(Superblock);
    sb->unsplittable = (unsplittable ? sb : NULL);
@@ -1042,6 +1044,12 @@
    // The lo and hi size fields will be checked (indirectly) by the call
    // to get_rz_hi_byte().
    if (!a->clientmem && is_inuse_block(b)) {
+      // In the inner, for memcheck's sake, temporarily mark the redzones accessible.
+      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED
+                    (b + hp_overhead_szB() + sizeof(SizeT), a->rz_szB));
+      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED
+                    (b + get_bszB(b)
+                     - sizeof(SizeT) - a->rz_szB, a->rz_szB));
       for (i = 0; i < a->rz_szB; i++) {
          if (get_rz_lo_byte(b, i) != 
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
@@ -1050,6 +1058,11 @@
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
                {BLEAT("redzone-hi");return False;}
       }      
+      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS
+                    (b + hp_overhead_szB() + sizeof(SizeT), a->rz_szB));
+      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS
+                    (b + get_bszB(b)
+                     - sizeof(SizeT) - a->rz_szB, a->rz_szB));
    }
    return True;
 #  undef BLEAT
@@ -1338,7 +1351,7 @@
 {
    SizeT pszB = bszB_to_pszB(a, bszB);
    vg_assert(b_lno == pszB_to_listNo(pszB));
-   //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
+   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
    // Set the size fields and indicate not-in-use.
    set_bszB(b, mk_free_bszB(bszB));
 
@@ -1367,7 +1380,7 @@
 {
    UInt i;
    vg_assert(bszB >= min_useful_bszB(a));
-   //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
+   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
    set_bszB(b, mk_inuse_bszB(bszB));
    set_prev_b(b, NULL);    // Take off freelist
    set_next_b(b, NULL);    // ditto
@@ -1594,7 +1607,26 @@
    v = get_block_payload(a, b);
    vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
 
-   /* VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False); */
+   // Which size should we pass to VALGRIND_MALLOCLIKE_BLOCK ?
+   // We have 2 possible options:
+   // 1. The final resulting usable size.
+   // 2. The initial (non-aligned) req_pszB.
+   // Memcheck implements option 2 easily, as the initial requested size
+   // is maintained in the mc_chunk data structure.
+   // This is not as easy in the core, as there is no such structure.
+   // (note: using the aligned req_pszB is not simpler than option 2, as
+   //  a request for an aligned req_pszB might still be satisfied by
+   //  returning a (slightly) bigger block than requested if the remaining
+   //  part of a free block is not big enough to make a free block by itself).
+   // Option 2 could be implemented the following way:
+   // after having called VALGRIND_MALLOCLIKE_BLOCK, the non-accessible
+   // redzone just after the block can be used to determine the
+   // initial requested size.
+   // Currently not implemented => we use option 1.
+   INNER_REQUEST
+      (VALGRIND_MALLOCLIKE_BLOCK(v, 
+                                 VG_(arena_malloc_usable_size)(aid, v), 
+                                 a->rz_szB, False));
 
    /* For debugging/testing purposes, fill the newly allocated area
       with a definite value in an attempt to shake out any
@@ -1788,6 +1820,31 @@
          deferred_reclaimSuperblock (a, sb);
       }
 
+      // Inform that ptr has been released. We give redzone size
+      // 0 instead of a->rz_szB, as the proper accessibility is set just after.
+      INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
+      
+      // We need to (re-)establish the minimum accessibility needed
+      // for free list management. E.g. if block ptr has been put in a free
+      // list and a neighbour block is released afterwards, the
+      // "lo" and "hi" portions of the block ptr will be accessed to
+      // glue the 2 blocks together.
+      // We could mark the whole block as not accessible, and each time
+      // transiently mark accessible only the needed lo/hi parts. This is not
+      // done, as it is quite complex for very little expected additional bug
+      // detection. Note that the below marks the (possibly) merged
+      // block, not the block corresponding to the ptr argument.
+
+      // First mark the whole block unaccessible.
+      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(b, b_bszB));
+      // Then mark the relevant administrative headers as defined.
+      // No need to mark the heap profile portion as defined, this is not
+      // used for free blocks.
+      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + hp_overhead_szB(),
+                                              sizeof(SizeT) + sizeof(void*)));
+      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + b_bszB
+                                              - sizeof(SizeT) - sizeof(void*),
+                                              sizeof(SizeT) + sizeof(void*)));
    } else {
       // b must be first block (i.e. no unused bytes at the beginning)
       vg_assert((Block*)sb_start == b);
@@ -1796,6 +1853,12 @@
       other_b = b + b_bszB;
       vg_assert(other_b-1 == (Block*)sb_end);
 
+      // Inform that ptr has been released. The redzone size value
+      // is not relevant (so we give 0 instead of a->rz_szB),
+      // as it is expected that the aspacemgr munmap will be used by
+      // the outer to mark the whole superblock as unaccessible.
+      INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
+
       // Reclaim immediately the unsplittable superblock sb.
       reclaimSuperblock (a, sb);
    }
@@ -1804,7 +1867,6 @@
    sanity_check_malloc_arena(aid);
 #  endif
 
-   //zzVALGRIND_FREELIKE_BLOCK(ptr, 0);
 }
 
 
@@ -1897,6 +1959,11 @@
    /* Give up if we couldn't allocate enough space */
    if (base_p == 0)
       return 0;
+   /* base_p was marked as allocated by VALGRIND_MALLOCLIKE_BLOCK
+      inside VG_(arena_malloc). We need to indicate it is free, then
+      we need to mark it undefined to allow the code below to access it. */
+   INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(base_p, a->rz_szB));
+   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(base_p, base_pszB_req));
 
    /* Block ptr for the block we are going to split. */
    base_b = get_payload_block ( a, base_p );
@@ -1949,7 +2016,8 @@
 
    vg_assert( (((Addr)align_p) % req_alignB) == 0 );
 
-   //zzVALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
+   INNER_REQUEST(VALGRIND_MALLOCLIKE_BLOCK(align_p,
+                                           req_pszB, a->rz_szB, False));
 
    return align_p;
 }
@@ -2041,8 +2109,6 @@
 
    VG_(memset)(p, 0, size);
 
-   //zzVALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
-
    return p;
 }