Implement --redzone-size and --core-redzone-size

* For tools replacing the malloc library (e.g. Memcheck, Helgrind, ...),
  the option --redzone-size=<number> allows control of the size of the
  padding blocks (redzones) added before and after each client-allocated
  block. Smaller redzones decrease the memory needed by Valgrind. Bigger
  redzones increase the chance of detecting block overruns or underruns.

* Similarly, --core-redzone-size=<number> controls the redzone size used
  for Valgrind's own internal (core) heap allocations.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12807 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/memcheck/mc_errors.c b/memcheck/mc_errors.c
index 5084f28..84d9045 100644
--- a/memcheck/mc_errors.c
+++ b/memcheck/mc_errors.c
@@ -1082,7 +1082,7 @@
 Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
 {
    return VG_(addr_is_in_block)( a, mc->data, mc->szB,
-                                 MC_MALLOC_REDZONE_SZB );
+                                 MC_(Malloc_Redzone_SzB) );
 }
 static
 Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
diff --git a/memcheck/mc_include.h b/memcheck/mc_include.h
index 017868e..d035e5e 100644
--- a/memcheck/mc_include.h
+++ b/memcheck/mc_include.h
@@ -42,8 +42,12 @@
 /*--- Tracking the heap                                    ---*/
 /*------------------------------------------------------------*/
 
-/* We want at least a 16B redzone on client heap blocks for Memcheck */
-#define MC_MALLOC_REDZONE_SZB    16
+/* By default, we want at least a 16B redzone on client heap blocks
+   for Memcheck.
+   The default can be modified by --redzone-size. */
+#define MC_MALLOC_DEFAULT_REDZONE_SZB    16
+// effective redzone, as (possibly) modified by --redzone-size:
+extern SizeT MC_(Malloc_Redzone_SzB);
 
 /* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
 typedef
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 114afcd..faa3911 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -6335,7 +6335,8 @@
                                    MC_(__builtin_vec_delete),
                                    MC_(realloc),
                                    MC_(malloc_usable_size), 
-                                   MC_MALLOC_REDZONE_SZB );
+                                   MC_MALLOC_DEFAULT_REDZONE_SZB );
+   MC_(Malloc_Redzone_SzB) = VG_(malloc_effective_client_redzone_size)();
 
    VG_(needs_xml_output)          ();
 
diff --git a/memcheck/mc_malloc_wrappers.c b/memcheck/mc_malloc_wrappers.c
index 7399c86..3c2086d 100644
--- a/memcheck/mc_malloc_wrappers.c
+++ b/memcheck/mc_malloc_wrappers.c
@@ -63,6 +63,8 @@
 /*--- Tracking malloc'd and free'd blocks                  ---*/
 /*------------------------------------------------------------*/
 
+SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB
+
 /* Record malloc'd blocks. */
 VgHashTable MC_(malloc_list) = NULL;
 
@@ -174,7 +176,7 @@
       mc = freed_list_start[i];
       while (mc) {
          if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
-                                    MC_MALLOC_REDZONE_SZB ))
+                                    MC_(Malloc_Redzone_SzB) ))
             return mc;
          mc = mc->next;
       }
@@ -387,19 +389,19 @@
 void MC_(free) ( ThreadId tid, void* p )
 {
    MC_(handle_free)( 
-      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
+      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
 }
 
 void MC_(__builtin_delete) ( ThreadId tid, void* p )
 {
    MC_(handle_free)(
-      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
+      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
 }
 
 void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
 {
    MC_(handle_free)(
-      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
+      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
 }
 
 void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
@@ -454,10 +456,10 @@
          tl_assert(ec);
 
          /* Retained part is copied, red zones set as normal */
-         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, 
-                                 MC_MALLOC_REDZONE_SZB );
+         MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), 
+                                 MC_(Malloc_Redzone_SzB) );
          MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
-         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
+         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));
 
          /* Copy from old to new */
          VG_(memcpy)((void*)a_new, p_old, new_szB);
@@ -472,7 +474,7 @@
          /* Nb: we have to allocate a new MC_Chunk for the new memory rather
             than recycling the old one, so that any erroneous accesses to the
             old memory are reported. */
-         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
+         die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) );
 
          // Allocate a new chunk.
          mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
@@ -497,12 +499,12 @@
          tl_assert(VG_(is_plausible_ECU)(ecu));
 
          /* First half kept and copied, second half new, red zones as normal */
-         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, 
-                                 MC_MALLOC_REDZONE_SZB );
+         MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), 
+                                 MC_(Malloc_Redzone_SzB) );
          MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
          MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                                         ecu | MC_OKIND_HEAP );
-         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
+         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB) );
 
          /* Possibly fill new area with specified junk */
          if (MC_(clo_malloc_fill) != -1) {
@@ -525,7 +527,7 @@
          /* Nb: we have to allocate a new MC_Chunk for the new memory rather
             than recycling the old one, so that any erroneous accesses to the
             old memory are reported. */
-         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
+         die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) );
 
          // Allocate a new chunk.
          mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
diff --git a/memcheck/tests/Makefile.am b/memcheck/tests/Makefile.am
index 96b3167..d0470aa 100644
--- a/memcheck/tests/Makefile.am
+++ b/memcheck/tests/Makefile.am
@@ -74,6 +74,8 @@
 	clientperm.stdout.exp clientperm.vgtest \
 	clireq_nofill.stderr.exp \
 	clireq_nofill.stdout.exp clireq_nofill.vgtest \
+	clo_redzone_default.vgtest clo_redzone_128.vgtest \
+	clo_redzone_default.stderr.exp clo_redzone_128.stderr.exp \
 	custom_alloc.stderr.exp custom_alloc.vgtest custom_alloc.stderr.exp-s390x-mvc \
 	custom-overlap.stderr.exp custom-overlap.vgtest \
 	deep-backtrace.vgtest deep-backtrace.stderr.exp \
@@ -240,6 +242,7 @@
 	calloc-overflow \
 	clientperm \
 	clireq_nofill \
+	clo_redzone \
 	custom_alloc \
 	custom-overlap \
 	deep-backtrace \
diff --git a/memcheck/tests/clo_redzone.c b/memcheck/tests/clo_redzone.c
new file mode 100644
index 0000000..5733ffa
--- /dev/null
+++ b/memcheck/tests/clo_redzone.c
@@ -0,0 +1,17 @@
+#include <stdio.h>
+#include <stdlib.h>
+int main()
+{
+   __attribute__((unused)) char *p = malloc (1);
+   char *b1 = malloc (128);
+   char *b2 = malloc (128);
+   fprintf (stderr, "b1 %p b2 %p\n", b1, b2);
+
+   // Try to land in b2 from b1, causing no error
+   // with the default redzone-size, but having
+   // an error with a bigger redzone-size.
+   // We need to choose a value which lands in b2
+   // on 32 bits and 64 bits.
+   b1[127 + 70] = 'a';
+   return 0;
+}
diff --git a/memcheck/tests/clo_redzone_128.stderr.exp b/memcheck/tests/clo_redzone_128.stderr.exp
new file mode 100644
index 0000000..08b360d
--- /dev/null
+++ b/memcheck/tests/clo_redzone_128.stderr.exp
@@ -0,0 +1,7 @@
+b1 0x........ b2 0x........
+Invalid write of size 1
+   ...
+ Address 0x........ is 69 bytes after a block of size 128 alloc'd
+   at 0x........: malloc (vg_replace_malloc.c:...)
+   ...
+
diff --git a/memcheck/tests/clo_redzone_128.vgtest b/memcheck/tests/clo_redzone_128.vgtest
new file mode 100644
index 0000000..6b7b2a6
--- /dev/null
+++ b/memcheck/tests/clo_redzone_128.vgtest
@@ -0,0 +1,2 @@
+vgopts: --leak-check=no -q --redzone-size=128
+prog: clo_redzone
diff --git a/memcheck/tests/clo_redzone_default.stderr.exp b/memcheck/tests/clo_redzone_default.stderr.exp
new file mode 100644
index 0000000..f86f233
--- /dev/null
+++ b/memcheck/tests/clo_redzone_default.stderr.exp
@@ -0,0 +1 @@
+b1 0x........ b2 0x........
diff --git a/memcheck/tests/clo_redzone_default.vgtest b/memcheck/tests/clo_redzone_default.vgtest
new file mode 100644
index 0000000..fc63752
--- /dev/null
+++ b/memcheck/tests/clo_redzone_default.vgtest
@@ -0,0 +1,2 @@
+vgopts: --leak-check=no -q
+prog: clo_redzone
diff --git a/memcheck/tests/x86-linux/scalar.stderr.exp b/memcheck/tests/x86-linux/scalar.stderr.exp
index 364e6e7..54616db 100644
--- a/memcheck/tests/x86-linux/scalar.stderr.exp
+++ b/memcheck/tests/x86-linux/scalar.stderr.exp
@@ -2116,7 +2116,9 @@
 Syscall param rt_sigaction(act->sa_mask) points to unaddressable byte(s)
    ...
    by 0x........: main (scalar.c:776)
- Address 0x........ is not stack'd, malloc'd or (recently) free'd
+ Address 0x........ is 16 bytes after a block of size 4 alloc'd
+   at 0x........: malloc (vg_replace_malloc.c:...)
+   by 0x........: main (scalar.c:30)
 
 Syscall param rt_sigaction(act->sa_flags) points to unaddressable byte(s)
    ...