Added VALGRIND_MALLOCLIKE_BLOCK and VALGRIND_FREELIKE_BLOCK, which allow you
to use a custom allocator and still detect almost as many errors as you could
if you used malloc/new/new[] (eg. leaks, invalid frees, mismatched frees,
etc).

Had to fiddle with mac_malloc_wrappers.c a bit to factor out the appropriate
code so that it can be called from the client request handling code.  Also
had to add a new element `MAC_AllocCustom' to the MAC_AllocKind type.

Also added a little documentation, and a regression test.
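
As a rough illustration (a minimal sketch only -- the names `my_alloc',
`my_free' and the pool details below are made up for this note; see
memcheck/tests/custom_alloc.c for the real regression test), a custom
allocator uses the new requests something like this:

    #include <sys/mman.h>
    #include "memcheck.h"           /* ie. memcheck/memcheck.h */

    #define POOL_SIZE  65536        /* hypothetical arena size */
    #define RZ         8            /* redzone size, in bytes */

    static char* pool     = NULL;   /* bump-allocated arena */
    static char* pool_end = NULL;

    /* Hand out a chunk of the arena, then tell Memcheck about it. */
    static void* my_alloc(unsigned n)
    {
       char* p;
       if (pool == NULL || pool + n + 2*RZ > pool_end) {
          /* grab a fresh superblock (error checking omitted) */
          pool     = mmap(0, POOL_SIZE, PROT_READ|PROT_WRITE,
                          MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
          pool_end = pool + POOL_SIZE;
       }
       p     = pool + RZ;           /* usable block starts after a redzone */
       pool += n + 2*RZ;
       /* anonymous mmap'd memory is zero-filled, hence is_zeroed == 1 */
       VALGRIND_MALLOCLIKE_BLOCK(p, n, RZ, /*is_zeroed*/1);
       return p;
    }

    /* Mark the chunk as freed so leaks and mismatched frees are reported;
       a real allocator would also recycle the memory here. */
    static void my_free(void* p)
    {
       VALGRIND_FREELIKE_BLOCK(p, RZ);
    }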


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1643 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/docs/coregrind_core.html b/coregrind/docs/coregrind_core.html
index 2ce6162..edf4d49 100644
--- a/coregrind/docs/coregrind_core.html
+++ b/coregrind/docs/coregrind_core.html
@@ -882,6 +882,18 @@
     were leaked, dubious, reachable and suppressed.  Again, useful for
     test harness code.
 <p>
+<li><code>VALGRIND_MALLOCLIKE_BLOCK</code>: If your program manages its own
+    memory instead of using the standard
+    <code>malloc()</code>/<code>new</code>/<code>new[]</code>, Memcheck will
+    not detect nearly as many errors, and the error messages won't be as
+    informative.  To improve this situation, use this macro just after your
+    custom allocator allocates some new memory.  See the comments in
+    <code>memcheck/memcheck.h</code> for information on how to use it.
+<p>
+<li><code>VALGRIND_FREELIKE_BLOCK</code>: This should be used in conjunction 
+    with <code>VALGRIND_MALLOCLIKE_BLOCK</code>.  Again, see
+    <code>memcheck/memcheck.h</code> for information on how to use it.  
+<p>
 </ul>
 <p>
 
diff --git a/memcheck/mac_malloc_wrappers.c b/memcheck/mac_malloc_wrappers.c
index 5d22cce..239f2c1 100644
--- a/memcheck/mac_malloc_wrappers.c
+++ b/memcheck/mac_malloc_wrappers.c
@@ -115,13 +115,9 @@
    return NULL;
 }
 
-/* Allocate a user-chunk of size bytes.  Also allocate its shadow
-   block, make the shadow block point at the user block.  Put the
-   shadow chunk on the appropriate list, and set all memory
-   protections correctly. */
-
-static void add_MAC_Chunk ( ThreadState* tst,
-                            Addr p, UInt size, MAC_AllocKind kind )
+/* Allocate a shadow chunk for the client block and put it on the
+   appropriate list. */
+static
+void add_MAC_Chunk ( ThreadState* tst, Addr p, UInt size, MAC_AllocKind kind )
 {
    MAC_Chunk* mc;
 
@@ -145,28 +141,22 @@
 void (*MAC_(copy_mem_heap))( Addr from, Addr to, UInt len );
 
 /* Allocate memory and note change in memory available */
-static __inline__
-void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
-                          Bool is_zeroed, MAC_AllocKind kind )
+__inline__
+void MAC_(new_block) ( ThreadState* tst, Addr p, UInt size,
+                        UInt rzB, Bool is_zeroed, MAC_AllocKind kind )
 {
-   Addr p;
-
    VGP_PUSHCC(VgpCliMalloc);
 
    cmalloc_n_mallocs ++;
    cmalloc_bs_mallocd += size;
 
-   p = (Addr)VG_(cli_malloc)(alignment, size);
+   add_MAC_Chunk( tst, p, size, kind );
 
-   add_MAC_Chunk ( tst, p, size, kind );
-
-   MAC_(ban_mem_heap)( p-VG_(vg_malloc_redzone_szB), 
-                         VG_(vg_malloc_redzone_szB) );
+   MAC_(ban_mem_heap)( p-rzB, rzB );
    MAC_(new_mem_heap)( p, size, is_zeroed );
-   MAC_(ban_mem_heap)( p+size, VG_(vg_malloc_redzone_szB) );
+   MAC_(ban_mem_heap)( p+size, rzB );
 
    VGP_POPCC(VgpCliMalloc);
-   return (void*)p;
 }
 
 void* SK_(malloc) ( ThreadState* tst, Int n )
@@ -175,8 +165,10 @@
       VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
       return NULL;
    } else {
-      return alloc_and_new_mem ( tst, n, VG_(clo_alignment), 
-                                 /*is_zeroed*/False, MAC_AllocMalloc );
+      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
+      MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
+                        /*is_zeroed*/False, MAC_AllocMalloc );
+      return (void*)p;
    }
 }
 
@@ -186,8 +178,10 @@
       VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()", n);
       return NULL;
    } else {
-      return alloc_and_new_mem ( tst, n, VG_(clo_alignment), 
-                                 /*is_zeroed*/False, MAC_AllocNew );
+      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
+      MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
+                        /*is_zeroed*/False, MAC_AllocNew );
+      return (void*)p;
    }
 }
 
@@ -198,8 +192,10 @@
                    "Warning: silly arg (%d) to __builtin_vec_new()", n );
       return NULL;
    } else {
-      return alloc_and_new_mem ( tst, n, VG_(clo_alignment), 
-                                 /*is_zeroed*/False, MAC_AllocNewVec );
+      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
+      MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
+                        /*is_zeroed*/False, MAC_AllocNewVec );
+      return (void*)p;
    }
 }
 
@@ -209,41 +205,42 @@
       VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
       return NULL;
    } else {
-      return alloc_and_new_mem ( tst, n, align, /*is_zeroed*/False, 
-                                 MAC_AllocMalloc );
+      Addr p = (Addr)VG_(cli_malloc)( align, n );
+      MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
+                        /*is_zeroed*/False, MAC_AllocMalloc );
+      return (void*)p;
    }
 }
 
 void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
 {
-   void* p;
-   Int   size, i;
+   Int   n, i;
 
-   size = nmemb * size1;
+   n = nmemb * size1;
 
    if (nmemb < 0 || size1 < 0) {
       VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
                                nmemb, size1 );
       return NULL;
    } else {
-      p = alloc_and_new_mem ( tst, size, VG_(clo_alignment), 
-                              /*is_zeroed*/True, MAC_AllocMalloc );
-      for (i = 0; i < size; i++) 
+      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
+      MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
+                        /*is_zeroed*/True, MAC_AllocMalloc );
+      for (i = 0; i < n; i++) 
          ((UChar*)p)[i] = 0;
-      return p;
+      return (void*)p;
    }
 }
 
 static
 void die_and_free_mem ( ThreadState* tst, MAC_Chunk* mc,
-                        MAC_Chunk** prev_chunks_next_ptr )
+                        MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
 {
    /* Note: ban redzones again -- just in case user de-banned them
       with a client request... */
-   MAC_(ban_mem_heap)( mc->data-VG_(vg_malloc_redzone_szB), 
-                                VG_(vg_malloc_redzone_szB) );
+   MAC_(ban_mem_heap)( mc->data-rzB, rzB );
    MAC_(die_mem_heap)( mc->data, mc->size );
-   MAC_(ban_mem_heap)( mc->data+mc->size, VG_(vg_malloc_redzone_szB) );
+   MAC_(ban_mem_heap)( mc->data+mc->size, rzB );
 
    /* Remove mc from the malloclist using prev_chunks_next_ptr to
       avoid repeating the hash table lookup.  Can't remove until at least
@@ -254,13 +251,17 @@
    /* Record where freed */
    mc->where = VG_(get_ExeContext) ( tst );
 
-   /* Put it out of harm's way for a while. */
-   add_to_freed_queue ( mc );
+   /* Put it out of harm's way for a while, if not from a client request */
+   if (MAC_AllocCustom != mc->allockind)
+      add_to_freed_queue ( mc );
+   else
+      VG_(free) ( mc );
 }
 
 
-static __inline__
-void handle_free ( ThreadState* tst, void* p, MAC_AllocKind kind )
+__inline__
+void MAC_(handle_free) ( ThreadState* tst, Addr p, UInt rzB,
+                         MAC_AllocKind kind )
 {
    MAC_Chunk*  mc;
    MAC_Chunk** prev_chunks_next_ptr;
@@ -271,35 +272,34 @@
 
    mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                        (VgHashNode***)&prev_chunks_next_ptr );
-
    if (mc == NULL) {
-      MAC_(record_free_error) ( tst, (Addr)p );
+      MAC_(record_free_error) ( tst, p );
       VGP_POPCC(VgpCliMalloc);
       return;
    }
 
    /* check if its a matching free() / delete / delete [] */
    if (kind != mc->allockind) {
-      MAC_(record_freemismatch_error) ( tst, (Addr)p );
+      MAC_(record_freemismatch_error) ( tst, p );
    }
 
-   die_and_free_mem ( tst, mc, prev_chunks_next_ptr );
+   die_and_free_mem ( tst, mc, prev_chunks_next_ptr, rzB );
    VGP_POPCC(VgpCliMalloc);
 }
 
 void SK_(free) ( ThreadState* tst, void* p )
 {
-   handle_free(tst, p, MAC_AllocMalloc);
+   MAC_(handle_free)(tst, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc);
 }
 
 void SK_(__builtin_delete) ( ThreadState* tst, void* p )
 {
-   handle_free(tst, p, MAC_AllocNew);
+   MAC_(handle_free)(tst, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
 }
 
 void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p )
 {
-   handle_free(tst, p, MAC_AllocNewVec);
+   MAC_(handle_free)(tst, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
 }
 
 void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
@@ -370,7 +370,8 @@
          ((UChar*)p_new)[i] = ((UChar*)p)[i];
 
       /* Free old memory */
-      die_and_free_mem ( tst, mc, prev_chunks_next_ptr );
+      die_and_free_mem ( tst, mc, prev_chunks_next_ptr,
+                         VG_(vg_malloc_redzone_szB) );
 
       /* this has to be after die_and_free_mem, otherwise the
          former succeeds in shorting out the new block, not the
diff --git a/memcheck/mac_needs.c b/memcheck/mac_needs.c
index 874a6da..b48765c 100644
--- a/memcheck/mac_needs.c
+++ b/memcheck/mac_needs.c
@@ -277,7 +277,7 @@
 
    switch (VG_(get_error_kind)(err)) {
       case FreeErr:
-         VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
+         VG_(message)(Vg_UserMsg, "Invalid free() / delete / delete[]");
          /* fall through */
       case FreeMismatchErr:
          if (VG_(get_error_kind)(err) == FreeMismatchErr)
@@ -773,17 +773,34 @@
 Bool MAC_(handle_common_client_requests)(ThreadState* tst, UInt* arg,
                                          UInt* ret )
 {
-   UInt** argp = (UInt**)arg;
+   UInt* argv = (UInt*)arg;
    
    switch (arg[0]) {
-   case VG_USERREQ__COUNT_LEAKS: /* count leaked bytes */
+   case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
+      UInt** argp = (UInt**)arg;
       *argp[1] = MAC_(total_bytes_leaked);
       *argp[2] = MAC_(total_bytes_dubious);
       *argp[3] = MAC_(total_bytes_reachable);
       *argp[4] = MAC_(total_bytes_suppressed);
       *ret = 0;
       return True;
+   }
+   case VG_USERREQ__MALLOCLIKE_BLOCK: {
+      Addr p         = (Addr)argv[1];
+      UInt sizeB     =       argv[2];
+      UInt rzB       =       argv[3];
+      Bool is_zeroed = (Bool)argv[4];
 
+      MAC_(new_block) ( tst, p, sizeB, rzB, is_zeroed, MAC_AllocCustom );
+      return True;
+   }
+   case VG_USERREQ__FREELIKE_BLOCK: {
+      Addr p         = (Addr)argv[1];
+      UInt rzB       =       argv[2];
+
+      MAC_(handle_free) ( tst, p, rzB, MAC_AllocCustom );
+      return True;
+   }
    default:
       return False;
    }
diff --git a/memcheck/mac_shared.h b/memcheck/mac_shared.h
index 302d9ff..ff1cea3 100644
--- a/memcheck/mac_shared.h
+++ b/memcheck/mac_shared.h
@@ -128,7 +128,8 @@
    enum {
       MAC_AllocMalloc = 0,
       MAC_AllocNew    = 1,
-      MAC_AllocNewVec = 2
+      MAC_AllocNewVec = 2,
+      MAC_AllocCustom = 3
    }
    MAC_AllocKind;
    
@@ -285,6 +286,12 @@
 
 extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );
 
+extern void MAC_(new_block) ( ThreadState* tst, Addr p, UInt size,
+                              UInt rzB, Bool is_zeroed,
+                              MAC_AllocKind kind );
+extern void MAC_(handle_free) ( ThreadState* tst, Addr p, UInt rzB,
+                                MAC_AllocKind kind );
+
 extern void MAC_(record_address_error)     ( Addr a, Int size, Bool isWrite );
 extern void MAC_(record_core_mem_error)    ( ThreadState* tst, Bool isWrite,
                                              Char* s );
diff --git a/memcheck/memcheck.h b/memcheck/memcheck.h
index f598beb..8a38cce 100644
--- a/memcheck/memcheck.h
+++ b/memcheck/memcheck.h
@@ -80,7 +80,9 @@
       VG_USERREQ__CHECK_WRITABLE,
       VG_USERREQ__CHECK_READABLE,
       VG_USERREQ__DO_LEAK_CHECK,
-      VG_USERREQ__COUNT_LEAKS
+      VG_USERREQ__COUNT_LEAKS,
+      VG_USERREQ__MALLOCLIKE_BLOCK,
+      VG_USERREQ__FREELIKE_BLOCK,
    } Vg_MemCheckClientRequest;
 
 
@@ -170,6 +172,44 @@
       (volatile unsigned char *)&(__lvalue),                       \
                       (unsigned int)(sizeof (__lvalue)))
 
+/* Mark a block of memory as having been allocated by a malloc()-like
+   function.  `addr' is the start of the usable block (ie. after any
+   redzone).  `rzB' is the redzone size if the allocator can apply
+   redzones;  use '0' if not.  Adding redzones makes it more likely that
+   Valgrind will spot block overruns.  `is_zeroed' indicates if the memory
+   is zeroed, as it is for calloc().  Put this macro immediately after the
+   point where a block is allocated.
+
+   If you allocate memory via superblocks and then hand out small chunks
+   of each superblock, and the small blocks don't have redzones, it's
+   worth marking each superblock with VALGRIND_MAKE_NOACCESS when it's
+   created, so that block overruns are detected.  But if you can put
+   redzones on the small blocks, it's probably better not to do this, so
+   that overruns of a small block are described in terms of the small
+   block rather than the superblock (although a big overrun that skips
+   over a redzone could then be missed).  See memcheck/tests/custom_alloc.c
+   for an example.
+
+   Nb: the block must later be marked as freed with VALGRIND_FREELIKE_BLOCK
+   (eg. by your custom free function), or mismatch errors will occur. */
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)     \
+   {unsigned int _qzz_res;                                         \
+    VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                           \
+                            VG_USERREQ__MALLOCLIKE_BLOCK,          \
+                            addr, sizeB, rzB, is_zeroed);          \
+   }
+
+/* Mark a block of memory as having been freed by a free()-like function.
+   `rzB' is redzone size;  it must match that given to
+   VALGRIND_MALLOCLIKE_BLOCK.  Memory not freed will be detected by the leak
+   checker.  Put it immediately after the point where the block is freed. */
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                         \
+   {unsigned int _qzz_res;                                         \
+    VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                           \
+                            VG_USERREQ__FREELIKE_BLOCK,            \
+                            addr, rzB, 0, 0);                      \
+   }
+
 /* Do a memory leak check mid-execution.  */
 #define VALGRIND_DO_LEAK_CHECK                                     \
    {unsigned int _qzz_res;                                         \
diff --git a/memcheck/tests/Makefile.am b/memcheck/tests/Makefile.am
index dfd9f8a..1704e04 100644
--- a/memcheck/tests/Makefile.am
+++ b/memcheck/tests/Makefile.am
@@ -24,6 +24,7 @@
 	buflen_check.stderr.exp buflen_check.vgtest \
 	clientperm.stderr.exp \
 	clientperm.stdout.exp clientperm.vgtest \
+	custom_alloc.stderr.exp custom_alloc.vgtest \
 	doublefree.stderr.exp doublefree.vgtest \
 	error_counts.stderr.exp error_counts.stdout.exp error_counts.vgtest \
 	errs1.stderr.exp errs1.vgtest \
@@ -63,6 +64,7 @@
 
 check_PROGRAMS = \
 	badaddrvalue badfree badjump badloop buflen_check clientperm \
+	custom_alloc \
 	doublefree error_counts errs1 exitprog fprw fwrite inits inline \
 	malloc1 malloc2 malloc3 manuel1 manuel2 manuel3 \
 	memalign_test memcmptest mmaptest nanoleak null_socket \
@@ -82,6 +84,7 @@
 badloop_SOURCES 	= badloop.c
 buflen_check_SOURCES	= buflen_check.c
 clientperm_SOURCES 	= clientperm.c
+custom_alloc_SOURCES 	= custom_alloc.c
 doublefree_SOURCES 	= doublefree.c
 error_counts_SOURCES 	= error_counts.c
 errs1_SOURCES 		= errs1.c
diff --git a/memcheck/tests/custom_alloc.c b/memcheck/tests/custom_alloc.c
new file mode 100644
index 0000000..0a6ec8b
--- /dev/null
+++ b/memcheck/tests/custom_alloc.c
@@ -0,0 +1,96 @@
+#include <unistd.h>
+#include <sys/mman.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "../memcheck.h"
+
+#define SUPERBLOCK_SIZE    100000
+
+//-------------------------------------------------------------------------
+// Allocator
+//-------------------------------------------------------------------------
+
+void* get_superblock(void)
+{
+   void* p = mmap( 0, SUPERBLOCK_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC,
+                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
+
+   assert(p != ((void*)(-1)));
+
+   // Mark it no access;  although it's addressable we don't want the
+   // program to be using it unless it's handed out by custom_alloc().
+
+   // (With per-block redzones, as used below, it's arguably better not to
+   // do this;  see the comments in memcheck.h.)
+   VALGRIND_MAKE_NOACCESS(p, SUPERBLOCK_SIZE);
+
+   return p;
+}
+
+// has a redzone
+static void* custom_alloc(int size)
+{
+#define RZ  8
+   static void* hp     = 0;    // current heap pointer
+   static void* hp_lim = 0;    // last usable byte in the current superblock
+   int          size2  = size + RZ*2;
+   void*        p;
+
+   if (hp + size2 > hp_lim) {
+      hp = get_superblock();
+      hp_lim = hp + SUPERBLOCK_SIZE - 1;
+   }  
+
+   p = hp + RZ;
+   hp += size2;
+
+   VALGRIND_MALLOCLIKE_BLOCK( p, size, RZ, /*is_zeroed*/1 );
+   return (void*)p;
+}     
+
+static void custom_free(void* p)
+{
+   // don't actually free any memory... but mark it as freed
+   VALGRIND_FREELIKE_BLOCK( p, RZ );
+}
+#undef RZ
+
+
+
+//-------------------------------------------------------------------------
+// Rest
+//-------------------------------------------------------------------------
+
+void make_leak(void)
+{
+   int* array2 = custom_alloc(sizeof(int) * 10);
+   array2 = 0;          // leak
+   return;
+}
+
+int main(void)
+{
+   int* array;
+   int* array3;
+
+   array = custom_alloc(sizeof(int) * 10);
+   array[8]  = 8;
+   array[9]  = 8;
+   array[10] = 10;      // invalid write (ok w/o MALLOCLIKE -- in superblock)
+
+   custom_free(array);  // ok
+
+   custom_free(NULL);   // invalid free (ok without MALLOCLIKE)
+
+   array3 = malloc(sizeof(int) * 10);
+   custom_free(array3); // mismatched free (ok without MALLOCLIKE)
+
+   make_leak();
+   return array[0];     // use after free (ok without MALLOCLIKE/MAKE_NOACCESS)
+                        // (nb: initialised because is_zeroed==1 above)
+                        // unfortunately not identified as being in a free'd
+                        // block because the freeing of the block and shadow
+                        // chunk isn't postponed.
+   
+   // leak from make_leak()
+}
diff --git a/memcheck/tests/custom_alloc.stderr.exp b/memcheck/tests/custom_alloc.stderr.exp
new file mode 100644
index 0000000..9ce0280
--- /dev/null
+++ b/memcheck/tests/custom_alloc.stderr.exp
@@ -0,0 +1,44 @@
+
+Invalid write of size 4
+   at 0x........: main (custom_alloc.c:79)
+   by 0x........: __libc_start_main (...libc...)
+   by 0x........: ...
+   Address 0x........ is 48 bytes inside a block of size 100000 client-defined
+   at 0x........: get_superblock (custom_alloc.c:25)
+   by 0x........: custom_alloc (custom_alloc.c:40)
+   by 0x........: main (custom_alloc.c:76)
+   by 0x........: __libc_start_main (...libc...)
+
+Invalid free() / delete / delete[]
+   at 0x........: custom_free (custom_alloc.c:54)
+   by 0x........: main (custom_alloc.c:83)
+   by 0x........: __libc_start_main (...libc...)
+   by 0x........: ...
+   Address 0x........ is not stack'd, malloc'd or free'd
+
+Mismatched free() / delete / delete []
+   at 0x........: custom_free (custom_alloc.c:54)
+   by 0x........: main (custom_alloc.c:86)
+   by 0x........: __libc_start_main (...libc...)
+   by 0x........: ...
+   Address 0x........ is 0 bytes inside a block of size 40 alloc'd
+   at 0x........: malloc (vg_replace_malloc.c:...)
+   by 0x........: main (custom_alloc.c:85)
+   by 0x........: __libc_start_main (...libc...)
+   by 0x........: ...
+
+Invalid read of size 4
+   at 0x........: main (custom_alloc.c:89)
+   by 0x........: __libc_start_main (...libc...)
+   by 0x........: ...
+   Address 0x........ is 8 bytes inside a block of size 100000 client-defined
+   at 0x........: get_superblock (custom_alloc.c:25)
+   by 0x........: custom_alloc (custom_alloc.c:40)
+   by 0x........: main (custom_alloc.c:76)
+   by 0x........: __libc_start_main (...libc...)
+
+ERROR SUMMARY: 4 errors from 4 contexts (suppressed: 0 from 0)
+malloc/free: in use at exit: 40 bytes in 1 blocks.
+malloc/free: 3 allocs, 3 frees, 120 bytes allocated.
+For a detailed leak analysis,  rerun with: --leak-check=yes
+For counts of detected errors, rerun with: -v
diff --git a/memcheck/tests/custom_alloc.vgtest b/memcheck/tests/custom_alloc.vgtest
new file mode 100644
index 0000000..2b8a141
--- /dev/null
+++ b/memcheck/tests/custom_alloc.vgtest
@@ -0,0 +1 @@
+prog: custom_alloc