Add memcheck client requests VALGRIND_GET_VBITS / VALGRIND_SET_VBITS
for fetching/setting metadata so that it can be sent between
unconnected address spaces (for example, between separate processes).


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1718 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/memcheck/mc_clientreqs.c b/memcheck/mc_clientreqs.c
index 58c9b0e..d99cfa7 100644
--- a/memcheck/mc_clientreqs.c
+++ b/memcheck/mc_clientreqs.c
@@ -214,6 +214,22 @@
 	 *ret = 0;
 	 break;
 
+      case VG_USERREQ__GET_VBITS:
+         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
+            error. */
+         /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
+         *ret = MC_(get_or_set_vbits_for_client)
+                   ( arg[1], arg[2], arg[3], False /* get them */ );
+         break;
+
+      case VG_USERREQ__SET_VBITS:
+         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
+            error. */
+         /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
+         *ret = MC_(get_or_set_vbits_for_client)
+                   ( arg[1], arg[2], arg[3], True /* set them */ );
+         break;
+
       default:
          if (MAC_(handle_common_client_requests)(tst, arg, ret )) {
             return True;
diff --git a/memcheck/mc_include.h b/memcheck/mc_include.h
index 136548f..b0a9864 100644
--- a/memcheck/mc_include.h
+++ b/memcheck/mc_include.h
@@ -143,6 +143,12 @@
 
 extern void MC_(detect_memory_leaks) ( void );
 
+extern Int  MC_(get_or_set_vbits_for_client) ( 
+               Addr dataV, 
+               Addr vbitsV, 
+               UInt size, 
+               Bool setting /* True <=> set vbits,  False <=> get vbits */ 
+            );
 
 /* Functions defined in vg_memcheck_clientreqs.c */
 extern Bool MC_(client_perm_maybe_describe)( Addr a, AddrInfo* ai );
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 1a9f4d6..38a6ae9 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -255,6 +255,21 @@
 }
 
 
+static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
+{
+   SecMap* sm;
+   UInt    sm_off;
+   ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
+   sm     = primary_map[a >> 16];
+   sm_off = a & 0xFFFF;
+   PROF_EVENT(23);
+#  ifdef VG_DEBUG_MEMORY
+   sk_assert(IS_ALIGNED4_ADDR(a));
+#  endif
+   ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
+}
+
+
 /*------------------------------------------------------------*/
 /*--- Setting permissions over address ranges.             ---*/
 /*------------------------------------------------------------*/
@@ -1311,6 +1326,76 @@
 
 
 /*------------------------------------------------------------*/
+/*--- Metadata get/set functions, for client requests.     ---*/
+/*------------------------------------------------------------*/
+
+/* Get or set the V (validity) bits for the address range.  Returns:
+   1 == OK, 2 == alignment error, 3 == addressing error. */
+Int MC_(get_or_set_vbits_for_client) ( 
+   Addr dataV, 
+   Addr vbitsV, 
+   UInt size, 
+   Bool setting /* True <=> set vbits,  False <=> get vbits */ 
+)
+{
+   Bool addressibleD = True;
+   Bool addressibleV = True;
+   UInt* data  = (UInt*)dataV;
+   UInt* vbits = (UInt*)vbitsV;
+   UInt  szW   = size / 4; /* sigh */
+   UInt  i;
+
+   /* Check alignment of args. */
+   if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
+      return 2;
+   if ((size & 3) != 0)
+      return 2;
+  
+   /* Check that arrays are addressible. */
+   for (i = 0; i < szW; i++) {
+      UInt* dataP  = &data[i];
+      UInt* vbitsP = &vbits[i];
+      if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
+         addressibleD = False;
+         break;
+      }
+      if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
+         addressibleV = False;
+         break;
+      }
+   }
+   if (!addressibleD) {
+      MAC_(record_address_error)( dataV, size, 
+                                  setting ? True : False );
+      return 3;
+   }
+   if (!addressibleV) {
+      MAC_(record_address_error)( vbitsV, size, 
+                                  setting ? False : True );
+      return 3;
+   }
+ 
+   /* Do the copy */
+   if (setting) {
+      /* setting */
+      for (i = 0; i < szW; i++) {
+         if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
+            MC_(record_value_error)(4);
+         set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
+      }
+   } else {
+      /* getting */
+      for (i = 0; i < szW; i++) {
+         vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
+         set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
+      }
+   }
+
+   return 1;
+}
+
+
+/*------------------------------------------------------------*/
 /*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
 /*------------------------------------------------------------*/
 
diff --git a/memcheck/memcheck.h b/memcheck/memcheck.h
index 8a38cce..20cccfe 100644
--- a/memcheck/memcheck.h
+++ b/memcheck/memcheck.h
@@ -83,6 +83,8 @@
       VG_USERREQ__COUNT_LEAKS,
       VG_USERREQ__MALLOCLIKE_BLOCK,
       VG_USERREQ__FREELIKE_BLOCK,
+      VG_USERREQ__GET_VBITS,
+      VG_USERREQ__SET_VBITS
    } Vg_MemCheckClientRequest;
 
 
@@ -228,3 +230,44 @@
    }
 
 #endif
+
+
+/* Get in zzvbits the validity data for the zznbytes starting at
+   zzsrc.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   if the zzsrc/zzvbits arrays are not 4-byte aligned, or
+          zznbytes is not a multiple of 4.
+      3   if any part of zzsrc/zzvbits is not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zzsrc,zzvbits,zznbytes)               \
+   (__extension__({unsigned int _qzz_res;                        \
+    char* czzsrc   = (char*)zzsrc;                               \
+    char* czzvbits = (char*)zzvbits;                             \
+    VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                         \
+                            VG_USERREQ__GET_VBITS,               \
+                            czzsrc, czzvbits, zznbytes,0 );      \
+    _qzz_res;                                                    \
+   }))
+
+/* Apply the validity data in zzvbits to the zznbytes starting at
+   zzdst.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   if the zzdst/zzvbits arrays are not 4-byte aligned, or
+          zznbytes is not a multiple of 4.
+      3   if any part of zzdst/zzvbits is not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zzdst,zzvbits,zznbytes)               \
+   (__extension__({unsigned int _qzz_res;                        \
+    char* czzdst   = (char*)zzdst;                               \
+    char* czzvbits = (char*)zzvbits;                             \
+    VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                         \
+                            VG_USERREQ__SET_VBITS,               \
+                            czzdst, czzvbits, zznbytes,0 );      \
+    _qzz_res;                                                    \
+   }))