Some newer systems have a new version of pthreadtypes.h that comes from glibc
2.3.3, which is binary-compatible, but not source-compatible with the old one,
which came from LinuxThreads.  We were using the types defined in the old one,
which caused compilation errors on systems using the new one.

This commit introduces our own versions of these types.  Our versions are laid
out identically to the LinuxThreads ones, but the field names are different.
We convert all pthread types to our versions before using them, so we don't
rely on the pthreadtypes.h types any more.  Hopefully this will fix the
problem;  I have three reports that it does.  Let's see...


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@2272 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/vg_include.h b/coregrind/vg_include.h
index a764362..49b47ec 100644
--- a/coregrind/vg_include.h
+++ b/coregrind/vg_include.h
@@ -644,6 +644,41 @@
 
 
 /* ---------------------------------------------------------------------
+   Exports of vg_libpthread.c
+   ------------------------------------------------------------------ */
+
+/* Replacements for pthread types, shared between vg_libpthread.c and
+   vg_scheduler.c.  See comment in vg_libpthread.c above the other
+   vg_pthread_*_t types for a description of how these are used. */
+
+struct _vg_pthread_fastlock
+{
+   long int __vg_status;   /* "Free" or "taken" or head of waiting list */
+   int __vg_spinlock;      /* Used by compare_and_swap emulation. Also,
+                           adaptive SMP lock stores spin count here. */
+};
+
+typedef struct
+{
+   int __vg_m_reserved;               /* Reserved for future use */
+   int __vg_m_count;                  /* Depth of recursive locking */
+   /*_pthread_descr*/ void* __vg_m_owner;       /* Owner thread (if recursive or errcheck) */
+   int __vg_m_kind;                   /* Mutex kind: fast, recursive or errcheck */
+   struct _vg_pthread_fastlock __vg_m_lock; /* Underlying fast lock */
+}  vg_pthread_mutex_t;
+
+typedef struct
+{
+  struct _vg_pthread_fastlock __vg_c_lock; /* Protect against concurrent access */
+  /*_pthread_descr*/ void* __vg_c_waiting;        /* Threads waiting on this condition */
+  // Padding ensures the size is 48 bytes
+  char __vg_padding[48 - sizeof(struct _vg_pthread_fastlock)
+         - sizeof(void*) - sizeof(long long)];
+  long long __vg_align;
+} vg_pthread_cond_t;
+
+
+/* ---------------------------------------------------------------------
    Exports of vg_scheduler.c
    ------------------------------------------------------------------ */
 
@@ -702,7 +737,7 @@
       When .status == WaitCV, points to the mutex associated with
       the condition variable indicated by the .associated_cv field.
       In all other cases, should be NULL. */
-   void* /*pthread_mutex_t* */ associated_mx;
+   vg_pthread_mutex_t* associated_mx;
 
    /* When .status == WaitCV, points to the condition variable I am
       waiting for.  In all other cases, should be NULL. */
@@ -981,6 +1016,7 @@
    out what's happening. */
 #define VG_PTHREAD_PREHISTORY		0x80000000
 
+
 /* ---------------------------------------------------------------------
    Exports of vg_signals.c
    ------------------------------------------------------------------ */
diff --git a/coregrind/vg_libpthread.c b/coregrind/vg_libpthread.c
index 02d1a61..0f79e9f 100644
--- a/coregrind/vg_libpthread.c
+++ b/coregrind/vg_libpthread.c
@@ -82,6 +82,88 @@
 
 
 /* ---------------------------------------------------------------------
+   Our own definition of types that vary between LinuxThreads and NPTL.
+   ------------------------------------------------------------------ */
+
+/* Moving from LinuxThreads to NPTL, several crucial types (eg.
+   pthread_mutex_t, pthread_mutexattr_t, etc) in pthreadtypes.h were changed
+   in binary-compatible, but source-incompatible, ways.  We can similarly
+   use any layout we want, so long as it's binary-compatible.  However, we
+   can no longer use the LinuxThreads types, because they won't work on NPTL
+   systems.  Thus, we have to introduce a layer of indirection, and define
+   our own versions of these types (vg_pthread_mutex_t, etc).  NPTL does
+   pretty much the same thing, and it keeps many of its internal types
+   secret.
+
+   We can lay out our types however we want, as long as we put the small
+   number of fields in the right place for binary compatibility (eg.
+   mutex->kind).  To make life easy, our versions have the exact same layout
+   as the LinuxThreads ones;  only the type names and field names are
+   different (they differ only by including "vg" at the start).
+
+   In our implementation of the pthread operations (pthread_mutex_lock(),
+   pthread_mutexattr_settype(), etc) we always cast the standard pthread
+   types to our own types, (eg. pthread_mutex_t --> vg_pthread_mutex_t),
+   before working with them.
+    
+   Note that we have various mutexes (and condvars) in this file that have the
+   type pthread_mutex_t (and pthread_cond_t).  That is fine, because they
+   are always only handled by calling the standard pthread functions (eg.
+   pthread_mutex_lock()) on them.  Phew.
+
+   WARNING: as a result of all this, we should *never* access these standard
+   pthread types as is;  they *must* be converted to the vg_pthread_foo_t
+   equivalent.   It would be nice if this were enforced...  (but compilation
+   on NPTL-only systems should fail if this rule isn't followed...?)
+*/
+
+#include <sched.h>   // for 'struct __sched_param'
+
+typedef struct __vg_pthread_attr_s
+{
+   int __vg_detachstate;
+   int __vg_schedpolicy;
+   struct __sched_param __vg_schedparam;
+   int __vg_inheritsched;
+   int __vg_scope;
+   size_t __vg_guardsize;
+   int __vg_stackaddr_set;
+   void *__vg_stackaddr;
+   size_t __vg_stacksize;
+} vg_pthread_attr_t;
+
+typedef struct
+{
+   int __vg_mutexkind;
+} vg_pthread_mutexattr_t;
+
+typedef struct _vg_pthread_rwlock_t
+{
+   struct _vg_pthread_fastlock __vg_rw_lock; /* Lock to guarantee mutual exclusion */
+   int __vg_rw_readers;                   /* Number of readers */
+   /*_pthread_descr*/ void* __vg_rw_writer;         /* Identity of writer, or NULL if none */
+   /*_pthread_descr*/ void* __vg_rw_read_waiting;   /* Threads waiting for reading */
+   /*_pthread_descr*/ void* __vg_rw_write_waiting;  /* Threads waiting for writing */
+   int __vg_rw_kind;                      /* Reader/Writer preference selection */
+   int __vg_rw_pshared;                   /* Shared between processes or not */
+} vg_pthread_rwlock_t;
+
+typedef struct
+{
+   int __vg_lockkind;
+   int __vg_pshared;
+} vg_pthread_rwlockattr_t;
+
+/* Converting pthread types to vg_pthread types.  We always check that the
+   passed-in type is as big as ours, for safety.  We also zero the pointer
+   to the original struct, to ensure we don't accidentally use it again. */
+
+#define CONVERT(foo, x, vg_x) \
+   my_assert(sizeof(*x) >= sizeof(vg_pthread_##foo##_t)); \
+   vg_x = (vg_pthread_##foo##_t*)x; \
+   x = 0;  // ensure we don't accidentally use x again!
+
+/* ---------------------------------------------------------------------
    Forwardses.
    ------------------------------------------------------------------ */
 
@@ -301,28 +383,38 @@
 
 int pthread_attr_init(pthread_attr_t *attr)
 {
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   
    /* Just initialise the fields which we might look at. */
-   attr->__detachstate = PTHREAD_CREATE_JOINABLE;
+   vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
    /* Linuxthreads sets this field to the value __getpagesize(), so I
       guess the following is OK. */
-   attr->__guardsize = VKI_BYTES_PER_PAGE;   return 0;
+   vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+   return 0;
 }
 
 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
 {
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
    if (detachstate != PTHREAD_CREATE_JOINABLE 
        && detachstate != PTHREAD_CREATE_DETACHED) {
       pthread_error("pthread_attr_setdetachstate: "
                     "detachstate is invalid");
       return EINVAL;
    }
-   attr->__detachstate = detachstate;
+   vg_attr->__vg_detachstate = detachstate;
    return 0;
 }
 
 int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
 {
-   *detachstate = attr->__detachstate;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
+   *detachstate = vg_attr->__vg_detachstate;
    return 0;
 }
 
@@ -340,6 +432,7 @@
 {
    size_t limit;
    char buf[1024];
+
    ensure_valgrind("pthread_attr_setstacksize");
    limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB 
                                  - 1000; /* paranoia */
@@ -412,25 +505,28 @@
 {
    int    detached;
    size_t limit;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
    ensure_valgrind("pthread_getattr_np");
    kludged("pthread_getattr_np", NULL);
    limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB 
                                  - 1000; /* paranoia */
-   attr->__detachstate = PTHREAD_CREATE_JOINABLE;
-   attr->__schedpolicy = SCHED_OTHER;
-   attr->__schedparam.sched_priority = 0;
-   attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
-   attr->__scope = PTHREAD_SCOPE_SYSTEM;
-   attr->__guardsize = VKI_BYTES_PER_PAGE;
-   attr->__stackaddr = NULL;
-   attr->__stackaddr_set = 0;
-   attr->__stacksize = limit;
+   vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
+   vg_attr->__vg_schedpolicy = SCHED_OTHER;
+   vg_attr->__vg_schedparam.sched_priority = 0;
+   vg_attr->__vg_inheritsched = PTHREAD_EXPLICIT_SCHED;
+   vg_attr->__vg_scope = PTHREAD_SCOPE_SYSTEM;
+   vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+   vg_attr->__vg_stackaddr = NULL;
+   vg_attr->__vg_stackaddr_set = 0;
+   vg_attr->__vg_stacksize = limit;
    VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
                            VG_USERREQ__SET_OR_GET_DETACH, 
                            2 /* get */, thread, 0, 0);
    my_assert(detached == 0 || detached == 1);
    if (detached)
-      attr->__detachstate = PTHREAD_CREATE_DETACHED;
+      vg_attr->__vg_detachstate = PTHREAD_CREATE_DETACHED;
    return 0;
 }
 
@@ -463,16 +559,20 @@
 
 int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
 {
-  if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
-    return EINVAL;
-  attr->__schedpolicy = policy;
-  return 0;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
+      return EINVAL;
+   vg_attr->__vg_schedpolicy = policy;
+   return 0;
 }
 
 int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
 {
-  *policy = attr->__schedpolicy;
-  return 0;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   *policy = vg_attr->__vg_schedpolicy;
+   return 0;
 }
 
 
@@ -498,7 +598,9 @@
 WEAK 
 int pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
 {
-   *guardsize = attr->__guardsize;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   *guardsize = vg_attr->__vg_guardsize;
    return 0;
 }  
 
@@ -785,6 +887,8 @@
    int            tid_child;
    NewThreadInfo* info;
    int            gs;
+   vg_pthread_attr_t* __vg_attr;
+   CONVERT(attr, __attr, __vg_attr);
 
    ensure_valgrind("pthread_create");
 
@@ -797,8 +901,8 @@
    info = my_malloc(sizeof(NewThreadInfo));
    my_assert(info != NULL);
 
-   if (__attr)
-      info->attr__detachstate = __attr->__detachstate;
+   if (__vg_attr)
+      info->attr__detachstate = __vg_attr->__vg_detachstate;
    else 
       info->attr__detachstate = PTHREAD_CREATE_JOINABLE;
 
@@ -983,12 +1087,17 @@
 
 int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
 {
-   attr->__mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
+   vg_pthread_mutexattr_t* vg_attr;
+   CONVERT(mutexattr, attr, vg_attr);
+   vg_attr->__vg_mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
    return 0;
 }
 
 int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
 {
+   vg_pthread_mutexattr_t* vg_attr;
+   CONVERT(mutexattr, attr, vg_attr);
+
    switch (type) {
 #     ifndef GLIBC_2_1    
       case PTHREAD_MUTEX_TIMED_NP:
@@ -999,7 +1108,7 @@
 #     endif
       case PTHREAD_MUTEX_RECURSIVE_NP:
       case PTHREAD_MUTEX_ERRORCHECK_NP:
-         attr->__mutexkind = type;
+         vg_attr->__vg_mutexkind = type;
          return 0;
       default:
          pthread_error("pthread_mutexattr_settype: "
@@ -1033,11 +1142,16 @@
 int __pthread_mutex_init(pthread_mutex_t *mutex, 
                          const  pthread_mutexattr_t *mutexattr)
 {
-   mutex->__m_count = 0;
-   mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
-   mutex->__m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
-   if (mutexattr)
-      mutex->__m_kind = mutexattr->__mutexkind;
+   vg_pthread_mutex_t* vg_mutex;
+   vg_pthread_mutexattr_t* vg_mutexattr;
+   CONVERT(mutex, mutex, vg_mutex); 
+   CONVERT(mutexattr, mutexattr, vg_mutexattr);
+   
+   vg_mutex->__vg_m_count = 0;
+   vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+   vg_mutex->__vg_m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
+   if (vg_mutexattr)
+      vg_mutex->__vg_m_kind = vg_mutexattr->__vg_mutexkind;
    return 0;
 }
 
@@ -1045,19 +1159,21 @@
 int __pthread_mutex_lock(pthread_mutex_t *mutex)
 {
    int res;
-
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+   
    if (RUNNING_ON_VALGRIND) {
       VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                               VG_USERREQ__PTHREAD_MUTEX_LOCK,
-                              mutex, 0, 0, 0);
+                              vg_mutex, 0, 0, 0);
       return res;
    } else {
       /* Play at locking */
       if (0)
 	 kludged("prehistoric lock", NULL);
-      mutex->__m_owner = (_pthread_descr)1;
-      mutex->__m_count = 1;
-      mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+      vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+      vg_mutex->__vg_m_count = 1;
+      vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
       return 0; /* success */
    }
 }
@@ -1066,19 +1182,21 @@
 int __pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
    int res;
-
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+   
    if (RUNNING_ON_VALGRIND) {
       VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                               VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
-                              mutex, 0, 0, 0);
+                              vg_mutex, 0, 0, 0);
       return res;
    } else {
       /* Play at locking */
       if (0)
 	 kludged("prehistoric trylock", NULL);
-      mutex->__m_owner = (_pthread_descr)1;
-      mutex->__m_count = 1;
-      mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+      vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+      vg_mutex->__vg_m_count = 1;
+      vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
       return 0; /* success */
    }
 }
@@ -1087,19 +1205,21 @@
 int __pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
    int res;
-
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+   
    if (RUNNING_ON_VALGRIND) {
       VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                               VG_USERREQ__PTHREAD_MUTEX_UNLOCK,
-                              mutex, 0, 0, 0);
+                              vg_mutex, 0, 0, 0);
       return res;
    } else {
       /* Play at locking */
       if (0)
 	 kludged("prehistoric unlock", NULL);
-      mutex->__m_owner = 0;
-      mutex->__m_count = 0;
-      mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
+      vg_mutex->__vg_m_owner = 0;
+      vg_mutex->__vg_m_count = 0;
+      vg_mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
       return 0; /* success */
    }
 }
@@ -1107,20 +1227,22 @@
 
 int __pthread_mutex_destroy(pthread_mutex_t *mutex)
 {
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    /* Valgrind doesn't hold any resources on behalf of the mutex, so no
       need to involve it. */
-   if (mutex->__m_count > 0) {
+   if (vg_mutex->__vg_m_count > 0) {
       /* Oh, the horror.  glibc's internal use of pthreads "knows"
 	 that destroying a lock does an implicit unlock.  Make it
 	 explicit. */
-      __pthread_mutex_unlock(mutex);
-      pthread_error("pthread_mutex_destroy: "
-		    "mutex is still in use");
+      __pthread_mutex_unlock( (pthread_mutex_t*)vg_mutex );
+      pthread_error("pthread_mutex_destroy: mutex is still in use");
       return EBUSY;
    }
-   mutex->__m_count = 0;
-   mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
-   mutex->__m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
+   vg_mutex->__vg_m_count = 0;
+   vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+   vg_mutex->__vg_m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
    return 0;
 }
 
@@ -1144,7 +1266,9 @@
 int pthread_cond_init( pthread_cond_t *cond,
 		       const pthread_condattr_t *cond_attr)
 {
-   cond->__c_waiting = (_pthread_descr)VG_INVALID_THREADID;
+   vg_pthread_cond_t* vg_cond;
+   CONVERT(cond, cond, vg_cond);
+   vg_cond->__vg_c_waiting = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
    return 0;
 }
 
@@ -1192,10 +1316,13 @@
 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
    int res;
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    ensure_valgrind("pthread_cond_wait");
    VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                            VG_USERREQ__PTHREAD_COND_WAIT,
-			   cond, mutex, 0, 0);
+			   cond, vg_mutex, 0, 0);
    return res;
 }
 
@@ -1208,6 +1335,8 @@
    struct  timeval timeval_now;
    unsigned long long int ull_ms_now_after_1970;
    unsigned long long int ull_ms_end_after_1970;
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
 
    ensure_valgrind("pthread_cond_timedwait");
    VALGRIND_MAGIC_SEQUENCE(ms_now, 0xFFFFFFFF /* default */,
@@ -1229,7 +1358,7 @@
       = ms_now + (unsigned int)(ull_ms_end_after_1970 - ull_ms_now_after_1970);
    VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                            VG_USERREQ__PTHREAD_COND_TIMEDWAIT,
-			   cond, mutex, ms_end, 0);
+			   cond, vg_mutex, ms_end, 0);
    return res;
 }
 
@@ -2657,14 +2786,16 @@
 /* Take the address of a LinuxThreads rwlock_t and return the shadow
    address of our version.  Further, if the LinuxThreads version
    appears to have been statically initialised, do the same to the one
-   we allocate here.  The pthread_rwlock_t.__rw_readers field is set
-   to zero by PTHREAD_RWLOCK_INITIALIZER, so we take zero as meaning
-   uninitialised and non-zero meaning initialised. 
+   we allocate here.  The vg_pthread_rwlock_t.__vg_rw_readers field is set
+   to zero by PTHREAD_RWLOCK_INITIALIZER (as are several other fields), so
+   we take zero as meaning uninitialised and non-zero meaning initialised. 
 */
 static vg_rwlock_t* rw_remap ( pthread_rwlock_t* orig )
 {
    int          res, i;
    vg_rwlock_t* vg_rwl;
+   vg_pthread_rwlock_t* vg_orig;
+   
    res = __pthread_mutex_lock(&rw_remap_mx);
    my_assert(res == 0);
 
@@ -2688,10 +2819,11 @@
    vg_rwl = &rw_remap_new[i];
 
    /* Initialise the shadow, if required. */
-   if (orig->__rw_readers == 0) {
-      orig->__rw_readers = 1;
+   CONVERT(rwlock, orig, vg_orig);
+   if (vg_orig->__vg_rw_readers == 0) {
+      vg_orig->__vg_rw_readers = 1;
       init_vg_rwlock(vg_rwl);
-      if (orig->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
+      if (vg_orig->__vg_rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
          vg_rwl->prefer_w = 0;
    }
 
@@ -2703,14 +2835,19 @@
                           const pthread_rwlockattr_t* attr )
 {
    vg_rwlock_t* rwl;
+   vg_pthread_rwlock_t* vg_orig;
+   vg_pthread_rwlockattr_t* vg_attr;
+   CONVERT(rwlock, orig, vg_orig);
+   CONVERT(rwlockattr, attr, vg_attr);
+
    if (0) printf ("pthread_rwlock_init\n");
    /* Force the remapper to initialise the shadow. */
-   orig->__rw_readers = 0;
+   vg_orig->__vg_rw_readers = 0;
    /* Install the lock preference; the remapper needs to know it. */
-   orig->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
-   if (attr)
-      orig->__rw_kind = attr->__lockkind;
-   rwl = rw_remap ( orig );
+   vg_orig->__vg_rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
+   if (vg_attr)
+      vg_orig->__vg_rw_kind = vg_attr->__vg_lockkind;
+   rwl = rw_remap ( (pthread_rwlock_t*)vg_orig );
    return 0;
 }
 
@@ -2728,6 +2865,7 @@
 {
    int res;
    vg_rwlock_t* rwl;
+
    if (0) printf ("pthread_rwlock_rdlock\n");
    rwl = rw_remap ( orig );
    res = __pthread_mutex_lock(&rwl->mx);
@@ -2761,6 +2899,7 @@
 {
    int res;
    vg_rwlock_t* rwl;
+
    if (0) printf ("pthread_rwlock_tryrdlock\n");
    rwl = rw_remap ( orig );
    res = __pthread_mutex_lock(&rwl->mx);
@@ -2798,6 +2937,7 @@
 {
    int res;
    vg_rwlock_t* rwl;
+
    if (0) printf ("pthread_rwlock_wrlock\n");
    rwl = rw_remap ( orig );
    res = __pthread_mutex_lock(&rwl->mx);
@@ -2959,10 +3099,11 @@
 int
 pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
 {
-  attr->__lockkind = 0;
-  attr->__pshared = PTHREAD_PROCESS_PRIVATE;
-
-  return 0;
+   vg_pthread_rwlockattr_t* vg_attr;
+   CONVERT(rwlockattr, attr, vg_attr);
+   vg_attr->__vg_lockkind = 0;
+   vg_attr->__vg_pshared = PTHREAD_PROCESS_PRIVATE;
+   return 0;
 }
 
 /* Copied directly from LinuxThreads. */
@@ -2976,16 +3117,19 @@
 int
 pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
 {
-  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
-    return EINVAL;
+   vg_pthread_rwlockattr_t* vg_attr;
+   CONVERT(rwlockattr, attr, vg_attr);
 
-  /* For now it is not possible to shared a conditional variable.  */
-  if (pshared != PTHREAD_PROCESS_PRIVATE)
-    return ENOSYS;
+   if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
+      return EINVAL;
 
-  attr->__pshared = pshared;
+   /* For now it is not possible to share a condition variable.  */
+   if (pshared != PTHREAD_PROCESS_PRIVATE)
+      return ENOSYS;
 
-  return 0;
+   vg_attr->__vg_pshared = pshared;
+
+   return 0;
 }
 
 
diff --git a/coregrind/vg_scheduler.c b/coregrind/vg_scheduler.c
index 75ad420..0d47ef6 100644
--- a/coregrind/vg_scheduler.c
+++ b/coregrind/vg_scheduler.c
@@ -1931,39 +1931,26 @@
    MUTEXes
    -------------------------------------------------------- */
 
-/* pthread_mutex_t is a struct with at 5 words:
-      typedef struct
-      {
-        int __m_reserved;         -- Reserved for future use
-        int __m_count;            -- Depth of recursive locking
-        _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
-        int __m_kind;      -- Mutex kind: fast, recursive or errcheck
-        struct _pthread_fastlock __m_lock;  -- Underlying fast lock
-      } pthread_mutex_t;
+/* vg_pthread_mutex_t is defined in vg_include.h.
 
-   #define PTHREAD_MUTEX_INITIALIZER \
-     {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
-   # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
-     {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
-   # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
-     {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
-   # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
-     {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
+   The initializers zero everything, except possibly the fourth word,
+   which in vg_pthread_mutex_t is the __vg_m_kind field.  It gets set to one
+   of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
 
    How we use it:
 
-   __m_kind  never changes and indicates whether or not it is recursive.
+   __vg_m_kind  never changes and indicates whether or not it is recursive.
 
-   __m_count indicates the lock count; if 0, the mutex is not owned by 
+   __vg_m_count indicates the lock count; if 0, the mutex is not owned by 
              anybody.  
 
-   __m_owner has a ThreadId value stuffed into it.  We carefully arrange 
+   __vg_m_owner has a ThreadId value stuffed into it.  We carefully arrange 
              that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
              statically initialised mutexes correctly appear 
              to belong to nobody.
 
-   In summary, a not-in-use mutex is distinguised by having __m_owner
-   == 0 (VG_INVALID_THREADID) and __m_count == 0 too.  If one of those
+   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
+   == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too.  If one of those
    conditions holds, the other should too.
 
    There is no linked list of threads waiting for this mutex.  Instead
@@ -1977,7 +1964,7 @@
 
 /* Helper fns ... */
 static
-void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex, 
+void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex, 
                                            Char* caller )
 {
    Int  i;
@@ -1993,19 +1980,19 @@
          break;
    }
 
-   VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__m_owner, mutex );
+   VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
 
    vg_assert(i <= VG_N_THREADS);
    if (i == VG_N_THREADS) {
       /* Nobody else is waiting on it. */
-      mutex->__m_count = 0;
-      mutex->__m_owner = VG_INVALID_THREADID;
+      mutex->__vg_m_count = 0;
+      mutex->__vg_m_owner = VG_INVALID_THREADID;
    } else {
       /* Notionally transfer the hold to thread i, whose
          pthread_mutex_lock() call now returns with 0 (success). */
       /* The .count is already == 1. */
       vg_assert(VG_(threads)[i].associated_mx == mutex);
-      mutex->__m_owner = (_pthread_descr)i;
+      mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
       VG_(threads)[i].status        = VgTs_Runnable;
       VG_(threads)[i].associated_mx = NULL;
       /* m_edx already holds pth_mx_lock() success (0) */
@@ -2024,7 +2011,7 @@
 static
 void do_pthread_mutex_lock( ThreadId tid, 
                             Bool is_trylock, 
-                            pthread_mutex_t* mutex )
+                            vg_pthread_mutex_t* mutex )
 {
    Char  msg_buf[100];
    Char* caller
@@ -2049,7 +2036,7 @@
    }
 
    /* More paranoia ... */
-   switch (mutex->__m_kind) {
+   switch (mutex->__vg_m_kind) {
 #     ifndef GLIBC_2_1
       case PTHREAD_MUTEX_TIMED_NP:
       case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -2059,7 +2046,7 @@
 #     endif
       case PTHREAD_MUTEX_RECURSIVE_NP:
       case PTHREAD_MUTEX_ERRORCHECK_NP:
-         if (mutex->__m_count >= 0) break;
+         if (mutex->__vg_m_count >= 0) break;
          /* else fall thru */
       default:
          VG_(record_pthread_error)( tid, 
@@ -2068,8 +2055,8 @@
          return;
    }
 
-   if (mutex->__m_count > 0) {
-      if (!VG_(is_valid_tid)((ThreadId)mutex->__m_owner)) {
+   if (mutex->__vg_m_count > 0) {
+      if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
          VG_(record_pthread_error)( tid, 
             "pthread_mutex_lock/trylock: mutex has invalid owner");
          SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
@@ -2077,15 +2064,15 @@
       }	 
 
       /* Someone has it already. */
-      if ((ThreadId)mutex->__m_owner == tid) {
+      if ((ThreadId)mutex->__vg_m_owner == tid) {
          /* It's locked -- by me! */
-         if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
+         if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
             /* return 0 (success). */
-            mutex->__m_count++;
+            mutex->__vg_m_count++;
             SET_PTHREQ_RETVAL(tid, 0);
             if (0)
                VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n", 
-                           tid, mutex, mutex->__m_count);
+                           tid, mutex, mutex->__vg_m_count);
             return;
          } else {
             if (is_trylock)
@@ -2097,7 +2084,7 @@
       } else {
          /* Someone else has it; we have to wait.  Mark ourselves
             thusly. */
-         /* GUARD: __m_count > 0 && __m_owner is valid */
+         /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
          if (is_trylock) {
             /* caller is polling; so return immediately. */
             SET_PTHREQ_RETVAL(tid, EBUSY);
@@ -2118,13 +2105,13 @@
 
    } else {
       /* Nobody owns it.  Sanity check ... */
-      vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
+      vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
 
       VG_TRACK ( pre_mutex_lock, tid, mutex );
 
       /* We get it! [for the first time]. */
-      mutex->__m_count = 1;
-      mutex->__m_owner = (_pthread_descr)tid;
+      mutex->__vg_m_count = 1;
+      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
 
       /* return 0 (success). */
       SET_PTHREQ_RETVAL(tid, 0);
@@ -2136,7 +2123,7 @@
 
 static
 void do_pthread_mutex_unlock ( ThreadId tid,
-                               pthread_mutex_t* mutex )
+                               vg_pthread_mutex_t* mutex )
 {
    Char msg_buf[100];
 
@@ -2158,14 +2145,14 @@
 
    /* If this was locked before the dawn of time, pretend it was
       locked now so that it balances with unlocks */
-   if (mutex->__m_kind & VG_PTHREAD_PREHISTORY) {
-      mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
-      VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
-      VG_TRACK( post_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
+   if (mutex->__vg_m_kind & VG_PTHREAD_PREHISTORY) {
+      mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
+      VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
+      VG_TRACK( post_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
    }
 
    /* More paranoia ... */
-   switch (mutex->__m_kind) {
+   switch (mutex->__vg_m_kind) {
 #     ifndef GLIBC_2_1    
       case PTHREAD_MUTEX_TIMED_NP:
       case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -2175,7 +2162,7 @@
 #     endif
       case PTHREAD_MUTEX_RECURSIVE_NP:
       case PTHREAD_MUTEX_ERRORCHECK_NP:
-         if (mutex->__m_count >= 0) break;
+         if (mutex->__vg_m_count >= 0) break;
          /* else fall thru */
       default:
          VG_(record_pthread_error)( tid, 
@@ -2185,7 +2172,7 @@
    }
 
    /* Barf if we don't currently hold the mutex. */
-   if (mutex->__m_count == 0) {
+   if (mutex->__vg_m_count == 0) {
       /* nobody holds it */
       VG_(record_pthread_error)( tid, 
          "pthread_mutex_unlock: mutex is not locked");
@@ -2193,7 +2180,7 @@
       return;
    }
 
-   if ((ThreadId)mutex->__m_owner != tid) {
+   if ((ThreadId)mutex->__vg_m_owner != tid) {
       /* we don't hold it */
       VG_(record_pthread_error)( tid, 
          "pthread_mutex_unlock: mutex is locked by a different thread");
@@ -2203,17 +2190,17 @@
 
    /* If it's a multiply-locked recursive mutex, just decrement the
       lock count and return. */
-   if (mutex->__m_count > 1) {
-      vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
-      mutex->__m_count --;
+   if (mutex->__vg_m_count > 1) {
+      vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
+      mutex->__vg_m_count --;
       SET_PTHREQ_RETVAL(tid, 0); /* success */
       return;
    }
 
    /* Now we're sure it is locked exactly once, and by the thread who
       is now doing an unlock on it.  */
-   vg_assert(mutex->__m_count == 1);
-   vg_assert((ThreadId)mutex->__m_owner == tid);
+   vg_assert(mutex->__vg_m_count == 1);
+   vg_assert((ThreadId)mutex->__vg_m_owner == tid);
 
    /* Release at max one thread waiting on this mutex. */
    release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
@@ -2227,26 +2214,11 @@
    CONDITION VARIABLES
    -------------------------------------------------------- */
 
-/* The relevant native types are as follows:
-   (copied from /usr/include/bits/pthreadtypes.h)
+/* The relevant type (vg_pthread_cond_t) is in vg_include.h.
 
-   -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER
-   typedef struct
-   {
-     struct _pthread_fastlock __c_lock; -- Protect against concurrent access
-     _pthread_descr __c_waiting;        -- Threads waiting on this condition
-   } pthread_cond_t;
-
-   -- Attribute for conditionally variables.
-   typedef struct
-   {
-     int __dummy;
-   } pthread_condattr_t;
-
-   #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
-
-   We don't use any fields of pthread_cond_t for anything at all.
-   Only the identity of the CVs is important.
+   We don't use any fields of vg_pthread_cond_t for anything at all.
+   Only the identity of the CVs is important.  (Actually, we initialise
+   __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
 
    Linux pthreads supports no attributes on condition variables, so we
    don't need to think too hard there.  */
@@ -2256,8 +2228,8 @@
 void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
 {
    Char             msg_buf[100];
-   pthread_mutex_t* mx;
-   pthread_cond_t*  cv;
+   vg_pthread_mutex_t* mx;
+   vg_pthread_cond_t*  cv;
 
    vg_assert(VG_(is_valid_tid)(tid) 
              && VG_(threads)[tid].status == VgTs_WaitCV
@@ -2267,27 +2239,27 @@
    cv = VG_(threads)[tid].associated_cv;
    vg_assert(cv != NULL);
 
-   if (mx->__m_owner == VG_INVALID_THREADID) {
+   if (mx->__vg_m_owner == VG_INVALID_THREADID) {
       /* Currently unheld; hand it out to thread tid. */
-      vg_assert(mx->__m_count == 0);
+      vg_assert(mx->__vg_m_count == 0);
       VG_(threads)[tid].status        = VgTs_Runnable;
       SET_PTHREQ_RETVAL(tid, ETIMEDOUT);  /* pthread_cond_wait return value */
       VG_(threads)[tid].associated_cv = NULL;
       VG_(threads)[tid].associated_mx = NULL;
-      mx->__m_owner = (_pthread_descr)tid;
-      mx->__m_count = 1;
+      mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
+      mx->__vg_m_count = 1;
 
       VG_TRACK( post_mutex_lock, tid, mx );
 
       if (VG_(clo_trace_pthread_level) >= 1) {
          VG_(sprintf)(msg_buf, 
-            "pthread_cond_timedwai cv %p: TIMEOUT with mx %p", 
+            "pthread_cond_timedwait cv %p: TIMEOUT with mx %p", 
             cv, mx );
          print_pthread_event(tid, msg_buf);
       }
    } else {
       /* Currently held.  Make thread tid be blocked on it. */
-      vg_assert(mx->__m_count > 0);
+      vg_assert(mx->__vg_m_count > 0);
       VG_TRACK( pre_mutex_lock, tid, mx );
 
       VG_(threads)[tid].status        = VgTs_WaitMX;
@@ -2296,7 +2268,7 @@
       VG_(threads)[tid].associated_mx = mx;
       if (VG_(clo_trace_pthread_level) >= 1) {
          VG_(sprintf)(msg_buf, 
-            "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p", 
+            "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p", 
             cv, mx );
          print_pthread_event(tid, msg_buf);
       }
@@ -2305,13 +2277,13 @@
 
 
 static
-void release_N_threads_waiting_on_cond ( pthread_cond_t* cond, 
+void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond, 
                                          Int n_to_release, 
                                          Char* caller )
 {
    Int              i;
    Char             msg_buf[100];
-   pthread_mutex_t* mx;
+   vg_pthread_mutex_t* mx;
 
    while (True) {
       if (n_to_release == 0)
@@ -2337,14 +2309,14 @@
 
       VG_TRACK( pre_mutex_lock, i, mx );
 
-      if (mx->__m_owner == VG_INVALID_THREADID) {
+      if (mx->__vg_m_owner == VG_INVALID_THREADID) {
          /* Currently unheld; hand it out to thread i. */
-         vg_assert(mx->__m_count == 0);
+         vg_assert(mx->__vg_m_count == 0);
          VG_(threads)[i].status        = VgTs_Runnable;
          VG_(threads)[i].associated_cv = NULL;
          VG_(threads)[i].associated_mx = NULL;
-         mx->__m_owner = (_pthread_descr)i;
-         mx->__m_count = 1;
+         mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
+         mx->__vg_m_count = 1;
          /* .m_edx already holds pth_cond_wait success value (0) */
 
 	 VG_TRACK( post_mutex_lock, i, mx );
@@ -2357,7 +2329,7 @@
 
       } else {
          /* Currently held.  Make thread i be blocked on it. */
-         vg_assert(mx->__m_count > 0);
+         vg_assert(mx->__vg_m_count > 0);
          VG_(threads)[i].status        = VgTs_WaitMX;
          VG_(threads)[i].associated_cv = NULL;
          VG_(threads)[i].associated_mx = mx;
@@ -2378,8 +2350,8 @@
 
 static
 void do_pthread_cond_wait ( ThreadId tid,
-                            pthread_cond_t *cond, 
-                            pthread_mutex_t *mutex,
+                            vg_pthread_cond_t *cond, 
+                            vg_pthread_mutex_t *mutex,
 			    UInt ms_end )
 {
    Char msg_buf[100];
@@ -2406,7 +2378,7 @@
    }
 
    /* More paranoia ... */
-   switch (mutex->__m_kind) {
+   switch (mutex->__vg_m_kind) {
 #     ifndef GLIBC_2_1    
       case PTHREAD_MUTEX_TIMED_NP:
       case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -2416,7 +2388,7 @@
 #     endif
       case PTHREAD_MUTEX_RECURSIVE_NP:
       case PTHREAD_MUTEX_ERRORCHECK_NP:
-         if (mutex->__m_count >= 0) break;
+         if (mutex->__vg_m_count >= 0) break;
          /* else fall thru */
       default:
          VG_(record_pthread_error)( tid, 
@@ -2426,8 +2398,8 @@
    }
 
    /* Barf if we don't currently hold the mutex. */
-   if (mutex->__m_count == 0 /* nobody holds it */
-       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
+   if (mutex->__vg_m_count == 0 /* nobody holds it */
+       || (ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
          VG_(record_pthread_error)( tid, 
             "pthread_cond_wait/timedwait: mutex is unlocked "
             "or is locked but not owned by thread");
@@ -2458,7 +2430,7 @@
 static
 void do_pthread_cond_signal_or_broadcast ( ThreadId tid, 
                                            Bool broadcast,
-                                           pthread_cond_t *cond )
+                                           vg_pthread_cond_t *cond )
 {
    Char  msg_buf[100];
    Char* caller 
@@ -3010,15 +2982,15 @@
 
       case VG_USERREQ__PTHREAD_COND_WAIT:
          do_pthread_cond_wait( tid, 
-                               (pthread_cond_t *)(arg[1]),
-                               (pthread_mutex_t *)(arg[2]),
+                               (vg_pthread_cond_t *)(arg[1]),
+                               (vg_pthread_mutex_t *)(arg[2]),
                                0xFFFFFFFF /* no timeout */ );
          break;
 
       case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
          do_pthread_cond_wait( tid, 
-                               (pthread_cond_t *)(arg[1]),
-                               (pthread_mutex_t *)(arg[2]),
+                               (vg_pthread_cond_t *)(arg[1]),
+                               (vg_pthread_mutex_t *)(arg[2]),
                                arg[3] /* timeout millisecond point */ );
          break;
 
@@ -3026,14 +2998,14 @@
          do_pthread_cond_signal_or_broadcast( 
             tid, 
 	    False, /* signal, not broadcast */
-            (pthread_cond_t *)(arg[1]) );
+            (vg_pthread_cond_t *)(arg[1]) );
          break;
 
       case VG_USERREQ__PTHREAD_COND_BROADCAST:
          do_pthread_cond_signal_or_broadcast( 
             tid, 
 	    True, /* broadcast, not signal */
-            (pthread_cond_t *)(arg[1]) );
+            (vg_pthread_cond_t *)(arg[1]) );
          break;
 
       case VG_USERREQ__PTHREAD_KEY_VALIDATE:
@@ -3276,8 +3248,8 @@
 static
 void scheduler_sanity ( void )
 {
-   pthread_mutex_t* mx;
-   pthread_cond_t*  cv;
+   vg_pthread_mutex_t* mx;
+   vg_pthread_cond_t*  cv;
    Int              i;
    struct timeout*  top;
    UInt		    lasttime = 0;
@@ -3316,9 +3288,9 @@
             Possibly to do with signals. */
          vg_assert(cv == NULL);
          /* 1 */ vg_assert(mx != NULL);
-	 /* 2 */ vg_assert(mx->__m_count > 0);
-         /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
-         /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__m_owner); 
+	 /* 2 */ vg_assert(mx->__vg_m_count > 0);
+         /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
+         /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner); 
       } else 
       if (VG_(threads)[i].status == VgTs_WaitCV) {
          vg_assert(cv != NULL);