Made indentation in the DRD source code uniform: the indentation size is now two spaces in all source files.
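
Sanity check (illustrative only; assumes a git checkout of this tree): the change is
intended to be whitespace-only, so a whitespace-ignoring diff of this commit should show
nothing except the removed Emacs "c-basic-offset: 3" file-local variable blocks (see the
end of drd_bitmap.c and drd_clientreq.c below):

  git show -w <this-commit>    # "-w" makes git ignore whitespace-only changes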

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@7684 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/exp-drd/drd_bitmap.c b/exp-drd/drd_bitmap.c
index 5131ebc..d56062f 100644
--- a/exp-drd/drd_bitmap.c
+++ b/exp-drd/drd_bitmap.c
@@ -51,26 +51,26 @@
 
 struct bitmap* bm_new()
 {
-   struct bitmap* bm;
+  struct bitmap* bm;
 
-   // If this assert fails, fix the definition of BITS_PER_BITS_PER_UWORD
-   // in drd_bitmap.h.
-   tl_assert((1 << BITS_PER_BITS_PER_UWORD) == BITS_PER_UWORD);
+  // If this assert fails, fix the definition of BITS_PER_BITS_PER_UWORD
+  // in drd_bitmap.h.
+  tl_assert((1 << BITS_PER_BITS_PER_UWORD) == BITS_PER_UWORD);
 
-   bm = VG_(malloc)(sizeof(*bm));
-   tl_assert(bm);
-   bm->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));
+  bm = VG_(malloc)(sizeof(*bm));
+  tl_assert(bm);
+  bm->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));
 
-   s_bitmap_creation_count++;
+  s_bitmap_creation_count++;
 
-   return bm;
+  return bm;
 }
 
 void bm_delete(struct bitmap* const bm)
 {
-   tl_assert(bm);
-   VG_(OSetGen_Destroy)(bm->oset);
-   VG_(free)(bm);
+  tl_assert(bm);
+  VG_(OSetGen_Destroy)(bm->oset);
+  VG_(free)(bm);
 }
 
 /**
@@ -82,210 +82,210 @@
                      const Addr a1, const Addr a2,
                      const BmAccessTypeT access_type)
 {
-   Addr b, b_next;
+  Addr b, b_next;
 
-   tl_assert(bm);
-   tl_assert(a1 < a2);
+  tl_assert(bm);
+  tl_assert(a1 < a2);
 
-   for (b = a1; b < a2; b = b_next)
-   {
-      Addr b_start;
-      Addr b_end;
-      struct bitmap2* bm2;
-      SPLIT_ADDRESS(b);
+  for (b = a1; b < a2; b = b_next)
+  {
+    Addr b_start;
+    Addr b_end;
+    struct bitmap2* bm2;
+    SPLIT_ADDRESS(b);
 
-      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-      if (b_next > a2)
-      {
-         b_next = a2;
-      }
+    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+    if (b_next > a2)
+    {
+      b_next = a2;
+    }
 
-      bm2 = bm2_lookup_or_insert(bm, b1);
-      tl_assert(bm2);
+    bm2 = bm2_lookup_or_insert(bm, b1);
+    tl_assert(bm2);
 
-      if ((bm2->addr << ADDR0_BITS) < a1)
-         b_start = a1;
+    if ((bm2->addr << ADDR0_BITS) < a1)
+      b_start = a1;
+    else
+      if ((bm2->addr << ADDR0_BITS) < a2)
+        b_start = (bm2->addr << ADDR0_BITS);
       else
-         if ((bm2->addr << ADDR0_BITS) < a2)
-            b_start = (bm2->addr << ADDR0_BITS);
-         else
-            break;
-      tl_assert(a1 <= b_start && b_start <= a2);
+        break;
+    tl_assert(a1 <= b_start && b_start <= a2);
 
-      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-         b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-      else
-         b_end = a2;
-      tl_assert(a1 <= b_end && b_end <= a2);
-      tl_assert(b_start < b_end);
-      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+    if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+      b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+    else
+      b_end = a2;
+    tl_assert(a1 <= b_end && b_end <= a2);
+    tl_assert(b_start < b_end);
+    tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
       
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
+    for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
+    {
+      if (access_type == eLoad)
       {
-         if (access_type == eLoad)
-         {
-            bm0_set(bm2->bm1.bm0_r, b0);
-         }
-         else
-         {
-            bm0_set(bm2->bm1.bm0_w, b0);
-         }
+        bm0_set(bm2->bm1.bm0_r, b0);
       }
-   }
+      else
+      {
+        bm0_set(bm2->bm1.bm0_w, b0);
+      }
+    }
+  }
 }
 
 static inline
 void bm_access_aligned_load(struct bitmap* const bm,
                             const Addr a1, const Addr a2)
 {
-   struct bitmap2* bm2;
+  struct bitmap2* bm2;
 
 #if 0
-   /* Commented out the statements below because of performance reasons. */
-   tl_assert(bm);
-   tl_assert(a1 < a2);
-   tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
-             || (a2 - a1) == 4 || (a2 - a1) == 8);
-   tl_assert((a1 & (a2 - a1 - 1)) == 0);
+  /* Commented out the statements below because of performance reasons. */
+  tl_assert(bm);
+  tl_assert(a1 < a2);
+  tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+            || (a2 - a1) == 4 || (a2 - a1) == 8);
+  tl_assert((a1 & (a2 - a1 - 1)) == 0);
 #endif
 
-   bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
-   tl_assert(bm2);
+  bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
+  tl_assert(bm2);
 
-   bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
+  bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
 }
 
 static inline
 void bm_access_aligned_store(struct bitmap* const bm,
                              const Addr a1, const Addr a2)
 {
-   struct bitmap2* bm2;
+  struct bitmap2* bm2;
 
 #if 0
-   /* Commented out the statements below because of performance reasons. */
-   tl_assert(bm);
-   tl_assert(a1 < a2);
-   tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
-             || (a2 - a1) == 4 || (a2 - a1) == 8);
-   tl_assert((a1 & (a2 - a1 - 1)) == 0);
+  /* Commented out the statements below because of performance reasons. */
+  tl_assert(bm);
+  tl_assert(a1 < a2);
+  tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+            || (a2 - a1) == 4 || (a2 - a1) == 8);
+  tl_assert((a1 & (a2 - a1 - 1)) == 0);
 #endif
 
-   bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
-   tl_assert(bm2);
+  bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
+  tl_assert(bm2);
 
-   bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
+  bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
 }
 
 void bm_access_range_load(struct bitmap* const bm,
                           const Addr a1, const Addr a2)
 {
-   bm_access_range(bm, a1, a2, eLoad);
+  bm_access_range(bm, a1, a2, eLoad);
 }
 
 void bm_access_load_1(struct bitmap* const bm, const Addr a1)
 {
-   bm_access_aligned_load(bm, a1, a1 + 1);
+  bm_access_aligned_load(bm, a1, a1 + 1);
 }
 
 void bm_access_load_2(struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 1) == 0)
-      bm_access_aligned_load(bm, a1, a1 + 2);
-   else
-      bm_access_range(bm, a1, a1 + 2, eLoad);
+  if ((a1 & 1) == 0)
+    bm_access_aligned_load(bm, a1, a1 + 2);
+  else
+    bm_access_range(bm, a1, a1 + 2, eLoad);
 }
 
 void bm_access_load_4(struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 3) == 0)
-      bm_access_aligned_load(bm, a1, a1 + 4);
-   else
-      bm_access_range(bm, a1, a1 + 4, eLoad);
+  if ((a1 & 3) == 0)
+    bm_access_aligned_load(bm, a1, a1 + 4);
+  else
+    bm_access_range(bm, a1, a1 + 4, eLoad);
 }
 
 void bm_access_load_8(struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 7) == 0)
-      bm_access_aligned_load(bm, a1, a1 + 8);
-   else if ((a1 & 3) == 0)
-   {
-      bm_access_aligned_load(bm, a1 + 0, a1 + 4);
-      bm_access_aligned_load(bm, a1 + 4, a1 + 8);
-   }
-   else
-      bm_access_range(bm, a1, a1 + 8, eLoad);
+  if ((a1 & 7) == 0)
+    bm_access_aligned_load(bm, a1, a1 + 8);
+  else if ((a1 & 3) == 0)
+  {
+    bm_access_aligned_load(bm, a1 + 0, a1 + 4);
+    bm_access_aligned_load(bm, a1 + 4, a1 + 8);
+  }
+  else
+    bm_access_range(bm, a1, a1 + 8, eLoad);
 }
 
 void bm_access_store_1(struct bitmap* const bm, const Addr a1)
 {
-   bm_access_aligned_store(bm, a1, a1 + 1);
+  bm_access_aligned_store(bm, a1, a1 + 1);
 }
 
 void bm_access_store_2(struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 1) == 0)
-      bm_access_aligned_store(bm, a1, a1 + 2);
-   else
-      bm_access_range(bm, a1, a1 + 2, eStore);
+  if ((a1 & 1) == 0)
+    bm_access_aligned_store(bm, a1, a1 + 2);
+  else
+    bm_access_range(bm, a1, a1 + 2, eStore);
 }
 
 void bm_access_store_4(struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 3) == 0)
-      bm_access_aligned_store(bm, a1, a1 + 4);
-   else
-      bm_access_range(bm, a1, a1 + 4, eStore);
+  if ((a1 & 3) == 0)
+    bm_access_aligned_store(bm, a1, a1 + 4);
+  else
+    bm_access_range(bm, a1, a1 + 4, eStore);
 }
 
 void bm_access_store_8(struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 7) == 0)
-      bm_access_aligned_store(bm, a1, a1 + 8);
-   else if ((a1 & 3) == 0)
-   {
-      bm_access_aligned_store(bm, a1 + 0, a1 + 4);
-      bm_access_aligned_store(bm, a1 + 4, a1 + 8);
-   }
-   else
-      bm_access_range(bm, a1, a1 + 8, eStore);
+  if ((a1 & 7) == 0)
+    bm_access_aligned_store(bm, a1, a1 + 8);
+  else if ((a1 & 3) == 0)
+  {
+    bm_access_aligned_store(bm, a1 + 0, a1 + 4);
+    bm_access_aligned_store(bm, a1 + 4, a1 + 8);
+  }
+  else
+    bm_access_range(bm, a1, a1 + 8, eStore);
 }
 
 void bm_access_range_store(struct bitmap* const bm,
                            const Addr a1, const Addr a2)
 {
-   bm_access_range(bm, a1, a2, eStore);
+  bm_access_range(bm, a1, a2, eStore);
 }
 
 Bool bm_has(const struct bitmap* const bm, const Addr a1, const Addr a2,
             const BmAccessTypeT access_type)
 {
-   Addr b;
-   for (b = a1; b < a2; b++)
-   {
-      if (! bm_has_1(bm, b, access_type))
-      {
-         return False;
-      }
-   }
-   return True;
+  Addr b;
+  for (b = a1; b < a2; b++)
+  {
+    if (! bm_has_1(bm, b, access_type))
+    {
+      return False;
+    }
+  }
+  return True;
 }
 
 Bool bm_has_any(const struct bitmap* const bm,
                 const Addr a1, const Addr a2,
                 const BmAccessTypeT access_type)
 {
-   Addr b;
+  Addr b;
 
-   tl_assert(bm);
+  tl_assert(bm);
 
-   for (b = a1; b < a2; b++)
-   {
-      if (bm_has_1(bm, b, access_type))
-      {
-         return True;
-      }
-   }
-   return False;
+  for (b = a1; b < a2; b++)
+  {
+    if (bm_has_1(bm, b, access_type))
+    {
+      return True;
+    }
+  }
+  return False;
 }
 
 /* Return a non-zero value if there is a read access, write access or both */
@@ -294,56 +294,56 @@
                         const Addr a1,
                         const Addr a2)
 {
-   Addr b, b_next;
+  Addr b, b_next;
 
-   tl_assert(bm);
+  tl_assert(bm);
 
-   for (b = a1; b < a2; b = b_next)
-   {
-      struct bitmap2* bm2 = bm_lookup(bm, b);
+  for (b = a1; b < a2; b = b_next)
+  {
+    struct bitmap2* bm2 = bm_lookup(bm, b);
 
-      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-      if (b_next > a2)
-      {
-         b_next = a2;
-      }
+    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+    if (b_next > a2)
+    {
+      b_next = a2;
+    }
 
-      if (bm2)
-      {
-         Addr b_start;
-         Addr b_end;
-         UWord b0;
-         const struct bitmap1* const p1 = &bm2->bm1;
+    if (bm2)
+    {
+      Addr b_start;
+      Addr b_end;
+      UWord b0;
+      const struct bitmap1* const p1 = &bm2->bm1;
 
-         if ((bm2->addr << ADDR0_BITS) < a1)
-            b_start = a1;
-         else
-            if ((bm2->addr << ADDR0_BITS) < a2)
-               b_start = (bm2->addr << ADDR0_BITS);
-            else
-               break;
-         tl_assert(a1 <= b_start && b_start <= a2);
+      if ((bm2->addr << ADDR0_BITS) < a1)
+        b_start = a1;
+      else
+        if ((bm2->addr << ADDR0_BITS) < a2)
+          b_start = (bm2->addr << ADDR0_BITS);
+        else
+          break;
+      tl_assert(a1 <= b_start && b_start <= a2);
 
-         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-         else
-            b_end = a2;
-         tl_assert(a1 <= b_end && b_end <= a2);
-         tl_assert(b_start < b_end);
-         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+        b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+      else
+        b_end = a2;
+      tl_assert(a1 <= b_end && b_end <= a2);
+      tl_assert(b_start < b_end);
+      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
       
-         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
-         {
-            const UWord mask
-               = bm0_is_set(p1->bm0_r, b0) | bm0_is_set(p1->bm0_w, b0);
-            if (mask)
-            {
-               return mask;
-            }
-         }
+      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+      {
+        const UWord mask
+          = bm0_is_set(p1->bm0_r, b0) | bm0_is_set(p1->bm0_w, b0);
+        if (mask)
+        {
+          return mask;
+        }
       }
-   }
-   return 0;
+    }
+  }
+  return 0;
 }
 
 /**
@@ -355,339 +355,339 @@
                const Addr a,
                const BmAccessTypeT access_type)
 {
-   struct bitmap2* p2;
-   struct bitmap1* p1;
-   UWord* p0;
-   const UWord a0 = a & ADDR0_MASK;
+  struct bitmap2* p2;
+  struct bitmap1* p1;
+  UWord* p0;
+  const UWord a0 = a & ADDR0_MASK;
 
-   tl_assert(bm);
+  tl_assert(bm);
 
-   p2 = bm_lookup(bm, a);
-   if (p2)
-   {
-      p1 = &p2->bm1;
-      p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
-      return bm0_is_set(p0, a0);
-   }
-   return 0;
+  p2 = bm_lookup(bm, a);
+  if (p2)
+  {
+    p1 = &p2->bm1;
+    p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
+    return bm0_is_set(p0, a0);
+  }
+  return 0;
 }
 
 static __inline__
 void bm1_clear(struct bitmap1* const bm1, const Addr a1, const Addr a2)
 {
-   UWord idx;
-   UWord mask;
+  UWord idx;
+  UWord mask;
 
 #if 0
-   /* Commented out the statements below because of performance reasons. */
-   tl_assert(a1);
-   tl_assert(a1 <= a2);
-   tl_assert(UWORD_MSB(a1) == UWORD_MSB(a2)
-             || UWORD_MSB(a1) == UWORD_MSB(a2 - 1));
+  /* Commented out the statements below because of performance reasons. */
+  tl_assert(a1);
+  tl_assert(a1 <= a2);
+  tl_assert(UWORD_MSB(a1) == UWORD_MSB(a2)
+            || UWORD_MSB(a1) == UWORD_MSB(a2 - 1));
 #endif
 
-   idx = (a1 & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
-   /* mask: a contiguous series of one bits. The first bit set is bit */
-   /* UWORD_LSB(a2-1), and the last bit set is UWORD_LSB(a1).         */
-   mask = UWORD_LSB(a2) ? bm0_mask(a2) - bm0_mask(a1) : - bm0_mask(a1);
-   bm1->bm0_r[idx] &= ~mask;
-   bm1->bm0_w[idx] &= ~mask;
+  idx = (a1 & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
+  /* mask: a contiguous series of one bits. The first bit set is bit */
+  /* UWORD_LSB(a2-1), and the last bit set is UWORD_LSB(a1).         */
+  mask = UWORD_LSB(a2) ? bm0_mask(a2) - bm0_mask(a1) : - bm0_mask(a1);
+  bm1->bm0_r[idx] &= ~mask;
+  bm1->bm0_w[idx] &= ~mask;
 }
 
 void bm_clear_all(const struct bitmap* const bm)
 {
-   struct bitmap2* bm2;
+  struct bitmap2* bm2;
 
-   VG_(OSetGen_ResetIter)(bm->oset);
+  VG_(OSetGen_ResetIter)(bm->oset);
 
-   for ( ; (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0; )
-   {
-      struct bitmap1* const bm1 = &bm2->bm1;
-      tl_assert(bm1);
-      VG_(memset)(&bm1->bm0_r[0], 0, sizeof(bm1->bm0_r));
-      VG_(memset)(&bm1->bm0_w[0], 0, sizeof(bm1->bm0_w));
-   }
+  for ( ; (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0; )
+  {
+    struct bitmap1* const bm1 = &bm2->bm1;
+    tl_assert(bm1);
+    VG_(memset)(&bm1->bm0_r[0], 0, sizeof(bm1->bm0_r));
+    VG_(memset)(&bm1->bm0_w[0], 0, sizeof(bm1->bm0_w));
+  }
 }
 
 void bm_clear(const struct bitmap* const bm,
               const Addr a1,
               const Addr a2)
 {
-   Addr b, b_next;
+  Addr b, b_next;
 
-   tl_assert(bm);
-   tl_assert(a1);
-   tl_assert(a1 <= a2);
+  tl_assert(bm);
+  tl_assert(a1);
+  tl_assert(a1 <= a2);
 
-   for (b = a1; b < a2; b = b_next)
-   {
-      struct bitmap2* const p2 = bm_lookup(bm, b);
+  for (b = a1; b < a2; b = b_next)
+  {
+    struct bitmap2* const p2 = bm_lookup(bm, b);
 
-      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-      if (b_next > a2)
+    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+    if (b_next > a2)
+    {
+      b_next = a2;
+    }
+
+    if (p2)
+    {
+      Addr c = b;
+      if (UWORD_LSB(c))
       {
-         b_next = a2;
+        Addr c_next = UWORD_MSB(c) + BITS_PER_UWORD;
+        if (c_next > b_next)
+          c_next = b_next;
+        bm1_clear(&p2->bm1, c, c_next);
+        c = c_next;
       }
-
-      if (p2)
+      if (UWORD_LSB(c) == 0)
       {
-         Addr c = b;
-         if (UWORD_LSB(c))
-         {
-            Addr c_next = UWORD_MSB(c) + BITS_PER_UWORD;
-            if (c_next > b_next)
-               c_next = b_next;
-            bm1_clear(&p2->bm1, c, c_next);
-            c = c_next;
-         }
-         if (UWORD_LSB(c) == 0)
-         {
-            const Addr c_next = UWORD_MSB(b_next);
-            tl_assert(UWORD_LSB(c) == 0);
-            tl_assert(UWORD_LSB(c_next) == 0);
-            tl_assert(c_next <= b_next);
-            tl_assert(c <= c_next);
-            if (c_next > c)
-            {
-               UWord idx = (c & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
-               VG_(memset)(&p2->bm1.bm0_r[idx], 0, (c_next - c) / 8);
-               VG_(memset)(&p2->bm1.bm0_w[idx], 0, (c_next - c) / 8);
-               c = c_next;
-            }
-         }
-         if (c != b_next)
-         {
-            bm1_clear(&p2->bm1, c, b_next);
-         }
+        const Addr c_next = UWORD_MSB(b_next);
+        tl_assert(UWORD_LSB(c) == 0);
+        tl_assert(UWORD_LSB(c_next) == 0);
+        tl_assert(c_next <= b_next);
+        tl_assert(c <= c_next);
+        if (c_next > c)
+        {
+          UWord idx = (c & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
+          VG_(memset)(&p2->bm1.bm0_r[idx], 0, (c_next - c) / 8);
+          VG_(memset)(&p2->bm1.bm0_w[idx], 0, (c_next - c) / 8);
+          c = c_next;
+        }
       }
-   }
+      if (c != b_next)
+      {
+        bm1_clear(&p2->bm1, c, b_next);
+      }
+    }
+  }
 }
 
 Bool bm_has_conflict_with(const struct bitmap* const bm,
                           const Addr a1, const Addr a2,
                           const BmAccessTypeT access_type)
 {
-   Addr b, b_next;
+  Addr b, b_next;
 
-   tl_assert(bm);
+  tl_assert(bm);
 
-   for (b = a1; b < a2; b = b_next)
-   {
-      struct bitmap2* bm2 = bm_lookup(bm, b);
+  for (b = a1; b < a2; b = b_next)
+  {
+    struct bitmap2* bm2 = bm_lookup(bm, b);
 
-      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-      if (b_next > a2)
-      {
-         b_next = a2;
-      }
+    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+    if (b_next > a2)
+    {
+      b_next = a2;
+    }
 
-      if (bm2)
-      {
-         Addr b_start;
-         Addr b_end;
-         UWord b0;
-         const struct bitmap1* const p1 = &bm2->bm1;
+    if (bm2)
+    {
+      Addr b_start;
+      Addr b_end;
+      UWord b0;
+      const struct bitmap1* const p1 = &bm2->bm1;
 
-         if ((bm2->addr << ADDR0_BITS) < a1)
-            b_start = a1;
-         else
-            if ((bm2->addr << ADDR0_BITS) < a2)
-               b_start = (bm2->addr << ADDR0_BITS);
-            else
-               break;
-         tl_assert(a1 <= b_start && b_start <= a2);
+      if ((bm2->addr << ADDR0_BITS) < a1)
+        b_start = a1;
+      else
+        if ((bm2->addr << ADDR0_BITS) < a2)
+          b_start = (bm2->addr << ADDR0_BITS);
+        else
+          break;
+      tl_assert(a1 <= b_start && b_start <= a2);
 
-         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-         else
-            b_end = a2;
-         tl_assert(a1 <= b_end && b_end <= a2);
-         tl_assert(b_start < b_end);
-         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+        b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+      else
+        b_end = a2;
+      tl_assert(a1 <= b_end && b_end <= a2);
+      tl_assert(b_start < b_end);
+      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
       
-         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
-         {
-            if (access_type == eLoad)
-            {
-               if (bm0_is_set(p1->bm0_w, b0))
-               {
-                  return True;
-               }
-            }
-            else
-            {
-               tl_assert(access_type == eStore);
-               if (bm0_is_set(p1->bm0_r, b0)
-                   | bm0_is_set(p1->bm0_w, b0))
-               {
-                  return True;
-               }
-            }
-         }
+      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+      {
+        if (access_type == eLoad)
+        {
+          if (bm0_is_set(p1->bm0_w, b0))
+          {
+            return True;
+          }
+        }
+        else
+        {
+          tl_assert(access_type == eStore);
+          if (bm0_is_set(p1->bm0_r, b0)
+              | bm0_is_set(p1->bm0_w, b0))
+          {
+            return True;
+          }
+        }
       }
-   }
-   return False;
+    }
+  }
+  return False;
 }
 
 static inline
 Bool bm_aligned_load_has_conflict_with(const struct bitmap* const bm,
                                        const Addr a1, const Addr a2)
 {
-   struct bitmap2* bm2;
+  struct bitmap2* bm2;
 
 #if 0
-   /* Commented out the statements below because of performance reasons. */
-   tl_assert(bm);
-   tl_assert(a1 < a2);
-   tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
-             || (a2 - a1) == 4 || (a2 - a1) == 8);
-   tl_assert((a1 & (a2 - a1 - 1)) == 0);
+  /* Commented out the statements below because of performance reasons. */
+  tl_assert(bm);
+  tl_assert(a1 < a2);
+  tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+            || (a2 - a1) == 4 || (a2 - a1) == 8);
+  tl_assert((a1 & (a2 - a1 - 1)) == 0);
 #endif
 
-   bm2 = bm_lookup(bm, a1);
+  bm2 = bm_lookup(bm, a1);
 
-   if (bm2
-       && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
-   {
-      return True;
-   }
-   return False;
+  if (bm2
+      && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
+  {
+    return True;
+  }
+  return False;
 }
 
 static inline
 Bool bm_aligned_store_has_conflict_with(const struct bitmap* const bm,
                                         const Addr a1, const Addr a2)
 {
-   struct bitmap2* bm2;
+  struct bitmap2* bm2;
 
 #if 0
-   /* Commented out the statements below because of performance reasons. */
-   tl_assert(bm);
-   tl_assert(a1 < a2);
-   tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
-             || (a2 - a1) == 4 || (a2 - a1) == 8);
-   tl_assert((a1 & (a2 - a1 - 1)) == 0);
+  /* Commented out the statements below because of performance reasons. */
+  tl_assert(bm);
+  tl_assert(a1 < a2);
+  tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+            || (a2 - a1) == 4 || (a2 - a1) == 8);
+  tl_assert((a1 & (a2 - a1 - 1)) == 0);
 #endif
 
-   bm2 = bm_lookup(bm, a1);
+  bm2 = bm_lookup(bm, a1);
 
-   if (bm2)
-   {
-      const struct bitmap1* const p1 = &bm2->bm1;
+  if (bm2)
+  {
+    const struct bitmap1* const p1 = &bm2->bm1;
 
-      if (bm0_is_any_set(p1->bm0_r, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK)
-          | bm0_is_any_set(p1->bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
-      {
-         return True;
-      }
-   }
-   return False;
+    if (bm0_is_any_set(p1->bm0_r, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK)
+        | bm0_is_any_set(p1->bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
+    {
+      return True;
+    }
+  }
+  return False;
 }
 
 Bool bm_load_has_conflict_with(const struct bitmap* const bm,
                                const Addr a1, const Addr a2)
 {
-   return bm_has_conflict_with(bm, a1, a2, eLoad);
+  return bm_has_conflict_with(bm, a1, a2, eLoad);
 }
 
 Bool bm_load_1_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   return bm_aligned_load_has_conflict_with(bm, a1, a1 + 1);
+  return bm_aligned_load_has_conflict_with(bm, a1, a1 + 1);
 }
 
 Bool bm_load_2_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 1) == 0)
-      return bm_aligned_load_has_conflict_with(bm, a1, a1 + 2);
-   else
-      return bm_has_conflict_with(bm, a1, a1 + 2, eLoad);
+  if ((a1 & 1) == 0)
+    return bm_aligned_load_has_conflict_with(bm, a1, a1 + 2);
+  else
+    return bm_has_conflict_with(bm, a1, a1 + 2, eLoad);
 }
 
 Bool bm_load_4_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 3) == 0)
-      return bm_aligned_load_has_conflict_with(bm, a1, a1 + 4);
-   else
-      return bm_has_conflict_with(bm, a1, a1 + 4, eLoad);
+  if ((a1 & 3) == 0)
+    return bm_aligned_load_has_conflict_with(bm, a1, a1 + 4);
+  else
+    return bm_has_conflict_with(bm, a1, a1 + 4, eLoad);
 }
 
 Bool bm_load_8_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 7) == 0)
-      return bm_aligned_load_has_conflict_with(bm, a1, a1 + 8);
-   else
-      return bm_has_conflict_with(bm, a1, a1 + 8, eLoad);
+  if ((a1 & 7) == 0)
+    return bm_aligned_load_has_conflict_with(bm, a1, a1 + 8);
+  else
+    return bm_has_conflict_with(bm, a1, a1 + 8, eLoad);
 }
 
 Bool bm_store_1_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   return bm_aligned_store_has_conflict_with(bm, a1, a1 + 1);
+  return bm_aligned_store_has_conflict_with(bm, a1, a1 + 1);
 }
 
 Bool bm_store_2_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 1) == 0)
-      return bm_aligned_store_has_conflict_with(bm, a1, a1 + 2);
-   else
-      return bm_has_conflict_with(bm, a1, a1 + 2, eStore);
+  if ((a1 & 1) == 0)
+    return bm_aligned_store_has_conflict_with(bm, a1, a1 + 2);
+  else
+    return bm_has_conflict_with(bm, a1, a1 + 2, eStore);
 }
 
 Bool bm_store_4_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 3) == 0)
-      return bm_aligned_store_has_conflict_with(bm, a1, a1 + 4);
-   else
-      return bm_has_conflict_with(bm, a1, a1 + 4, eStore);
+  if ((a1 & 3) == 0)
+    return bm_aligned_store_has_conflict_with(bm, a1, a1 + 4);
+  else
+    return bm_has_conflict_with(bm, a1, a1 + 4, eStore);
 }
 
 Bool bm_store_8_has_conflict_with(const struct bitmap* const bm, const Addr a1)
 {
-   if ((a1 & 7) == 0)
-      return bm_aligned_store_has_conflict_with(bm, a1, a1 + 8);
-   else
-      return bm_has_conflict_with(bm, a1, a1 + 8, eStore);
+  if ((a1 & 7) == 0)
+    return bm_aligned_store_has_conflict_with(bm, a1, a1 + 8);
+  else
+    return bm_has_conflict_with(bm, a1, a1 + 8, eStore);
 }
 
 Bool bm_store_has_conflict_with(const struct bitmap* const bm,
                                 const Addr a1, const Addr a2)
 {
-   return bm_has_conflict_with(bm, a1, a2, eStore);
+  return bm_has_conflict_with(bm, a1, a2, eStore);
 }
 
 void bm_swap(struct bitmap* const bm1, struct bitmap* const bm2)
 {
-   OSet* const tmp = bm1->oset;
-   bm1->oset = bm2->oset;
-   bm2->oset = tmp;
+  OSet* const tmp = bm1->oset;
+  bm1->oset = bm2->oset;
+  bm2->oset = tmp;
 }
 
 void bm_merge2(struct bitmap* const lhs,
                const struct bitmap* const rhs)
 {
-   struct bitmap2* bm2l;
-   const struct bitmap2* bm2r;
+  struct bitmap2* bm2l;
+  const struct bitmap2* bm2r;
 
-   // First step: allocate any missing bitmaps in *lhs.
-   VG_(OSetGen_ResetIter)(rhs->oset);
-   for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
-   {
-      bm2_lookup_or_insert(lhs, bm2r->addr);
-   }
+  // First step: allocate any missing bitmaps in *lhs.
+  VG_(OSetGen_ResetIter)(rhs->oset);
+  for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
+  {
+    bm2_lookup_or_insert(lhs, bm2r->addr);
+  }
 
-   VG_(OSetGen_ResetIter)(lhs->oset);
-   VG_(OSetGen_ResetIter)(rhs->oset);
+  VG_(OSetGen_ResetIter)(lhs->oset);
+  VG_(OSetGen_ResetIter)(rhs->oset);
 
-   for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
-   {
-      do
-      {
-         bm2l = VG_(OSetGen_Next)(lhs->oset);
-      } while (bm2l->addr < bm2r->addr);
+  for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
+  {
+    do
+    {
+      bm2l = VG_(OSetGen_Next)(lhs->oset);
+    } while (bm2l->addr < bm2r->addr);
 
-      tl_assert(bm2l->addr == bm2r->addr);
+    tl_assert(bm2l->addr == bm2r->addr);
 
-      bm2_merge(bm2l, bm2r);
-   }
+    bm2_merge(bm2l, bm2r);
+  }
 }
 
 /**
@@ -699,105 +699,105 @@
 int bm_has_races(const struct bitmap* const lhs,
                  const struct bitmap* const rhs)
 {
-   VG_(OSetGen_ResetIter)(lhs->oset);
-   VG_(OSetGen_ResetIter)(rhs->oset);
+  VG_(OSetGen_ResetIter)(lhs->oset);
+  VG_(OSetGen_ResetIter)(rhs->oset);
 
-   for (;;)
-   {
-      const struct bitmap2* bm2l = VG_(OSetGen_Next)(lhs->oset);
-      const struct bitmap2* bm2r = VG_(OSetGen_Next)(rhs->oset);
-      const struct bitmap1* bm1l;
-      const struct bitmap1* bm1r;
-      unsigned k;
+  for (;;)
+  {
+    const struct bitmap2* bm2l = VG_(OSetGen_Next)(lhs->oset);
+    const struct bitmap2* bm2r = VG_(OSetGen_Next)(rhs->oset);
+    const struct bitmap1* bm1l;
+    const struct bitmap1* bm1r;
+    unsigned k;
 
-      while (bm2l && bm2r && bm2l->addr != bm2r->addr)
+    while (bm2l && bm2r && bm2l->addr != bm2r->addr)
+    {
+      if (bm2l->addr < bm2r->addr)
+        bm2l = VG_(OSetGen_Next)(lhs->oset);
+      else
+        bm2r = VG_(OSetGen_Next)(rhs->oset);
+    }
+    if (bm2l == 0 || bm2r == 0)
+      break;
+
+    bm1l = &bm2l->bm1;
+    bm1r = &bm2r->bm1;
+
+    for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+    {
+      unsigned b;
+      for (b = 0; b < BITS_PER_UWORD; b++)
       {
-         if (bm2l->addr < bm2r->addr)
-            bm2l = VG_(OSetGen_Next)(lhs->oset);
-         else
-            bm2r = VG_(OSetGen_Next)(rhs->oset);
+        UWord const access
+          = ((bm1l->bm0_r[k] & bm0_mask(b)) ? LHS_R : 0)
+          | ((bm1l->bm0_w[k] & bm0_mask(b)) ? LHS_W : 0)
+          | ((bm1r->bm0_r[k] & bm0_mask(b)) ? RHS_R : 0)
+          | ((bm1r->bm0_w[k] & bm0_mask(b)) ? RHS_W : 0);
+        Addr const a = MAKE_ADDRESS(bm2l->addr, k * BITS_PER_UWORD | b);
+        if (HAS_RACE(access) && ! drd_is_suppressed(a, a + 1))
+        {
+          return 1;
+        }
       }
-      if (bm2l == 0 || bm2r == 0)
-         break;
-
-      bm1l = &bm2l->bm1;
-      bm1r = &bm2r->bm1;
-
-      for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
-      {
-         unsigned b;
-         for (b = 0; b < BITS_PER_UWORD; b++)
-         {
-            UWord const access
-               = ((bm1l->bm0_r[k] & bm0_mask(b)) ? LHS_R : 0)
-               | ((bm1l->bm0_w[k] & bm0_mask(b)) ? LHS_W : 0)
-               | ((bm1r->bm0_r[k] & bm0_mask(b)) ? RHS_R : 0)
-               | ((bm1r->bm0_w[k] & bm0_mask(b)) ? RHS_W : 0);
-            Addr const a = MAKE_ADDRESS(bm2l->addr, k * BITS_PER_UWORD | b);
-            if (HAS_RACE(access) && ! drd_is_suppressed(a, a + 1))
-            {
-               return 1;
-            }
-         }
-      }
-   }
-   return 0;
+    }
+  }
+  return 0;
 }
 
 void bm_print(const struct bitmap* const bm)
 {
-   struct bitmap2* bm2;
+  struct bitmap2* bm2;
 
-   VG_(OSetGen_ResetIter)(bm->oset);
+  VG_(OSetGen_ResetIter)(bm->oset);
 
-   for ( ; (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0; )
-   {
-      const struct bitmap1* const bm1 = &bm2->bm1;
-      unsigned k;
-      for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+  for ( ; (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0; )
+  {
+    const struct bitmap1* const bm1 = &bm2->bm1;
+    unsigned k;
+    for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+    {
+      unsigned b;
+      for (b = 0; b < BITS_PER_UWORD; b++)
       {
-         unsigned b;
-         for (b = 0; b < BITS_PER_UWORD; b++)
-         {
-            int const r = bm1->bm0_r[k] & bm0_mask(b);
-            int const w = bm1->bm0_w[k] & bm0_mask(b);
-            Addr const a = MAKE_ADDRESS(bm2->addr, k * BITS_PER_UWORD | b);
-            if (r || w)
-            {
-               VG_(printf)("0x%08lx %c %c\n",
-                           (Addr)(a), 
-                           w ? 'W' : ' ', r ? 'R' : ' ');
-            }
-         }
+        int const r = bm1->bm0_r[k] & bm0_mask(b);
+        int const w = bm1->bm0_w[k] & bm0_mask(b);
+        Addr const a = MAKE_ADDRESS(bm2->addr, k * BITS_PER_UWORD | b);
+        if (r || w)
+        {
+          VG_(printf)("0x%08lx %c %c\n",
+                      (Addr)(a), 
+                      w ? 'W' : ' ', r ? 'R' : ' ');
+        }
       }
-   }
+    }
+  }
 }
 
 ULong bm_get_bitmap_creation_count(void)
 {
-   return s_bitmap_creation_count;
+  return s_bitmap_creation_count;
 }
 
 ULong bm_get_bitmap2_creation_count(void)
 {
-   return s_bitmap2_creation_count;
+  return s_bitmap2_creation_count;
 }
 
 static void bm2_merge(struct bitmap2* const bm2l,
                       const struct bitmap2* const bm2r)
 {
-   unsigned k;
+  unsigned k;
 
-   tl_assert(bm2l->addr == bm2r->addr);
+  tl_assert(bm2l->addr == bm2r->addr);
 
-   for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
-   {
-      bm2l->bm1.bm0_r[k] |= bm2r->bm1.bm0_r[k];
-   }
-   for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
-   {
-      bm2l->bm1.bm0_w[k] |= bm2r->bm1.bm0_w[k];
-   }
+  for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+  {
+    bm2l->bm1.bm0_r[k] |= bm2r->bm1.bm0_r[k];
+  }
+  for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+  {
+    bm2l->bm1.bm0_w[k] |= bm2r->bm1.bm0_w[k];
+  }
 }
 
 #if 0
@@ -805,60 +805,53 @@
 /* Unit test */
 static
 struct { Addr address; SizeT size; BmAccessTypeT access_type; }
-   s_args[] = {
-      {          0, 1, eLoad  },
-      {        666, 4, eLoad  },
-      {        667, 2, eStore },
-      {       1024, 1, eStore },
-      { 0x0000ffff, 1, eLoad  },
-      { 0x0001ffff, 1, eLoad  },
-      { 0x00ffffff, 1, eLoad  },
-      { 0xffffffff, 1, eStore },
-   };
+  s_args[] = {
+    {          0, 1, eLoad  },
+    {        666, 4, eLoad  },
+    {        667, 2, eStore },
+    {       1024, 1, eStore },
+    { 0x0000ffff, 1, eLoad  },
+    { 0x0001ffff, 1, eLoad  },
+    { 0x00ffffff, 1, eLoad  },
+    { 0xffffffff, 1, eStore },
+  };
 
 void bm_test(void)
 {
-   struct bitmap* bm;
-   struct bitmap* bm2;
-   int i, j;
+  struct bitmap* bm;
+  struct bitmap* bm2;
+  int i, j;
 
-   VG_(printf)("Start of DRD BM unit test.\n");
+  VG_(printf)("Start of DRD BM unit test.\n");
 
-   bm = bm_new();
+  bm = bm_new();
 
-   for (i = 0; i < sizeof(s_args)/sizeof(s_args[0]); i++)
-   {
-      bm_access_range(bm,
-                      s_args[i].address,
-                      s_args[i].address + s_args[i].size,
-                      s_args[i].access_type);
-   }
+  for (i = 0; i < sizeof(s_args)/sizeof(s_args[0]); i++)
+  {
+    bm_access_range(bm,
+                    s_args[i].address,
+                    s_args[i].address + s_args[i].size,
+                    s_args[i].access_type);
+  }
 
-   VG_(printf)("Map contents -- should contain 10 addresses:\n");
-   bm_print(bm);
+  VG_(printf)("Map contents -- should contain 10 addresses:\n");
+  bm_print(bm);
 
-   for (i = 0; i < sizeof(s_args)/sizeof(s_args[0]); i++)
-   {
-      for (j = 0; j < s_args[i].size; j++)
-      {
-         tl_assert(bm_has_1(bm, s_args[i].address + j, s_args[i].access_type));
-      }
-   }
+  for (i = 0; i < sizeof(s_args)/sizeof(s_args[0]); i++)
+  {
+    for (j = 0; j < s_args[i].size; j++)
+    {
+      tl_assert(bm_has_1(bm, s_args[i].address + j, s_args[i].access_type));
+    }
+  }
 
-   VG_(printf)("Merge result:\n");
-   bm2 = bm_merge(bm, bm);
-   bm_print(bm);
+  VG_(printf)("Merge result:\n");
+  bm2 = bm_merge(bm, bm);
+  bm_print(bm);
 
-   bm_delete(bm);
-   bm_delete(bm2);
+  bm_delete(bm);
+  bm_delete(bm2);
 
-   VG_(printf)("End of DRD BM unit test.\n");
+  VG_(printf)("End of DRD BM unit test.\n");
 }
 #endif
-
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/drd_clientreq.c b/exp-drd/drd_clientreq.c
index 5d11d62..e03f7aa 100644
--- a/exp-drd/drd_clientreq.c
+++ b/exp-drd/drd_clientreq.c
@@ -43,287 +43,281 @@
 
 static void drd_spin_init_or_unlock(const Addr spinlock)
 {
-   struct mutex_info* mutex_p = mutex_get(spinlock);
-   if (mutex_p)
-   {
-      mutex_unlock(spinlock, mutex_type_spinlock);
-   }
-   else
-   {
-      mutex_init(spinlock, mutex_type_spinlock);
-   }
+  struct mutex_info* mutex_p = mutex_get(spinlock);
+  if (mutex_p)
+  {
+    mutex_unlock(spinlock, mutex_type_spinlock);
+  }
+  else
+  {
+    mutex_init(spinlock, mutex_type_spinlock);
+  }
 }
 
 static void drd_pre_cond_wait(const Addr cond,
                               const Addr mutex, const MutexT mutex_type)
 {
-   mutex_unlock(mutex, mutex_type);
-   cond_pre_wait(cond, mutex);
+  mutex_unlock(mutex, mutex_type);
+  cond_pre_wait(cond, mutex);
 }
 
 static void drd_post_cond_wait(const Addr cond,
                                const Addr mutex,
                                const Bool took_lock)
 {
-   cond_post_wait(cond);
-   mutex_post_lock(mutex, took_lock);
+  cond_post_wait(cond);
+  mutex_post_lock(mutex, took_lock);
 }
 
 static void drd_pre_cond_signal(const Addr cond)
 {
-   cond_pre_signal(cond);
+  cond_pre_signal(cond);
 }
 
 static void drd_pre_cond_broadcast(const Addr cond)
 {
-   cond_pre_broadcast(cond);
+  cond_pre_broadcast(cond);
 }
 
 static Bool drd_handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
 {
-   UWord result = 0;
-   const DrdThreadId drd_tid = thread_get_running_tid();
+  UWord result = 0;
+  const DrdThreadId drd_tid = thread_get_running_tid();
 
-   tl_assert(vg_tid == VG_(get_running_tid()));
-   tl_assert(VgThreadIdToDrdThreadId(vg_tid) == drd_tid);
+  tl_assert(vg_tid == VG_(get_running_tid()));
+  tl_assert(VgThreadIdToDrdThreadId(vg_tid) == drd_tid);
 
-   switch (arg[0])
-   {
-   case VG_USERREQ__GET_THREAD_SELF:
-      result = vg_tid;
-      break;
+  switch (arg[0])
+  {
+  case VG_USERREQ__GET_THREAD_SELF:
+    result = vg_tid;
+    break;
 
-   case VG_USERREQ__SET_THREAD_NAME:
-      thread_set_name_fmt(drd_tid, (char*)arg[1], arg[2]);
-      break;
+  case VG_USERREQ__SET_THREAD_NAME:
+    thread_set_name_fmt(drd_tid, (char*)arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__DRD_START_SUPPRESSION:
-      drd_start_suppression(arg[1], arg[2], "client");
-      break;
+  case VG_USERREQ__DRD_START_SUPPRESSION:
+    drd_start_suppression(arg[1], arg[2], "client");
+    break;
 
-   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
-      drd_finish_suppression(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__DRD_FINISH_SUPPRESSION:
+    drd_finish_suppression(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
-      thread_set_stack_startup(drd_tid, VG_(get_SP)(vg_tid));
-      break;
+  case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
+    thread_set_stack_startup(drd_tid, VG_(get_SP)(vg_tid));
+    break;
 
-   case VG_USERREQ__DRD_START_NEW_SEGMENT:
-      thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
-      break;
+  case VG_USERREQ__DRD_START_NEW_SEGMENT:
+    thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
+    break;
 
-   case VG_USERREQ__DRD_TRACE_ADDR:
-      drd_trace_addr(arg[1]);
-      break;
+  case VG_USERREQ__DRD_TRACE_ADDR:
+    drd_trace_addr(arg[1]);
+    break;
 
-   case VG_USERREQ__SET_PTHREADID:
-      thread_set_pthreadid(drd_tid, arg[1]);
-      break;
+  case VG_USERREQ__SET_PTHREADID:
+    thread_set_pthreadid(drd_tid, arg[1]);
+    break;
 
-   case VG_USERREQ__SET_JOINABLE:
-      thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
-      break;
+  case VG_USERREQ__SET_JOINABLE:
+    thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
+    break;
 
-   case VG_USERREQ__POST_THREAD_JOIN:
-      tl_assert(arg[1]);
-      drd_post_thread_join(drd_tid,
-                           PtThreadIdToDrdThreadId(arg[1]));
-      break;
+  case VG_USERREQ__POST_THREAD_JOIN:
+    tl_assert(arg[1]);
+    drd_post_thread_join(drd_tid,
+                         PtThreadIdToDrdThreadId(arg[1]));
+    break;
 
-   case VG_USERREQ__PRE_MUTEX_INIT:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_pre_mutex_init(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__PRE_MUTEX_INIT:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_pre_mutex_init(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__POST_MUTEX_INIT:
-      thread_leave_synchr(drd_tid);
-      break;
+  case VG_USERREQ__POST_MUTEX_INIT:
+    thread_leave_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__PRE_MUTEX_DESTROY:
-      thread_enter_synchr(drd_tid);
-      break;
+  case VG_USERREQ__PRE_MUTEX_DESTROY:
+    thread_enter_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__POST_MUTEX_DESTROY:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_post_mutex_destroy(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_MUTEX_DESTROY:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_post_mutex_destroy(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_MUTEX_LOCK:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_pre_mutex_lock(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__PRE_MUTEX_LOCK:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_pre_mutex_lock(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__POST_MUTEX_LOCK:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_post_mutex_lock(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_MUTEX_LOCK:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_post_mutex_lock(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_MUTEX_UNLOCK:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_pre_mutex_unlock(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__PRE_MUTEX_UNLOCK:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_pre_mutex_unlock(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__POST_MUTEX_UNLOCK:
-      thread_leave_synchr(drd_tid);
-      break;
+  case VG_USERREQ__POST_MUTEX_UNLOCK:
+    thread_leave_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__SPIN_INIT_OR_UNLOCK:
-      tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
-      drd_spin_init_or_unlock(arg[1]);
-      break;
+  case VG_USERREQ__SPIN_INIT_OR_UNLOCK:
+    tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
+    drd_spin_init_or_unlock(arg[1]);
+    break;
 
-   case VG_USERREQ__PRE_COND_INIT:
-      tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
-      drd_pre_cond_init(arg[1]);
-      break;
+  case VG_USERREQ__PRE_COND_INIT:
+    tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
+    drd_pre_cond_init(arg[1]);
+    break;
 
-   case VG_USERREQ__POST_COND_DESTROY:
-      tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
-      drd_post_cond_destroy(arg[1]);
-      break;
+  case VG_USERREQ__POST_COND_DESTROY:
+    tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
+    drd_post_cond_destroy(arg[1]);
+    break;
 
-   case VG_USERREQ__PRE_COND_WAIT:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_pre_cond_wait(arg[1], arg[2], arg[3]);
-      break;
+  case VG_USERREQ__PRE_COND_WAIT:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_pre_cond_wait(arg[1], arg[2], arg[3]);
+    break;
 
-   case VG_USERREQ__POST_COND_WAIT:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_post_cond_wait(arg[1], arg[2], arg[3]);
-      break;
+  case VG_USERREQ__POST_COND_WAIT:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_post_cond_wait(arg[1], arg[2], arg[3]);
+    break;
 
-   case VG_USERREQ__PRE_COND_SIGNAL:
-      tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
-      drd_pre_cond_signal(arg[1]);
-      break;
+  case VG_USERREQ__PRE_COND_SIGNAL:
+    tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
+    drd_pre_cond_signal(arg[1]);
+    break;
 
-   case VG_USERREQ__PRE_COND_BROADCAST:
-      tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
-      drd_pre_cond_broadcast(arg[1]);
-      break;
+  case VG_USERREQ__PRE_COND_BROADCAST:
+    tl_assert(thread_get_synchr_nesting_count(drd_tid) == 0);
+    drd_pre_cond_broadcast(arg[1]);
+    break;
 
-   case VG_USERREQ__PRE_SEM_INIT:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_semaphore_init(arg[1], arg[2], arg[3]);
-      break;
+  case VG_USERREQ__PRE_SEM_INIT:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_semaphore_init(arg[1], arg[2], arg[3]);
+    break;
 
-   case VG_USERREQ__POST_SEM_INIT:
-      thread_leave_synchr(drd_tid);
-      break;
+  case VG_USERREQ__POST_SEM_INIT:
+    thread_leave_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__PRE_SEM_DESTROY:
-      thread_enter_synchr(drd_tid);
-      break;
+  case VG_USERREQ__PRE_SEM_DESTROY:
+    thread_enter_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__POST_SEM_DESTROY:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_semaphore_destroy(arg[1]);
-      break;
+  case VG_USERREQ__POST_SEM_DESTROY:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_semaphore_destroy(arg[1]);
+    break;
 
-   case VG_USERREQ__PRE_SEM_WAIT:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_semaphore_pre_wait(drd_tid, arg[1]);
-      break;
+  case VG_USERREQ__PRE_SEM_WAIT:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_semaphore_pre_wait(drd_tid, arg[1]);
+    break;
 
-   case VG_USERREQ__POST_SEM_WAIT:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_semaphore_post_wait(drd_tid, arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_SEM_WAIT:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_semaphore_post_wait(drd_tid, arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_SEM_POST:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_semaphore_pre_post(drd_tid, arg[1]);
-      break;
+  case VG_USERREQ__PRE_SEM_POST:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_semaphore_pre_post(drd_tid, arg[1]);
+    break;
 
-   case VG_USERREQ__POST_SEM_POST:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_semaphore_post_post(drd_tid, arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_SEM_POST:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_semaphore_post_post(drd_tid, arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_BARRIER_INIT:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_barrier_init(arg[1], arg[2], arg[3], arg[4]);
-      break;
+  case VG_USERREQ__PRE_BARRIER_INIT:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_barrier_init(arg[1], arg[2], arg[3], arg[4]);
+    break;
 
-   case VG_USERREQ__POST_BARRIER_INIT:
-      thread_leave_synchr(drd_tid);
-      break;
+  case VG_USERREQ__POST_BARRIER_INIT:
+    thread_leave_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__PRE_BARRIER_DESTROY:
-      thread_enter_synchr(drd_tid);
-      break;
+  case VG_USERREQ__PRE_BARRIER_DESTROY:
+    thread_enter_synchr(drd_tid);
+    break;
 
-   case VG_USERREQ__POST_BARRIER_DESTROY:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_barrier_destroy(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_BARRIER_DESTROY:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_barrier_destroy(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_BARRIER_WAIT:
-      if (thread_enter_synchr(drd_tid) == 0)
-         drd_barrier_pre_wait(drd_tid, arg[1], arg[2]);
-      break;
+  case VG_USERREQ__PRE_BARRIER_WAIT:
+    if (thread_enter_synchr(drd_tid) == 0)
+      drd_barrier_pre_wait(drd_tid, arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__POST_BARRIER_WAIT:
-      if (thread_leave_synchr(drd_tid) == 0)
-         drd_barrier_post_wait(drd_tid, arg[1], arg[2], arg[3]);
-      break;
+  case VG_USERREQ__POST_BARRIER_WAIT:
+    if (thread_leave_synchr(drd_tid) == 0)
+      drd_barrier_post_wait(drd_tid, arg[1], arg[2], arg[3]);
+    break;
 
-   case VG_USERREQ__PRE_RWLOCK_INIT:
-      rwlock_pre_init(arg[1]);
-      break;
+  case VG_USERREQ__PRE_RWLOCK_INIT:
+    rwlock_pre_init(arg[1]);
+    break;
 
-   case VG_USERREQ__POST_RWLOCK_DESTROY:
-      rwlock_post_destroy(arg[1]);
-      break;
+  case VG_USERREQ__POST_RWLOCK_DESTROY:
+    rwlock_post_destroy(arg[1]);
+    break;
 
-   case VG_USERREQ__PRE_RWLOCK_RDLOCK:
-      if (thread_enter_synchr(drd_tid) == 0)
-         rwlock_pre_rdlock(arg[1]);
-      break;
+  case VG_USERREQ__PRE_RWLOCK_RDLOCK:
+    if (thread_enter_synchr(drd_tid) == 0)
+      rwlock_pre_rdlock(arg[1]);
+    break;
 
-   case VG_USERREQ__POST_RWLOCK_RDLOCK:
-      if (thread_leave_synchr(drd_tid) == 0)
-         rwlock_post_rdlock(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_RWLOCK_RDLOCK:
+    if (thread_leave_synchr(drd_tid) == 0)
+      rwlock_post_rdlock(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_RWLOCK_WRLOCK:
-      if (thread_enter_synchr(drd_tid) == 0)
-         rwlock_pre_wrlock(arg[1]);
-      break;
+  case VG_USERREQ__PRE_RWLOCK_WRLOCK:
+    if (thread_enter_synchr(drd_tid) == 0)
+      rwlock_pre_wrlock(arg[1]);
+    break;
 
-   case VG_USERREQ__POST_RWLOCK_WRLOCK:
-      if (thread_leave_synchr(drd_tid) == 0)
-         rwlock_post_wrlock(arg[1], arg[2]);
-      break;
+  case VG_USERREQ__POST_RWLOCK_WRLOCK:
+    if (thread_leave_synchr(drd_tid) == 0)
+      rwlock_post_wrlock(arg[1], arg[2]);
+    break;
 
-   case VG_USERREQ__PRE_RWLOCK_UNLOCK:
-      if (thread_enter_synchr(drd_tid) == 0)
-         rwlock_pre_unlock(arg[1]);
-      break;
+  case VG_USERREQ__PRE_RWLOCK_UNLOCK:
+    if (thread_enter_synchr(drd_tid) == 0)
+      rwlock_pre_unlock(arg[1]);
+    break;
       
-   case VG_USERREQ__POST_RWLOCK_UNLOCK:
-      thread_leave_synchr(drd_tid);
-      break;
+  case VG_USERREQ__POST_RWLOCK_UNLOCK:
+    thread_leave_synchr(drd_tid);
+    break;
 
-   default:
-      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
-                   arg[0], arg[1]);
-      tl_assert(0);
-      return False;
-   }
+  default:
+    VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
+                 arg[0], arg[1]);
+    tl_assert(0);
+    return False;
+  }
 
-   *ret = result;
-   return True;
+  *ret = result;
+  return True;
 }
 
 void drd_clientreq_init(void)
 {
-   VG_(needs_client_requests)(drd_handle_client_request);
+  VG_(needs_client_requests)(drd_handle_client_request);
 }
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/drd_error.c b/exp-drd/drd_error.c
index cc1923b..e3a7bc9 100644
--- a/exp-drd/drd_error.c
+++ b/exp-drd/drd_error.c
@@ -40,26 +40,26 @@
 
 
 typedef enum {
-   ConflictingAccessSupp
+  ConflictingAccessSupp
 } DRD_SuppKind;
 
 
 static void make_path_relative(Char* const path)
 {
-   int offset = 0;
-   Char cwd[512];
+  int offset = 0;
+  Char cwd[512];
 
-   if (! VG_(get_startup_wd)(cwd, sizeof(cwd)))
-      tl_assert(False);
-   if (VG_(strncmp)(path + offset, cwd, VG_(strlen)(cwd)) == 0)
-   {
-      offset += VG_(strlen)(cwd);
-      if (path[offset] == '/')
-      {
-         offset++;
-      }
-   }
-   VG_(memmove)(path, path + offset, VG_(strlen)(path + offset) + 1);
+  if (! VG_(get_startup_wd)(cwd, sizeof(cwd)))
+    tl_assert(False);
+  if (VG_(strncmp)(path + offset, cwd, VG_(strlen)(cwd)) == 0)
+  {
+    offset += VG_(strlen)(cwd);
+    if (path[offset] == '/')
+    {
+      offset++;
+    }
+  }
+  VG_(memmove)(path, path + offset, VG_(strlen)(path + offset) + 1);
 }
 
 
@@ -67,102 +67,102 @@
 /* messages, putting the result in ai. */
 void describe_addr(Addr const a, SizeT const len, AddrInfo* const ai)
 {
-   Addr       stack_min, stack_max;
-   DebugInfo* sg;
+  Addr       stack_min, stack_max;
+  DebugInfo* sg;
 
-   /* Perhaps it's on a thread's stack? */
-   ai->stack_tid = thread_lookup_stackaddr(a, &stack_min, &stack_max);
-   if (ai->stack_tid != DRD_INVALID_THREADID)
-   {
-      ai->akind     = eStack;
-      ai->size      = len;
-      ai->rwoffset  = a - stack_max;
-      tl_assert(a + ai->size <= stack_max);
-      tl_assert(ai->rwoffset < 0);
+  /* Perhaps it's on a thread's stack? */
+  ai->stack_tid = thread_lookup_stackaddr(a, &stack_min, &stack_max);
+  if (ai->stack_tid != DRD_INVALID_THREADID)
+  {
+    ai->akind     = eStack;
+    ai->size      = len;
+    ai->rwoffset  = a - stack_max;
+    tl_assert(a + ai->size <= stack_max);
+    tl_assert(ai->rwoffset < 0);
+    return;
+  }
+
+  /* Perhaps it's in a mapped segment ? */
+  sg = VG_(find_seginfo)(a);
+  if (sg)
+  {
+    int i, n;
+
+    ai->akind   = eSegment;
+    ai->debuginfo = sg;
+    ai->name[0] = 0;
+    ai->size = 1;
+    ai->rwoffset = 0;
+
+    n = VG_(seginfo_syms_howmany)(sg);
+    for (i = 0; i < n; i++)
+    {
+      Addr addr;
+      Addr tocptr;
+      UInt size;
+      HChar* name;
+      Char filename[256];
+      Int linenum;
+      Bool isText;
+
+      VG_(seginfo_syms_getidx)(sg, i, &addr, &tocptr, &size, &name, &isText);
+      if (isText && addr <= a && a < addr + size)
+      {
+        ai->size     = size;
+        ai->rwoffset = a - addr;
+        tl_assert(name && name[0]);
+        VG_(snprintf)(ai->name, sizeof(ai->name), "%s", name);
+        if (VG_(get_filename_linenum)(addr,
+                                      filename, sizeof(filename),
+                                      0, 0, 0,
+                                      &linenum))
+        {
+          make_path_relative(filename);
+          VG_(snprintf)(ai->descr, sizeof(ai->descr),
+                        " in %s:%d", filename, linenum);
+        }
+        else
+        {
+          i = n;
+        }
+        break;
+      }
+    }
+    if (i == n)
+    {
+      Char filename[512];
+      Char soname[512];
+      VgSectKind kind = VG_(seginfo_sect_kind)(NULL, 0, a);
+      const HChar* sect_kind_name = VG_(pp_SectKind)(kind);
+      VG_(strncpy)(filename, VG_(seginfo_filename)(sg), sizeof(filename));
+      filename[sizeof(filename) - 1] = 0;
+      make_path_relative(filename);
+      VG_(strncpy)(soname, VG_(seginfo_soname)(sg), sizeof(soname));
+      soname[sizeof(soname) - 1] = 0;
+      make_path_relative(soname);
+      VG_(snprintf)(ai->descr, sizeof(ai->descr),
+                    "%s, %s:%s",
+                    filename,
+                    soname,
+                    sect_kind_name);
+    }
+    return;
+  }
+
+  /* Search for a currently malloc'd block which might bracket it. */
+  {
+    Addr data;
+    if (drd_heap_addrinfo(a, &data, &ai->size, &ai->lastchange))
+    {
+      ai->akind = eMallocd;
+      ai->rwoffset = a - data;
       return;
-   }
+    }
+  }
 
-   /* Perhaps it's in a mapped segment ? */
-   sg = VG_(find_seginfo)(a);
-   if (sg)
-   {
-      int i, n;
-
-      ai->akind   = eSegment;
-      ai->debuginfo = sg;
-      ai->name[0] = 0;
-      ai->size = 1;
-      ai->rwoffset = 0;
-
-      n = VG_(seginfo_syms_howmany)(sg);
-      for (i = 0; i < n; i++)
-      {
-         Addr addr;
-         Addr tocptr;
-         UInt size;
-         HChar* name;
-         Char filename[256];
-         Int linenum;
-         Bool isText;
-
-         VG_(seginfo_syms_getidx)(sg, i, &addr, &tocptr, &size, &name, &isText);
-         if (isText && addr <= a && a < addr + size)
-         {
-            ai->size     = size;
-            ai->rwoffset = a - addr;
-            tl_assert(name && name[0]);
-            VG_(snprintf)(ai->name, sizeof(ai->name), "%s", name);
-            if (VG_(get_filename_linenum)(addr,
-                                          filename, sizeof(filename),
-                                          0, 0, 0,
-                                          &linenum))
-            {
-               make_path_relative(filename);
-               VG_(snprintf)(ai->descr, sizeof(ai->descr),
-                             " in %s:%d", filename, linenum);
-            }
-            else
-            {
-               i = n;
-            }
-            break;
-         }
-      }
-      if (i == n)
-      {
-         Char filename[512];
-         Char soname[512];
-         VgSectKind kind = VG_(seginfo_sect_kind)(NULL, 0, a);
-         const HChar* sect_kind_name = VG_(pp_SectKind)(kind);
-         VG_(strncpy)(filename, VG_(seginfo_filename)(sg), sizeof(filename));
-         filename[sizeof(filename) - 1] = 0;
-         make_path_relative(filename);
-         VG_(strncpy)(soname, VG_(seginfo_soname)(sg), sizeof(soname));
-         soname[sizeof(soname) - 1] = 0;
-         make_path_relative(soname);
-         VG_(snprintf)(ai->descr, sizeof(ai->descr),
-                       "%s, %s:%s",
-                       filename,
-                       soname,
-                       sect_kind_name);
-      }
-      return;
-   }
-
-   /* Search for a currently malloc'd block which might bracket it. */
-   {
-      Addr data;
-      if (drd_heap_addrinfo(a, &data, &ai->size, &ai->lastchange))
-      {
-         ai->akind = eMallocd;
-         ai->rwoffset = a - data;
-         return;
-      }
-   }
-
-   /* Clueless ... */
-   ai->akind = eUnknown;
-   return;
+  /* Clueless ... */
+  ai->akind = eUnknown;
+  return;
 }
 
 /**
@@ -171,291 +171,285 @@
 Char* describe_addr_text(Addr const a, SizeT const len, AddrInfo* const ai,
                          Char* const buf, UInt const n_buf)
 {
-   tl_assert(a);
-   tl_assert(ai);
-   tl_assert(buf);
+  tl_assert(a);
+  tl_assert(ai);
+  tl_assert(buf);
 
-   describe_addr(a, len, ai);
+  describe_addr(a, len, ai);
 
-   switch (ai->akind)
-   {
-   case eStack: {
+  switch (ai->akind)
+  {
+  case eStack: {
+    VG_(snprintf)(buf, n_buf,
+                  "stack of %s, offset %d",
+                  thread_get_name(ai->stack_tid), ai->rwoffset);
+    break;
+  }
+  case eSegment: {
+    if (ai->name[0])
+    {
       VG_(snprintf)(buf, n_buf,
-                    "stack of %s, offset %d",
-                    thread_get_name(ai->stack_tid), ai->rwoffset);
-      break;
-   }
-   case eSegment: {
-      if (ai->name[0])
-      {
-         VG_(snprintf)(buf, n_buf,
-                       "%s (offset %ld, size %ld) in %s",
-                       ai->name, ai->rwoffset, ai->size, ai->descr);
-      }
-      else
-      {
-         VG_(snprintf)(buf, n_buf,
-                       "%s",
-                       ai->descr);
-      }
-      break;
-   }
-   case eMallocd: {
-      VG_(snprintf)(buf, n_buf, "heap");
-      VG_(snprintf)(buf + VG_(strlen)(buf), n_buf - VG_(strlen)(buf),
-                    ", offset %ld in block at 0x%lx of size %ld",
-                    ai->rwoffset, a - ai->rwoffset, ai->size);
-      break;
-   }
-   case eUnknown:
-      VG_(snprintf)(buf, n_buf, "unknown");
-      break;
-   default:
-      tl_assert(0);
-   }
-   return buf;
+                    "%s (offset %ld, size %ld) in %s",
+                    ai->name, ai->rwoffset, ai->size, ai->descr);
+    }
+    else
+    {
+      VG_(snprintf)(buf, n_buf,
+                    "%s",
+                    ai->descr);
+    }
+    break;
+  }
+  case eMallocd: {
+    VG_(snprintf)(buf, n_buf, "heap");
+    VG_(snprintf)(buf + VG_(strlen)(buf), n_buf - VG_(strlen)(buf),
+                  ", offset %ld in block at 0x%lx of size %ld",
+                  ai->rwoffset, a - ai->rwoffset, ai->size);
+    break;
+  }
+  case eUnknown:
+    VG_(snprintf)(buf, n_buf, "unknown");
+    break;
+  default:
+    tl_assert(0);
+  }
+  return buf;
 }
 
 static
 void drd_report_data_race2(Error* const err, const DataRaceErrInfo* const dri)
 {
-   AddrInfo ai;
-   Char descr1[256];
-   Char descr2[256];
+  AddrInfo ai;
+  Char descr1[256];
+  Char descr2[256];
 
-   tl_assert(dri);
-   tl_assert(dri->addr);
-   tl_assert(dri->size > 0);
+  tl_assert(dri);
+  tl_assert(dri->addr);
+  tl_assert(dri->size > 0);
 
-   descr1[0] = 0;
-   descr2[0] = 0;
-   VG_(get_data_description)(descr1, descr2, sizeof(descr1), dri->addr);
-   if (descr1[0] == 0)
-   {
-      describe_addr(dri->addr, dri->size, &ai);
-   }
-   VG_(message)(Vg_UserMsg,
-                "Conflicting %s by %s at 0x%08lx size %ld",
-                dri->access_type == eStore ? "store" : "load",
-                thread_get_name(VgThreadIdToDrdThreadId(dri->tid)),
-                dri->addr,
-                dri->size);
-   VG_(pp_ExeContext)(VG_(get_error_where)(err));
-   if (descr1[0])
-   {
-      VG_(message)(Vg_UserMsg, "%s", descr1);
-      VG_(message)(Vg_UserMsg, "%s", descr2);
-   }
-   else if (ai.akind == eMallocd && ai.lastchange)
-   {
-      VG_(message)(Vg_UserMsg,
-                   "Address 0x%lx is at offset %ld from 0x%lx."
-                   " Allocation context:",
-                   dri->addr, ai.rwoffset, dri->addr - ai.rwoffset);
-      VG_(pp_ExeContext)(ai.lastchange);
-   }
-   else
-   {
-      VG_(message)(Vg_UserMsg, "Allocation context: unknown.");
-   }
-   thread_report_conflicting_segments(VgThreadIdToDrdThreadId(dri->tid),
-                                      dri->addr, dri->size, dri->access_type);
+  descr1[0] = 0;
+  descr2[0] = 0;
+  VG_(get_data_description)(descr1, descr2, sizeof(descr1), dri->addr);
+  if (descr1[0] == 0)
+  {
+    describe_addr(dri->addr, dri->size, &ai);
+  }
+  VG_(message)(Vg_UserMsg,
+               "Conflicting %s by %s at 0x%08lx size %ld",
+               dri->access_type == eStore ? "store" : "load",
+               thread_get_name(VgThreadIdToDrdThreadId(dri->tid)),
+               dri->addr,
+               dri->size);
+  VG_(pp_ExeContext)(VG_(get_error_where)(err));
+  if (descr1[0])
+  {
+    VG_(message)(Vg_UserMsg, "%s", descr1);
+    VG_(message)(Vg_UserMsg, "%s", descr2);
+  }
+  else if (ai.akind == eMallocd && ai.lastchange)
+  {
+    VG_(message)(Vg_UserMsg,
+                 "Address 0x%lx is at offset %ld from 0x%lx."
+                 " Allocation context:",
+                 dri->addr, ai.rwoffset, dri->addr - ai.rwoffset);
+    VG_(pp_ExeContext)(ai.lastchange);
+  }
+  else
+  {
+    VG_(message)(Vg_UserMsg, "Allocation context: unknown.");
+  }
+  thread_report_conflicting_segments(VgThreadIdToDrdThreadId(dri->tid),
+                                     dri->addr, dri->size, dri->access_type);
 }
 
 static Bool drd_tool_error_eq(VgRes res, Error* e1, Error* e2)
 {
-   return False;
+  return False;
 }
 
 static void drd_tool_error_pp(Error* const e)
 {
-   switch (VG_(get_error_kind)(e))
-   {
-   case DataRaceErr: {
-      drd_report_data_race2(e, VG_(get_error_extra)(e));
-      break;
-   }
-   case MutexErr: {
-      MutexErrInfo* p = (MutexErrInfo*)(VG_(get_error_extra)(e));
-      tl_assert(p);
-      VG_(message)(Vg_UserMsg,
-                   "%s: mutex 0x%lx, recursion count %d, owner %d.",
-                   VG_(get_error_string)(e),
-                   p->mutex,
-                   p->recursion_count,
-                   p->owner);
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case CondErr: {
-      CondErrInfo* cdei =(CondErrInfo*)(VG_(get_error_extra)(e));
-      VG_(message)(Vg_UserMsg,
-                   "%s: cond 0x%lx",
-                   cdei->cond,
-                   VG_(get_error_string)(e));
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case CondRaceErr: {
-      CondRaceErrInfo* cei = (CondRaceErrInfo*)(VG_(get_error_extra)(e));
-      VG_(message)(Vg_UserMsg,
-                   "Race condition: condition variable 0x%lx has been"
-                   " signalled but the associated mutex 0x%lx is not locked"
-                   " by the signalling thread",
-                   cei->cond, cei->mutex);
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case CondDestrErr: {
-      CondDestrErrInfo* cdi = (CondDestrErrInfo*)(VG_(get_error_extra)(e));
-      VG_(message)(Vg_UserMsg,
-                   "%s: cond 0x%lx, mutex 0x%lx locked by thread %d",
-                   cdi->cond, cdi->mutex, cdi->tid);
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case SemaphoreErr: {
-      SemaphoreErrInfo* sei =(SemaphoreErrInfo*)(VG_(get_error_extra)(e));
-      tl_assert(sei);
-      VG_(message)(Vg_UserMsg,
-                   "%s: semaphore 0x%lx",
-                   VG_(get_error_string)(e),
-                   sei->semaphore);
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case BarrierErr: {
-      BarrierErrInfo* sei =(BarrierErrInfo*)(VG_(get_error_extra)(e));
-      tl_assert(sei);
-      VG_(message)(Vg_UserMsg,
-                   "%s: barrier 0x%lx",
-                   VG_(get_error_string)(e),
-                   sei->barrier);
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case RwlockErr: {
-      RwlockErrInfo* p = (RwlockErrInfo*)(VG_(get_error_extra)(e));
-      tl_assert(p);
-      VG_(message)(Vg_UserMsg,
-                   "%s: rwlock 0x%lx.",
-                   VG_(get_error_string)(e),
-                   p->rwlock);
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   case GenericErr: {
-      //GenericErrInfo* gei =(GenericErrInfo*)(VG_(get_error_extra)(e));
-      VG_(message)(Vg_UserMsg, "%s", VG_(get_error_string)(e));
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
-   default:
-      VG_(message)(Vg_UserMsg,
-                   "%s",
-                   VG_(get_error_string)(e));
-      VG_(pp_ExeContext)(VG_(get_error_where)(e));
-      break;
-   }
+  switch (VG_(get_error_kind)(e))
+  {
+  case DataRaceErr: {
+    drd_report_data_race2(e, VG_(get_error_extra)(e));
+    break;
+  }
+  case MutexErr: {
+    MutexErrInfo* p = (MutexErrInfo*)(VG_(get_error_extra)(e));
+    tl_assert(p);
+    VG_(message)(Vg_UserMsg,
+                 "%s: mutex 0x%lx, recursion count %d, owner %d.",
+                 VG_(get_error_string)(e),
+                 p->mutex,
+                 p->recursion_count,
+                 p->owner);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case CondErr: {
+    CondErrInfo* cdei = (CondErrInfo*)(VG_(get_error_extra)(e));
+    VG_(message)(Vg_UserMsg,
+                 "%s: cond 0x%lx",
+                 VG_(get_error_string)(e),
+                 cdei->cond);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case CondRaceErr: {
+    CondRaceErrInfo* cei = (CondRaceErrInfo*)(VG_(get_error_extra)(e));
+    VG_(message)(Vg_UserMsg,
+                 "Race condition: condition variable 0x%lx has been"
+                 " signalled but the associated mutex 0x%lx is not locked"
+                 " by the signalling thread",
+                 cei->cond, cei->mutex);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case CondDestrErr: {
+    CondDestrErrInfo* cdi = (CondDestrErrInfo*)(VG_(get_error_extra)(e));
+    VG_(message)(Vg_UserMsg,
+                 "%s: cond 0x%lx, mutex 0x%lx locked by thread %d",
+                 cdi->cond, cdi->mutex, cdi->tid);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case SemaphoreErr: {
+    SemaphoreErrInfo* sei = (SemaphoreErrInfo*)(VG_(get_error_extra)(e));
+    tl_assert(sei);
+    VG_(message)(Vg_UserMsg,
+                 "%s: semaphore 0x%lx",
+                 VG_(get_error_string)(e),
+                 sei->semaphore);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case BarrierErr: {
+    BarrierErrInfo* sei = (BarrierErrInfo*)(VG_(get_error_extra)(e));
+    tl_assert(sei);
+    VG_(message)(Vg_UserMsg,
+                 "%s: barrier 0x%lx",
+                 VG_(get_error_string)(e),
+                 sei->barrier);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case RwlockErr: {
+    RwlockErrInfo* p = (RwlockErrInfo*)(VG_(get_error_extra)(e));
+    tl_assert(p);
+    VG_(message)(Vg_UserMsg,
+                 "%s: rwlock 0x%lx.",
+                 VG_(get_error_string)(e),
+                 p->rwlock);
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  case GenericErr: {
+    //GenericErrInfo* gei =(GenericErrInfo*)(VG_(get_error_extra)(e));
+    VG_(message)(Vg_UserMsg, "%s", VG_(get_error_string)(e));
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
+  default:
+    VG_(message)(Vg_UserMsg,
+                 "%s",
+                 VG_(get_error_string)(e));
+    VG_(pp_ExeContext)(VG_(get_error_where)(e));
+    break;
+  }
 }
 
 static UInt drd_tool_error_update_extra(Error* e)
 {
-   switch (VG_(get_error_kind)(e))
-   {
-   case DataRaceErr:
-      return sizeof(DataRaceErrInfo);
-   case MutexErr:
-      return sizeof(MutexErrInfo);
-   case CondErr:
-      return sizeof(CondErrInfo);
-   case CondRaceErr:
-      return sizeof(CondRaceErrInfo);
-   case CondDestrErr:
-      return sizeof(CondDestrErrInfo);
-   case SemaphoreErr:
-      return sizeof(SemaphoreErrInfo);
-   case BarrierErr:
-      return sizeof(BarrierErrInfo);
-   case RwlockErr:
-      return sizeof(RwlockErrInfo);
-   case GenericErr:
-      return sizeof(GenericErrInfo);
-   default:
-      tl_assert(False);
-      break;
-   }
+  switch (VG_(get_error_kind)(e))
+  {
+  case DataRaceErr:
+    return sizeof(DataRaceErrInfo);
+  case MutexErr:
+    return sizeof(MutexErrInfo);
+  case CondErr:
+    return sizeof(CondErrInfo);
+  case CondRaceErr:
+    return sizeof(CondRaceErrInfo);
+  case CondDestrErr:
+    return sizeof(CondDestrErrInfo);
+  case SemaphoreErr:
+    return sizeof(SemaphoreErrInfo);
+  case BarrierErr:
+    return sizeof(BarrierErrInfo);
+  case RwlockErr:
+    return sizeof(RwlockErrInfo);
+  case GenericErr:
+    return sizeof(GenericErrInfo);
+  default:
+    tl_assert(False);
+    break;
+  }
 }
 
 static Bool drd_tool_error_recog(Char* const name, Supp* const supp)
 {
-   SuppKind skind;
+  SuppKind skind;
 
-   if (VG_(strcmp)(name, "ConflictingAccess") == 0)
-      skind = ConflictingAccessSupp;
-   else
-      return False;
+  if (VG_(strcmp)(name, "ConflictingAccess") == 0)
+    skind = ConflictingAccessSupp;
+  else
+    return False;
 
-   VG_(set_supp_kind)(supp, skind);
-   return True;
+  VG_(set_supp_kind)(supp, skind);
+  return True;
 }
 
 static Bool drd_tool_error_read_extra(Int fd, Char* buf, Int nBuf, Supp* supp)
 {
-   return True;
+  return True;
 }
 
 static Bool drd_tool_error_matches(Error* const e, Supp* const supp)
 {
-   switch (VG_(get_supp_kind)(supp))
-   {
-   }
-   return True;
+  switch (VG_(get_supp_kind)(supp))
+  {
+  }
+  return True;
 }
 
 static Char* drd_tool_error_name(Error* e)
 {
-   switch (VG_(get_error_kind)(e))
-   {
-   case DataRaceErr:  return "DataRaceErr";
-   case MutexErr:     return "MutexErr";
-   case CondErr:      return "CondErr";
-   case CondRaceErr:  return "CondRaceErr";
-   case CondDestrErr: return "CondDestrErr";
-   case SemaphoreErr: return "SemaphoreErr";
-   case BarrierErr:   return "BarrierErr";
-   case RwlockErr:    return "RwlockErr";
-   case GenericErr:   return "GenericErr";
-   default:
-      tl_assert(0);
-   }
-   return 0;
+  switch (VG_(get_error_kind)(e))
+  {
+  case DataRaceErr:  return "DataRaceErr";
+  case MutexErr:     return "MutexErr";
+  case CondErr:      return "CondErr";
+  case CondRaceErr:  return "CondRaceErr";
+  case CondDestrErr: return "CondDestrErr";
+  case SemaphoreErr: return "SemaphoreErr";
+  case BarrierErr:   return "BarrierErr";
+  case RwlockErr:    return "RwlockErr";
+  case GenericErr:   return "GenericErr";
+  default:
+    tl_assert(0);
+  }
+  return 0;
 }
 
 static void drd_tool_error_print_extra(Error* e)
 {
-   switch (VG_(get_error_kind)(e))
-   {
-      // VG_(printf)("   %s\n", VG_(get_error_string)(err));
-   }
+  switch (VG_(get_error_kind)(e))
+  {
+    // VG_(printf)("   %s\n", VG_(get_error_string)(err));
+  }
 }
 
 void drd_register_error_handlers(void)
 {
-   // Tool error reporting.
-   VG_(needs_tool_errors)(drd_tool_error_eq,
-                          drd_tool_error_pp,
-                          True,
-                          drd_tool_error_update_extra,
-                          drd_tool_error_recog,
-                          drd_tool_error_read_extra,
-                          drd_tool_error_matches,
-                          drd_tool_error_name,
-                          drd_tool_error_print_extra);
+  // Tool error reporting.
+  VG_(needs_tool_errors)(drd_tool_error_eq,
+                         drd_tool_error_pp,
+                         True,
+                         drd_tool_error_update_extra,
+                         drd_tool_error_recog,
+                         drd_tool_error_read_extra,
+                         drd_tool_error_matches,
+                         drd_tool_error_name,
+                         drd_tool_error_print_extra);
 }
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/drd_error.h b/exp-drd/drd_error.h
index e459034..ab95c8f 100644
--- a/exp-drd/drd_error.h
+++ b/exp-drd/drd_error.h
@@ -127,10 +127,3 @@
 
 
 #endif /* __DRD_ERROR_H */
-
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/drd_main.c b/exp-drd/drd_main.c
index 1bd63cc..ef7dfdf 100644
--- a/exp-drd/drd_main.c
+++ b/exp-drd/drd_main.c
@@ -71,72 +71,72 @@
 
 static Bool drd_process_cmd_line_option(Char* arg)
 {
-   Bool trace_barrier     = False;
-   Bool trace_clientobj   = False;
-   Bool trace_cond        = False;
-   Bool trace_csw         = False;
-   Bool trace_danger_set  = False;
-   Bool trace_mutex       = False;
-   Bool trace_rwlock      = False;
-   Bool trace_segment     = False;
-   Bool trace_semaphore   = False;
-   Bool trace_suppression = False;
-   Char* trace_address    = 0;
+  Bool trace_barrier     = False;
+  Bool trace_clientobj   = False;
+  Bool trace_cond        = False;
+  Bool trace_csw         = False;
+  Bool trace_danger_set  = False;
+  Bool trace_mutex       = False;
+  Bool trace_rwlock      = False;
+  Bool trace_segment     = False;
+  Bool trace_semaphore   = False;
+  Bool trace_suppression = False;
+  Char* trace_address    = 0;
 
-   VG_BOOL_CLO     (arg, "--drd-stats",         drd_print_stats)
-   else VG_BOOL_CLO(arg, "--trace-barrier",     trace_barrier)
-   else VG_BOOL_CLO(arg, "--trace-clientobj",   trace_clientobj)
-   else VG_BOOL_CLO(arg, "--trace-cond",        trace_cond)
-   else VG_BOOL_CLO(arg, "--trace-csw",         trace_csw)
-   else VG_BOOL_CLO(arg, "--trace-danger-set",  trace_danger_set)
-   else VG_BOOL_CLO(arg, "--trace-fork-join",   drd_trace_fork_join)
-   else VG_BOOL_CLO(arg, "--trace-mem",         drd_trace_mem)
-   else VG_BOOL_CLO(arg, "--trace-mutex",       trace_mutex)
-   else VG_BOOL_CLO(arg, "--trace-rwlock",      trace_rwlock)
-   else VG_BOOL_CLO(arg, "--trace-segment",     trace_segment)
-   else VG_BOOL_CLO(arg, "--trace-semaphore",   trace_semaphore)
-   else VG_BOOL_CLO(arg, "--trace-suppression", trace_suppression)
-   else VG_STR_CLO (arg, "--trace-address",     trace_address)
-   else
-      return False;
+  VG_BOOL_CLO     (arg, "--drd-stats",         drd_print_stats)
+  else VG_BOOL_CLO(arg, "--trace-barrier",     trace_barrier)
+  else VG_BOOL_CLO(arg, "--trace-clientobj",   trace_clientobj)
+  else VG_BOOL_CLO(arg, "--trace-cond",        trace_cond)
+  else VG_BOOL_CLO(arg, "--trace-csw",         trace_csw)
+  else VG_BOOL_CLO(arg, "--trace-danger-set",  trace_danger_set)
+  else VG_BOOL_CLO(arg, "--trace-fork-join",   drd_trace_fork_join)
+  else VG_BOOL_CLO(arg, "--trace-mem",         drd_trace_mem)
+  else VG_BOOL_CLO(arg, "--trace-mutex",       trace_mutex)
+  else VG_BOOL_CLO(arg, "--trace-rwlock",      trace_rwlock)
+  else VG_BOOL_CLO(arg, "--trace-segment",     trace_segment)
+  else VG_BOOL_CLO(arg, "--trace-semaphore",   trace_semaphore)
+  else VG_BOOL_CLO(arg, "--trace-suppression", trace_suppression)
+  else VG_STR_CLO (arg, "--trace-address",     trace_address)
+  else
+    return False;
 
-   if (trace_address)
-   {
-      drd_trace_address = VG_(strtoll16)(trace_address, 0);
-   }
-   if (trace_barrier)
-      barrier_set_trace(trace_barrier);
-   if (trace_clientobj)
-      clientobj_set_trace(trace_clientobj);
-   if (trace_cond)
-      cond_set_trace(trace_cond);
-   if (trace_csw)
-      thread_trace_context_switches(trace_csw);
-   if (trace_danger_set)
-      thread_trace_danger_set(trace_danger_set);
-   if (trace_mutex)
-      mutex_set_trace(trace_mutex);
-   if (trace_rwlock)
-      rwlock_set_trace(trace_rwlock);
-   if (trace_segment)
-      sg_set_trace(trace_segment);
-   if (trace_semaphore)
-      semaphore_set_trace(trace_semaphore);
-   if (trace_suppression)
-      suppression_set_trace(trace_suppression);
+  if (trace_address)
+  {
+    drd_trace_address = VG_(strtoll16)(trace_address, 0);
+  }
+  if (trace_barrier)
+    barrier_set_trace(trace_barrier);
+  if (trace_clientobj)
+    clientobj_set_trace(trace_clientobj);
+  if (trace_cond)
+    cond_set_trace(trace_cond);
+  if (trace_csw)
+    thread_trace_context_switches(trace_csw);
+  if (trace_danger_set)
+    thread_trace_danger_set(trace_danger_set);
+  if (trace_mutex)
+    mutex_set_trace(trace_mutex);
+  if (trace_rwlock)
+    rwlock_set_trace(trace_rwlock);
+  if (trace_segment)
+    sg_set_trace(trace_segment);
+  if (trace_semaphore)
+    semaphore_set_trace(trace_semaphore);
+  if (trace_suppression)
+    suppression_set_trace(trace_suppression);
 
-   return True;
+  return True;
 }
 
 static void drd_print_usage(void)
 {  
-   VG_(printf)("    --trace-mem=no|yes Trace all memory accesses to stdout[no]"
-               "\n"
-               "    --trace-fork-join=no|yes Trace all thread creation and join"
-               " activity\n"
-               "    --trace-mutex=no|yes Trace all mutex activity\n"
-               "    --trace-segment=no|yes Trace segment actions\n"
-               );
+  VG_(printf)("    --trace-mem=no|yes Trace all memory accesses to stdout[no]"
+              "\n"
+              "    --trace-fork-join=no|yes Trace all thread creation and join"
+              " activity\n"
+              "    --trace-mutex=no|yes Trace all mutex activity\n"
+              "    --trace-segment=no|yes Trace segment actions\n"
+              );
 }
 
 static void drd_print_debug_usage(void)
@@ -151,249 +151,249 @@
 static void drd_trace_mem_access(const Addr addr, const SizeT size,
                                  const BmAccessTypeT access_type)
 {
-   char vc[80];
-   vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
-   VG_(message)(Vg_UserMsg,
-                "%s 0x%lx size %ld %s (vg %d / drd %d / vc %s)",
-                access_type == eLoad ? "load " : "store",
-                addr,
-                size,
-                thread_get_name(thread_get_running_tid()),
-                VG_(get_running_tid)(),
-                thread_get_running_tid(),
-                vc);
-   VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
-                              VG_(clo_backtrace_size));
-   tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
-             == VG_(get_running_tid)());
+  char vc[80];
+  vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
+  VG_(message)(Vg_UserMsg,
+               "%s 0x%lx size %ld %s (vg %d / drd %d / vc %s)",
+               access_type == eLoad ? "load " : "store",
+               addr,
+               size,
+               thread_get_name(thread_get_running_tid()),
+               VG_(get_running_tid)(),
+               thread_get_running_tid(),
+               vc);
+  VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
+                             VG_(clo_backtrace_size));
+  tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
+            == VG_(get_running_tid)());
 }
 
 static void drd_report_race(const Addr addr, const SizeT size,
                             const BmAccessTypeT access_type)
 {
-   DataRaceErrInfo drei;
-   drei.tid  = VG_(get_running_tid)();
-   drei.addr = addr;
-   drei.size = size;
-   drei.access_type = access_type;
-   VG_(maybe_record_error)(VG_(get_running_tid)(),
-                           DataRaceErr,
-                           VG_(get_IP)(VG_(get_running_tid)()),
-                           "Conflicting accesses",
-                           &drei);
+  DataRaceErrInfo drei;
+  drei.tid  = VG_(get_running_tid)();
+  drei.addr = addr;
+  drei.size = size;
+  drei.access_type = access_type;
+  VG_(maybe_record_error)(VG_(get_running_tid)(),
+                          DataRaceErr,
+                          VG_(get_IP)(VG_(get_running_tid)()),
+                          "Conflicting accesses",
+                          &drei);
 }
 
 static VG_REGPARM(2) void drd_trace_load(Addr addr, SizeT size)
 {
-   Segment* sg;
+  Segment* sg;
 
 #if 0
-   /* The assert below has been commented out because of performance reasons.*/
-   tl_assert(thread_get_running_tid()
-             == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
+  /* The assert below has been commented out because of performance reasons.*/
+  tl_assert(thread_get_running_tid()
+            == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
 #endif
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, size, eLoad);
-   }
-   sg = running_thread_get_segment();
-   bm_access_range_load(sg->bm, addr, addr + size);
-   if (bm_load_has_conflict_with(thread_get_danger_set(), addr, addr + size)
-       && ! drd_is_suppressed(addr, addr + size))
-   {
-      drd_report_race(addr, size, eLoad);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, size, eLoad);
+  }
+  sg = running_thread_get_segment();
+  bm_access_range_load(sg->bm, addr, addr + size);
+  if (bm_load_has_conflict_with(thread_get_danger_set(), addr, addr + size)
+      && ! drd_is_suppressed(addr, addr + size))
+  {
+    drd_report_race(addr, size, eLoad);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 1, eLoad);
-   }
-   sg = running_thread_get_segment();
-   bm_access_load_1(sg->bm, addr);
-   if (bm_load_1_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 1))
-   {
-      drd_report_race(addr, 1, eLoad);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 1, eLoad);
+  }
+  sg = running_thread_get_segment();
+  bm_access_load_1(sg->bm, addr);
+  if (bm_load_1_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 1))
+  {
+    drd_report_race(addr, 1, eLoad);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 2, eLoad);
-   }
-   sg = running_thread_get_segment();
-   bm_access_load_2(sg->bm, addr);
-   if (bm_load_2_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 2))
-   {
-      drd_report_race(addr, 2, eLoad);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 2, eLoad);
+  }
+  sg = running_thread_get_segment();
+  bm_access_load_2(sg->bm, addr);
+  if (bm_load_2_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 2))
+  {
+    drd_report_race(addr, 2, eLoad);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 4, eLoad);
-   }
-   sg = running_thread_get_segment();
-   bm_access_load_4(sg->bm, addr);
-   if (bm_load_4_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 4))
-   {
-      drd_report_race(addr, 4, eLoad);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 4, eLoad);
+  }
+  sg = running_thread_get_segment();
+  bm_access_load_4(sg->bm, addr);
+  if (bm_load_4_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 4))
+  {
+    drd_report_race(addr, 4, eLoad);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 8, eLoad);
-   }
-   sg = running_thread_get_segment();
-   bm_access_load_8(sg->bm, addr);
-   if (bm_load_8_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 8))
-   {
-      drd_report_race(addr, 8, eLoad);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 8, eLoad);
+  }
+  sg = running_thread_get_segment();
+  bm_access_load_8(sg->bm, addr);
+  if (bm_load_8_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 8))
+  {
+    drd_report_race(addr, 8, eLoad);
+  }
 }
 
 static
 VG_REGPARM(2) void drd_trace_store(Addr addr, SizeT size)
 {
-   Segment* sg;
+  Segment* sg;
 
 #if 0
-   /* The assert below has been commented out because of performance reasons.*/
-   tl_assert(thread_get_running_tid()
-             == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
+  /* The assert below has been commented out because of performance reasons.*/
+  tl_assert(thread_get_running_tid()
+            == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
 #endif
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, size, eStore);
-   }
-   sg = running_thread_get_segment();
-   bm_access_range_store(sg->bm, addr, addr + size);
-   if (bm_store_has_conflict_with(thread_get_danger_set(), addr, addr + size)
-       && ! drd_is_suppressed(addr, addr + size))
-   {
-      drd_report_race(addr, size, eStore);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, size, eStore);
+  }
+  sg = running_thread_get_segment();
+  bm_access_range_store(sg->bm, addr, addr + size);
+  if (bm_store_has_conflict_with(thread_get_danger_set(), addr, addr + size)
+      && ! drd_is_suppressed(addr, addr + size))
+  {
+    drd_report_race(addr, size, eStore);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 1, eStore);
-   }
-   sg = running_thread_get_segment();
-   bm_access_store_1(sg->bm, addr);
-   if (bm_store_1_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 1))
-   {
-      drd_report_race(addr, 1, eStore);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 1, eStore);
+  }
+  sg = running_thread_get_segment();
+  bm_access_store_1(sg->bm, addr);
+  if (bm_store_1_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 1))
+  {
+    drd_report_race(addr, 1, eStore);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 2, eStore);
-   }
-   sg = running_thread_get_segment();
-   bm_access_store_2(sg->bm, addr);
-   if (bm_store_2_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 2))
-   {
-      drd_report_race(addr, 2, eStore);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 2, eStore);
+  }
+  sg = running_thread_get_segment();
+  bm_access_store_2(sg->bm, addr);
+  if (bm_store_2_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 2))
+  {
+    drd_report_race(addr, 2, eStore);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 4, eStore);
-   }
-   sg = running_thread_get_segment();
-   bm_access_store_4(sg->bm, addr);
-   if (bm_store_4_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 4))
-   {
-      drd_report_race(addr, 4, eStore);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 4, eStore);
+  }
+  sg = running_thread_get_segment();
+  bm_access_store_4(sg->bm, addr);
+  if (bm_store_4_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 4))
+  {
+    drd_report_race(addr, 4, eStore);
+  }
 }
 
 static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
 {
-   Segment* sg;
+  Segment* sg;
 
-   if (! running_thread_is_recording())
-      return;
+  if (! running_thread_is_recording())
+    return;
 
-   if (drd_trace_mem || (addr == drd_trace_address))
-   {
-      drd_trace_mem_access(addr, 8, eStore);
-   }
-   sg = running_thread_get_segment();
-   bm_access_store_8(sg->bm, addr);
-   if (bm_store_8_has_conflict_with(thread_get_danger_set(), addr)
-       && ! drd_is_suppressed(addr, addr + 8))
-   {
-      drd_report_race(addr, 8, eStore);
-   }
+  if (drd_trace_mem || (addr == drd_trace_address))
+  {
+    drd_trace_mem_access(addr, 8, eStore);
+  }
+  sg = running_thread_get_segment();
+  bm_access_store_8(sg->bm, addr);
+  if (bm_store_8_has_conflict_with(thread_get_danger_set(), addr)
+      && ! drd_is_suppressed(addr, addr + 8))
+  {
+    drd_report_race(addr, 8, eStore);
+  }
 }
 
 static void drd_pre_mem_read(const CorePart part,
@@ -402,10 +402,10 @@
                              const Addr a,
                              const SizeT size)
 {
-   if (size > 0)
-   {
-      drd_trace_load(a, size);
-   }
+  if (size > 0)
+  {
+    drd_trace_load(a, size);
+  }
 }
 
 static void drd_pre_mem_read_asciiz(const CorePart part,
@@ -413,22 +413,22 @@
                                     Char* const s,
                                     const Addr a)
 {
-   const char* p = (void*)a;
-   SizeT size = 0;
+  const char* p = (void*)a;
+  SizeT size = 0;
 
-   /* Note: the expression '*p' reads client memory and may crash if the */
-   /* client provided an invalid pointer !                               */
-   while (*p)
-   {
-      p++;
-      size++;
-   }
-   // To do: find out what a reasonable upper limit on 'size' is.
-   tl_assert(size < 4096);
-   if (size > 0)
-   {
-      drd_trace_load(a, size);
-   }
+  /* Note: the expression '*p' reads client memory and may crash if the */
+  /* client provided an invalid pointer !                               */
+  while (*p)
+  {
+    p++;
+    size++;
+  }
+  // To do: find out what a reasonable upper limit on 'size' is.
+  tl_assert(size < 4096);
+  if (size > 0)
+  {
+    drd_trace_load(a, size);
+  }
 }
 
 static void drd_post_mem_write(const CorePart part,
@@ -436,55 +436,55 @@
                                const Addr a,
                                const SizeT size)
 {
-   thread_set_vg_running_tid(VG_(get_running_tid)());
-   if (size > 0)
-   {
-      drd_trace_store(a, size);
-   }
+  thread_set_vg_running_tid(VG_(get_running_tid)());
+  if (size > 0)
+  {
+    drd_trace_store(a, size);
+  }
 }
 
 static void drd_start_using_mem(const Addr a1, const SizeT len)
 {
-   const Addr a2 = a1 + len;
+  const Addr a2 = a1 + len;
 
-   tl_assert(a1 < a2);
+  tl_assert(a1 < a2);
 
-   thread_set_vg_running_tid(VG_(get_running_tid)());
+  thread_set_vg_running_tid(VG_(get_running_tid)());
 
-   if (a1 <= drd_trace_address && drd_trace_address < a2)
-   {
-      VG_(message)(Vg_UserMsg, "start 0x%lx size %ld %s (tracing 0x%lx)",
-                   a1, a2 - a1, thread_get_name(thread_get_running_tid()),
-                   drd_trace_address);
-      VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
-                                 VG_(clo_backtrace_size));
-   }
+  if (a1 <= drd_trace_address && drd_trace_address < a2)
+  {
+    VG_(message)(Vg_UserMsg, "start 0x%lx size %ld %s (tracing 0x%lx)",
+                 a1, a2 - a1, thread_get_name(thread_get_running_tid()),
+                 drd_trace_address);
+    VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
+                               VG_(clo_backtrace_size));
+  }
 }
 
 static void drd_stop_using_mem(const Addr a1, const SizeT len)
 {
-   const Addr a2 = a1 + len;
+  const Addr a2 = a1 + len;
 
-   tl_assert(a1 < a2);
+  tl_assert(a1 < a2);
 
-   if (a1 <= drd_trace_address && drd_trace_address < a2)
-   {
-      VG_(message)(Vg_UserMsg, "end   0x%lx size %ld %s (tracing 0x%lx)",
-                   a1, a2 - a1, thread_get_name(thread_get_running_tid()),
-                   drd_trace_address);
-      VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
-                                 VG_(clo_backtrace_size));
-   }
-   thread_stop_using_mem(a1, a2);
-   clientobj_stop_using_mem(a1, a2);
-   drd_suppression_stop_using_mem(a1, a2);
+  if (a1 <= drd_trace_address && drd_trace_address < a2)
+  {
+    VG_(message)(Vg_UserMsg, "end   0x%lx size %ld %s (tracing 0x%lx)",
+                 a1, a2 - a1, thread_get_name(thread_get_running_tid()),
+                 drd_trace_address);
+    VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
+                               VG_(clo_backtrace_size));
+  }
+  thread_stop_using_mem(a1, a2);
+  clientobj_stop_using_mem(a1, a2);
+  drd_suppression_stop_using_mem(a1, a2);
 }
 
 static
 void drd_start_using_mem_w_perms(const Addr a, const SizeT len,
                                  const Bool rr, const Bool ww, const Bool xx)
 {
-   drd_start_using_mem(a, len);
+  drd_start_using_mem(a, len);
 }
 
 /* Called by the core when the stack of a thread grows, to indicate that */
@@ -492,8 +492,8 @@
 /* Assumption: stacks grow downward.                                     */
 static void drd_start_using_mem_stack(const Addr a, const SizeT len)
 {
-   thread_set_stack_min(thread_get_running_tid(), a - VG_STACK_REDZONE_SZB);
-   drd_start_using_mem(a, len);
+  thread_set_stack_min(thread_get_running_tid(), a - VG_STACK_REDZONE_SZB);
+  drd_start_using_mem(a, len);
 }
 
 /* Called by the core when the stack of a thread shrinks, to indicate that */
@@ -501,38 +501,38 @@
 /* Assumption: stacks grow downward.                                       */
 static void drd_stop_using_mem_stack(const Addr a, const SizeT len)
 {
-   thread_set_vg_running_tid(VG_(get_running_tid)());
-   thread_set_stack_min(thread_get_running_tid(),
-                        a + len - VG_STACK_REDZONE_SZB);
-   drd_stop_using_mem(a, len);
+  thread_set_vg_running_tid(VG_(get_running_tid)());
+  thread_set_stack_min(thread_get_running_tid(),
+                       a + len - VG_STACK_REDZONE_SZB);
+  drd_stop_using_mem(a, len);
 }
 
 static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len)
 {
-   drd_start_using_mem(a, len);
+  drd_start_using_mem(a, len);
 }
 
 static void drd_stop_using_mem_stack_signal(Addr a, SizeT len)
 {
-   drd_stop_using_mem(a, len);
+  drd_stop_using_mem(a, len);
 }
 
 static
 void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
 {
-   const DrdThreadId drd_creator = VgThreadIdToDrdThreadId(creator);
-   tl_assert(created != VG_INVALID_THREADID);
-   thread_pre_create(drd_creator, created);
-   if (IsValidDrdThreadId(drd_creator))
-   {
-      thread_new_segment(drd_creator);
-   }
-   if (drd_trace_fork_join)
-   {
-      VG_(message)(Vg_DebugMsg,
-                   "drd_pre_thread_create creator = %d/%d, created = %d",
-                   creator, drd_creator, created);
-   }
+  const DrdThreadId drd_creator = VgThreadIdToDrdThreadId(creator);
+  tl_assert(created != VG_INVALID_THREADID);
+  thread_pre_create(drd_creator, created);
+  if (IsValidDrdThreadId(drd_creator))
+  {
+    thread_new_segment(drd_creator);
+  }
+  if (drd_trace_fork_join)
+  {
+    VG_(message)(Vg_DebugMsg,
+                 "drd_pre_thread_create creator = %d/%d, created = %d",
+                 creator, drd_creator, created);
+  }
 }
 
 /* Called by Valgrind's core before any loads or stores are performed on */
@@ -541,144 +541,144 @@
 static
 void drd_post_thread_create(const ThreadId created)
 {
-   const DrdThreadId drd_created = thread_post_create(created);
-   tl_assert(created != VG_INVALID_THREADID);
-   if (drd_trace_fork_join)
-   {
-      VG_(message)(Vg_DebugMsg,
-                   "drd_post_thread_create created = %d/%d",
-                   created, drd_created);
-   }
+  const DrdThreadId drd_created = thread_post_create(created);
+  tl_assert(created != VG_INVALID_THREADID);
+  if (drd_trace_fork_join)
+  {
+    VG_(message)(Vg_DebugMsg,
+                 "drd_post_thread_create created = %d/%d",
+                 created, drd_created);
+  }
 }
 
 /* Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just */
 /* after thread drd_joiner joined thread drd_joinee.                         */
 void drd_post_thread_join(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
 {
-   tl_assert(IsValidDrdThreadId(drd_joiner));
-   tl_assert(IsValidDrdThreadId(drd_joinee));
-   thread_new_segment(drd_joinee);
-   thread_combine_vc(drd_joiner, drd_joinee);
-   thread_new_segment(drd_joiner);
+  tl_assert(IsValidDrdThreadId(drd_joiner));
+  tl_assert(IsValidDrdThreadId(drd_joinee));
+  thread_new_segment(drd_joinee);
+  thread_combine_vc(drd_joiner, drd_joinee);
+  thread_new_segment(drd_joiner);
 
-   if (drd_trace_fork_join)
-   {
-      char msg[256];
-      const ThreadId joiner = DrdThreadIdToVgThreadId(drd_joiner);
-      const ThreadId joinee = DrdThreadIdToVgThreadId(drd_joinee);
-      VG_(snprintf)(msg, sizeof(msg),
-                    "drd_post_thread_join joiner = %d/%d, joinee = %d/%d",
-                    joiner, drd_joiner, joinee, drd_joinee);
-      if (joiner)
-      {
-         VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                       ", new vc: ");
-         vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                    thread_get_vc(drd_joiner));
-      }
-      VG_(message)(Vg_DebugMsg, msg);
-   }
+  if (drd_trace_fork_join)
+  {
+    char msg[256];
+    const ThreadId joiner = DrdThreadIdToVgThreadId(drd_joiner);
+    const ThreadId joinee = DrdThreadIdToVgThreadId(drd_joinee);
+    VG_(snprintf)(msg, sizeof(msg),
+                  "drd_post_thread_join joiner = %d/%d, joinee = %d/%d",
+                  joiner, drd_joiner, joinee, drd_joinee);
+    if (joiner)
+    {
+      VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                    ", new vc: ");
+      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                 thread_get_vc(drd_joiner));
+    }
+    VG_(message)(Vg_DebugMsg, "%s", msg);
+  }
 
-   thread_delete(drd_joinee);
-   mutex_thread_delete(drd_joinee);
-   cond_thread_delete(drd_joinee);
-   semaphore_thread_delete(drd_joinee);
-   barrier_thread_delete(drd_joinee);
+  thread_delete(drd_joinee);
+  mutex_thread_delete(drd_joinee);
+  cond_thread_delete(drd_joinee);
+  semaphore_thread_delete(drd_joinee);
+  barrier_thread_delete(drd_joinee);
 }
 
 void drd_trace_addr(const Addr addr)
 {
-   drd_trace_address = addr;
+  drd_trace_address = addr;
 }
 
 /* Called after a thread has performed its last memory access. */
 static void drd_thread_finished(ThreadId tid)
 {
-   DrdThreadId drd_tid;
+  DrdThreadId drd_tid;
 
-   tl_assert(VG_(get_running_tid)() == tid);
+  tl_assert(VG_(get_running_tid)() == tid);
 
-   drd_tid = VgThreadIdToDrdThreadId(tid);
-   if (drd_trace_fork_join)
-   {
-      VG_(message)(Vg_DebugMsg,
-                   "drd_thread_finished tid = %d/%d%s",
-                   tid,
-                   drd_tid,
-                   thread_get_joinable(drd_tid)
-                   ? ""
-                   : " (which is a detached thread)");
+  drd_tid = VgThreadIdToDrdThreadId(tid);
+  if (drd_trace_fork_join)
+  {
+    VG_(message)(Vg_DebugMsg,
+                 "drd_thread_finished tid = %d/%d%s",
+                 tid,
+                 drd_tid,
+                 thread_get_joinable(drd_tid)
+                 ? ""
+                 : " (which is a detached thread)");
 
-   }
-   thread_finished(drd_tid);
+  }
+  thread_finished(drd_tid);
 }
 
 void drd_pre_mutex_init(const Addr mutex, const MutexT mutex_type)
 {
-   mutex_init(mutex, mutex_type);
+  mutex_init(mutex, mutex_type);
 }
 
 void drd_post_mutex_destroy(const Addr mutex, const MutexT mutex_type)
 {
-   mutex_post_destroy(mutex);
+  mutex_post_destroy(mutex);
 }
 
 void drd_pre_mutex_lock(const Addr mutex, const MutexT mutex_type)
 {
-   mutex_pre_lock(mutex, mutex_type);
+  mutex_pre_lock(mutex, mutex_type);
 }
 
 void drd_post_mutex_lock(const Addr mutex, const Bool took_lock)
 {
-   mutex_post_lock(mutex, took_lock);
+  mutex_post_lock(mutex, took_lock);
 }
 
 void drd_pre_mutex_unlock(const Addr mutex, const MutexT mutex_type)
 {
-   mutex_unlock(mutex, mutex_type);
+  mutex_unlock(mutex, mutex_type);
 }
 
 void drd_pre_cond_init(Addr cond)
 {
-   cond_pre_init(cond);
+  cond_pre_init(cond);
 }
 
 void drd_post_cond_destroy(Addr cond)
 {
-   cond_post_destroy(cond);
+  cond_post_destroy(cond);
 }
 
 void drd_semaphore_init(const Addr semaphore,
                         const Word pshared, const Word value)
 {
-   semaphore_init(semaphore, pshared, value);
+  semaphore_init(semaphore, pshared, value);
 }
 
 void drd_semaphore_destroy(const Addr semaphore)
 {
-   semaphore_destroy(semaphore);
+  semaphore_destroy(semaphore);
 }
 
 void drd_semaphore_pre_wait(const DrdThreadId tid, const Addr semaphore)
 {
-   semaphore_pre_wait(semaphore);
+  semaphore_pre_wait(semaphore);
 }
 
 void drd_semaphore_post_wait(const DrdThreadId tid, const Addr semaphore,
                              const Bool waited)
 {
-   semaphore_post_wait(tid, semaphore, waited);
+  semaphore_post_wait(tid, semaphore, waited);
 }
 
 void drd_semaphore_pre_post(const DrdThreadId tid, const Addr semaphore)
 {
-   semaphore_pre_post(tid, semaphore);
+  semaphore_pre_post(tid, semaphore);
 }
 
 void drd_semaphore_post_post(const DrdThreadId tid, const Addr semaphore,
                              const Bool waited)
 {
-   semaphore_post_post(tid, semaphore, waited);
+  semaphore_post_post(tid, semaphore, waited);
 }
 
 
@@ -686,24 +686,24 @@
                       const BarrierT barrier_type, const Word count,
                       const Bool reinitialization)
 {
-   barrier_init(barrier, barrier_type, count, reinitialization);
+  barrier_init(barrier, barrier_type, count, reinitialization);
 }
 
 void drd_barrier_destroy(const Addr barrier, const BarrierT barrier_type)
 {
-   barrier_destroy(barrier, barrier_type);
+  barrier_destroy(barrier, barrier_type);
 }
 
 void drd_barrier_pre_wait(const DrdThreadId tid, const Addr barrier,
                           const BarrierT barrier_type)
 {
-   barrier_pre_wait(tid, barrier, barrier_type);
+  barrier_pre_wait(tid, barrier, barrier_type);
 }
 
 void drd_barrier_post_wait(const DrdThreadId tid, const Addr barrier,
                            const BarrierT barrier_type, const Bool waited)
 {
-   barrier_post_wait(tid, barrier, barrier_type, waited);
+  barrier_post_wait(tid, barrier, barrier_type, waited);
 }
 
 
@@ -715,9 +715,9 @@
 void drd_post_clo_init(void)
 {
 #  if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
-   /* fine */
+  /* fine */
 #  else
-   VG_(printf)("\nWARNING: DRD has only been tested on x86-linux and amd64-linux.\n\n");
+  VG_(printf)("\nWARNING: DRD has only been tested on x86-linux and amd64-linux.\n\n");
 #  endif
 }
 
@@ -725,100 +725,100 @@
                             IRExpr* const addr_expr,
                             const HWord size)
 {
-   IRExpr* size_expr;
-   IRExpr** argv;
-   IRDirty* di;
+  IRExpr* size_expr;
+  IRExpr** argv;
+  IRDirty* di;
 
-   switch (size)
-   {
-   case 1:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_load_1",
-                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
-                             argv);
-      break;
-   case 2:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_load_2",
-                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
-                             argv);
-      break;
-   case 4:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_load_4",
-                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
-                             argv);
-      break;
-   case 8:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_load_8",
-                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
-                             argv);
-      break;
-   default:
-      size_expr = mkIRExpr_HWord(size);
-      argv = mkIRExprVec_2(addr_expr, size_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/2,
-                             "drd_trace_load",
-                             VG_(fnptr_to_fnentry)(drd_trace_load),
-                             argv);
-      break;
-   }
-   addStmtToIRSB(bb, IRStmt_Dirty(di));
+  switch (size)
+  {
+  case 1:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_load_1",
+                           VG_(fnptr_to_fnentry)(drd_trace_load_1),
+                           argv);
+    break;
+  case 2:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_load_2",
+                           VG_(fnptr_to_fnentry)(drd_trace_load_2),
+                           argv);
+    break;
+  case 4:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_load_4",
+                           VG_(fnptr_to_fnentry)(drd_trace_load_4),
+                           argv);
+    break;
+  case 8:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_load_8",
+                           VG_(fnptr_to_fnentry)(drd_trace_load_8),
+                           argv);
+    break;
+  default:
+    size_expr = mkIRExpr_HWord(size);
+    argv = mkIRExprVec_2(addr_expr, size_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/2,
+                           "drd_trace_load",
+                           VG_(fnptr_to_fnentry)(drd_trace_load),
+                           argv);
+    break;
+  }
+  addStmtToIRSB(bb, IRStmt_Dirty(di));
 }
 
 static void instrument_store(IRSB* const bb,
-                            IRExpr* const addr_expr,
-                            const HWord size)
+                             IRExpr* const addr_expr,
+                             const HWord size)
 {
-   IRExpr* size_expr;
-   IRExpr** argv;
-   IRDirty* di;
+  IRExpr* size_expr;
+  IRExpr** argv;
+  IRDirty* di;
 
-   switch (size)
-   {
-   case 1:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_store_1",
-                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
-                             argv);
-      break;
-   case 2:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_store_2",
-                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
-                             argv);
-      break;
-   case 4:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_store_4",
-                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
-                             argv);
-      break;
-   case 8:
-      argv = mkIRExprVec_1(addr_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/1,
-                             "drd_trace_store_8",
-                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
-                             argv);
-      break;
-   default:
-      size_expr = mkIRExpr_HWord(size);
-      argv = mkIRExprVec_2(addr_expr, size_expr);
-      di = unsafeIRDirty_0_N(/*regparms*/2,
-                             "drd_trace_store",
-                             VG_(fnptr_to_fnentry)(drd_trace_store),
-                             argv);
-      break;
-   }
-   addStmtToIRSB(bb, IRStmt_Dirty(di));
+  switch (size)
+  {
+  case 1:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_store_1",
+                           VG_(fnptr_to_fnentry)(drd_trace_store_1),
+                           argv);
+    break;
+  case 2:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_store_2",
+                           VG_(fnptr_to_fnentry)(drd_trace_store_2),
+                           argv);
+    break;
+  case 4:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_store_4",
+                           VG_(fnptr_to_fnentry)(drd_trace_store_4),
+                           argv);
+    break;
+  case 8:
+    argv = mkIRExprVec_1(addr_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/1,
+                           "drd_trace_store_8",
+                           VG_(fnptr_to_fnentry)(drd_trace_store_8),
+                           argv);
+    break;
+  default:
+    size_expr = mkIRExpr_HWord(size);
+    argv = mkIRExprVec_2(addr_expr, size_expr);
+    di = unsafeIRDirty_0_N(/*regparms*/2,
+                           "drd_trace_store",
+                           VG_(fnptr_to_fnentry)(drd_trace_store),
+                           argv);
+    break;
+  }
+  addStmtToIRSB(bb, IRStmt_Dirty(di));
 }
 
 static
@@ -829,224 +829,217 @@
                      IRType const gWordTy,
                      IRType const hWordTy)
 {
-   IRDirty* di;
-   Int      i;
-   IRSB*    bb;
-   IRExpr** argv;
-   Bool     instrument = True;
-   Bool     bus_locked = False;
+  IRDirty* di;
+  Int      i;
+  IRSB*    bb;
+  IRExpr** argv;
+  Bool     instrument = True;
+  Bool     bus_locked = False;
 
-   /* Set up BB */
-   bb           = emptyIRSB();
-   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
-   bb->next     = deepCopyIRExpr(bb_in->next);
-   bb->jumpkind = bb_in->jumpkind;
+  /* Set up BB */
+  bb           = emptyIRSB();
+  bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
+  bb->next     = deepCopyIRExpr(bb_in->next);
+  bb->jumpkind = bb_in->jumpkind;
 
-   for (i = 0; i < bb_in->stmts_used; i++)
-   {
-      IRStmt* const st = bb_in->stmts[i];
-      tl_assert(st);
-      if (st->tag == Ist_NoOp)
-         continue;
+  for (i = 0; i < bb_in->stmts_used; i++)
+  {
+    IRStmt* const st = bb_in->stmts[i];
+    tl_assert(st);
+    if (st->tag == Ist_NoOp)
+      continue;
 
-      switch (st->tag)
+    switch (st->tag)
+    {
+    case Ist_IMark:
+      instrument = VG_(seginfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
+        != Vg_SectPLT;
+      break;
+
+    case Ist_MBE:
+      switch (st->Ist.MBE.event)
       {
-      case Ist_IMark:
-         instrument = VG_(seginfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
-            != Vg_SectPLT;
-         break;
-
-      case Ist_MBE:
-         switch (st->Ist.MBE.event)
-         {
-         case Imbe_Fence:
-            break; /* not interesting */
-         case Imbe_BusLock:
-            tl_assert(! bus_locked);
-            bus_locked = True;
-            break;
-         case Imbe_BusUnlock:
-            tl_assert(bus_locked);
-            bus_locked = False;
-            break;
-         default:
-            tl_assert(0);
-         }
-         addStmtToIRSB(bb, st);
-         break;
-
-      case Ist_Store:
-         if (instrument && ! bus_locked)
-         {
-            instrument_store(bb,
-                             st->Ist.Store.addr,
-                             sizeofIRType(typeOfIRExpr(bb->tyenv,
-                                                       st->Ist.Store.data)));
-         }
-         addStmtToIRSB(bb, st);
-         break;
-
-      case Ist_WrTmp:
-         if (instrument)
-         {
-            const IRExpr* const data = st->Ist.WrTmp.data;
-            if (data->tag == Iex_Load)
-            {
-               instrument_load(bb,
-                               data->Iex.Load.addr,
-                               sizeofIRType(data->Iex.Load.ty));
-            }
-         }
-         addStmtToIRSB(bb, st);
-         break;
-
-      case Ist_Dirty:
-         if (instrument)
-         {
-            IRDirty* d = st->Ist.Dirty.details;
-            IREffect const mFx = d->mFx;
-            switch (mFx) {
-            case Ifx_None:
-               break;
-            case Ifx_Read:
-            case Ifx_Write:
-            case Ifx_Modify:
-               tl_assert(d->mAddr);
-               tl_assert(d->mSize > 0);
-               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
-               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
-                  di = unsafeIRDirty_0_N(
-                                         /*regparms*/2,
-                                         "drd_trace_load",
-                                         VG_(fnptr_to_fnentry)(drd_trace_load),
-                                         argv);
-                  addStmtToIRSB(bb, IRStmt_Dirty(di));
-               }
-               if ((mFx == Ifx_Write || mFx == Ifx_Modify)
-                   && ! bus_locked)
-               {
-                  di = unsafeIRDirty_0_N(
-                                         /*regparms*/2,
-                                         "drd_trace_store",
-                                         VG_(fnptr_to_fnentry)(drd_trace_store),
-                                         argv);
-                  addStmtToIRSB(bb, IRStmt_Dirty(di));
-               }
-               break;
-            default:
-               tl_assert(0);
-            }
-         }
-         addStmtToIRSB(bb, st);
-         break;
-
+      case Imbe_Fence:
+        break; /* not interesting */
+      case Imbe_BusLock:
+        tl_assert(! bus_locked);
+        bus_locked = True;
+        break;
+      case Imbe_BusUnlock:
+        tl_assert(bus_locked);
+        bus_locked = False;
+        break;
       default:
-         addStmtToIRSB(bb, st);
-         break;
+        tl_assert(0);
       }
-   }
+      addStmtToIRSB(bb, st);
+      break;
 
-   tl_assert(! bus_locked);
+    case Ist_Store:
+      if (instrument && ! bus_locked)
+      {
+        instrument_store(bb,
+                         st->Ist.Store.addr,
+                         sizeofIRType(typeOfIRExpr(bb->tyenv,
+                                                   st->Ist.Store.data)));
+      }
+      addStmtToIRSB(bb, st);
+      break;
 
-   return bb;
+    case Ist_WrTmp:
+      if (instrument)
+      {
+        const IRExpr* const data = st->Ist.WrTmp.data;
+        if (data->tag == Iex_Load)
+        {
+          instrument_load(bb,
+                          data->Iex.Load.addr,
+                          sizeofIRType(data->Iex.Load.ty));
+        }
+      }
+      addStmtToIRSB(bb, st);
+      break;
+
+    case Ist_Dirty:
+      if (instrument)
+      {
+        IRDirty* d = st->Ist.Dirty.details;
+        IREffect const mFx = d->mFx;
+        switch (mFx) {
+        case Ifx_None:
+          break;
+        case Ifx_Read:
+        case Ifx_Write:
+        case Ifx_Modify:
+          tl_assert(d->mAddr);
+          tl_assert(d->mSize > 0);
+          argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
+          if (mFx == Ifx_Read || mFx == Ifx_Modify) {
+            di = unsafeIRDirty_0_N(
+                                   /*regparms*/2,
+                                   "drd_trace_load",
+                                   VG_(fnptr_to_fnentry)(drd_trace_load),
+                                   argv);
+            addStmtToIRSB(bb, IRStmt_Dirty(di));
+          }
+          if ((mFx == Ifx_Write || mFx == Ifx_Modify)
+              && ! bus_locked)
+          {
+            di = unsafeIRDirty_0_N(
+                                   /*regparms*/2,
+                                   "drd_trace_store",
+                                   VG_(fnptr_to_fnentry)(drd_trace_store),
+                                   argv);
+            addStmtToIRSB(bb, IRStmt_Dirty(di));
+          }
+          break;
+        default:
+          tl_assert(0);
+        }
+      }
+      addStmtToIRSB(bb, st);
+      break;
+
+    default:
+      addStmtToIRSB(bb, st);
+      break;
+    }
+  }
+
+  tl_assert(! bus_locked);
+
+  return bb;
 }
 
 static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
 {
-   tl_assert(tid == VG_(get_running_tid)());
-   thread_set_vg_running_tid(tid);
+  tl_assert(tid == VG_(get_running_tid)());
+  thread_set_vg_running_tid(tid);
 }
 
 static
 void drd_fini(Int exitcode)
 {
-   // thread_print_all();
-   if (VG_(clo_verbosity) > 1 || drd_print_stats)
-   {
-      VG_(message)(Vg_DebugMsg,
-                   "   thread: %lld context switches"
-                   " / %lld updates of the danger set",
-                   thread_get_context_switch_count(),
-                   thread_get_update_danger_set_count());
-      VG_(message)(Vg_DebugMsg,
-                   " segments: %lld total, %lld max, %lld discard points",
-                   sg_get_segments_created_count(),
-                   sg_get_max_segments_alive_count(),
-                   thread_get_discard_ordered_segments_count());
-      VG_(message)(Vg_DebugMsg,
-                   "  bitmaps: %lld / %lld bitmaps were allocated"
-                   " and %lld / %lld for danger set updates",
-                   bm_get_bitmap_creation_count(),
-                   bm_get_bitmap2_creation_count(),
-                   thread_get_danger_set_bitmap_creation_count(),
-                   thread_get_danger_set_bitmap2_creation_count());
-      VG_(message)(Vg_DebugMsg,
-                   "    mutex: %lld non-recursive lock/unlock events",
-                   get_mutex_lock_count());
-      drd_print_malloc_stats();
-   }
+  // thread_print_all();
+  if (VG_(clo_verbosity) > 1 || drd_print_stats)
+  {
+    VG_(message)(Vg_DebugMsg,
+                 "   thread: %lld context switches"
+                 " / %lld updates of the danger set",
+                 thread_get_context_switch_count(),
+                 thread_get_update_danger_set_count());
+    VG_(message)(Vg_DebugMsg,
+                 " segments: %lld total, %lld max, %lld discard points",
+                 sg_get_segments_created_count(),
+                 sg_get_max_segments_alive_count(),
+                 thread_get_discard_ordered_segments_count());
+    VG_(message)(Vg_DebugMsg,
+                 "  bitmaps: %lld / %lld bitmaps were allocated"
+                 " and %lld / %lld for danger set updates",
+                 bm_get_bitmap_creation_count(),
+                 bm_get_bitmap2_creation_count(),
+                 thread_get_danger_set_bitmap_creation_count(),
+                 thread_get_danger_set_bitmap2_creation_count());
+    VG_(message)(Vg_DebugMsg,
+                 "    mutex: %lld non-recursive lock/unlock events",
+                 get_mutex_lock_count());
+    drd_print_malloc_stats();
+  }
 }
 
 static
 void drd_pre_clo_init(void)
 {
-   // Basic tool stuff.
+  // Basic tool stuff.
 
-   VG_(details_name)            ("exp-drd");
-   VG_(details_version)         (NULL);
-   VG_(details_description)     ("a data race detector");
-   VG_(details_copyright_author)("Copyright (C) 2006-2008, and GNU GPL'd,"
-                                 " by Bart Van Assche.");
-   VG_(details_bug_reports_to)  (VG_BUGS_TO);
+  VG_(details_name)            ("exp-drd");
+  VG_(details_version)         (NULL);
+  VG_(details_description)     ("a data race detector");
+  VG_(details_copyright_author)("Copyright (C) 2006-2008, and GNU GPL'd,"
+                                " by Bart Van Assche.");
+  VG_(details_bug_reports_to)  (VG_BUGS_TO);
 
-   VG_(basic_tool_funcs)        (drd_post_clo_init,
-                                 drd_instrument,
-                                 drd_fini);
+  VG_(basic_tool_funcs)        (drd_post_clo_init,
+                                drd_instrument,
+                                drd_fini);
 
-   // Command line stuff.
-   VG_(needs_command_line_options)(drd_process_cmd_line_option,
-                                   drd_print_usage,
-                                   drd_print_debug_usage);
+  // Command line stuff.
+  VG_(needs_command_line_options)(drd_process_cmd_line_option,
+                                  drd_print_usage,
+                                  drd_print_debug_usage);
 
-   // Error handling.
-   drd_register_error_handlers();
+  // Error handling.
+  drd_register_error_handlers();
 
-   // Core event tracking.
-   VG_(track_pre_mem_read)         (drd_pre_mem_read);
-   VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
-   VG_(track_post_mem_write)       (drd_post_mem_write);
-   VG_(track_new_mem_brk)          (drd_start_using_mem);
-   VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
-   VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
-   VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
-   VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
-   VG_(track_die_mem_brk)          (drd_stop_using_mem);
-   VG_(track_die_mem_munmap)       (drd_stop_using_mem);
-   VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
-   VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
-   VG_(track_start_client_code)    (drd_start_client_code);
-   VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
-   VG_(track_pre_thread_first_insn)(drd_post_thread_create);
-   VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
+  // Core event tracking.
+  VG_(track_pre_mem_read)         (drd_pre_mem_read);
+  VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
+  VG_(track_post_mem_write)       (drd_post_mem_write);
+  VG_(track_new_mem_brk)          (drd_start_using_mem);
+  VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
+  VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
+  VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
+  VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
+  VG_(track_die_mem_brk)          (drd_stop_using_mem);
+  VG_(track_die_mem_munmap)       (drd_stop_using_mem);
+  VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
+  VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
+  VG_(track_start_client_code)    (drd_start_client_code);
+  VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
+  VG_(track_pre_thread_first_insn)(drd_post_thread_create);
+  VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
 
-   // Other stuff.
-   VG_(needs_var_info)();
+  // Other stuff.
+  VG_(needs_var_info)();
 
-   drd_register_malloc_wrappers(drd_start_using_mem, drd_stop_using_mem);
+  drd_register_malloc_wrappers(drd_start_using_mem, drd_stop_using_mem);
 
-   drd_clientreq_init();
+  drd_clientreq_init();
 
-   drd_suppression_init();
+  drd_suppression_init();
 
-   clientobj_init();
+  clientobj_init();
 }
 
 
 VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init)
-
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/drd_malloc_wrappers.c b/exp-drd/drd_malloc_wrappers.c
index dcfde41..b2d06cd 100644
--- a/exp-drd/drd_malloc_wrappers.c
+++ b/exp-drd/drd_malloc_wrappers.c
@@ -44,10 +44,10 @@
 
 
 typedef struct _DRD_Chunk {
-   struct _DRD_Chunk* next;
-   Addr          data;            // ptr to actual block
-   SizeT         size : (sizeof(UWord)*8)-2; //size requested; 30 or 62 bits
-   ExeContext*   where;           // where it was allocated
+  struct _DRD_Chunk* next;
+  Addr          data;            // ptr to actual block
+  SizeT         size : (sizeof(UWord)*8)-2; //size requested; 30 or 62 bits
+  ExeContext*   where;           // where it was allocated
 } DRD_Chunk;
 
 static StartUsingMem s_start_using_mem_callback;
@@ -70,12 +70,12 @@
 static
 DRD_Chunk* create_DRD_Chunk(ThreadId tid, Addr p, SizeT size)
 {
-   DRD_Chunk* mc = VG_(malloc)(sizeof(DRD_Chunk));
-   mc->data      = p;
-   mc->size      = size;
-   mc->where     = VG_(record_ExeContext)(tid, 0);
+  DRD_Chunk* mc = VG_(malloc)(sizeof(DRD_Chunk));
+  mc->data      = p;
+  mc->size      = size;
+  mc->where     = VG_(record_ExeContext)(tid, 0);
 
-   return mc;
+  return mc;
 }
 
 /*------------------------------------------------------------*/
@@ -89,192 +89,192 @@
                     SizeT size, SizeT align,
                     Bool is_zeroed)
 {
-   Addr p;
+  Addr p;
 
-   cmalloc_n_mallocs ++;
+  cmalloc_n_mallocs ++;
 
-   // Allocate and zero
-   p = (Addr)VG_(cli_malloc)(align, size);
-   if (!p) {
-      return NULL;
-   }
-   if (is_zeroed) VG_(memset)((void*)p, 0, size);
-   s_start_using_mem_callback(p, p + size);
+  // Allocate and zero
+  p = (Addr)VG_(cli_malloc)(align, size);
+  if (!p) {
+    return NULL;
+  }
+  if (is_zeroed) VG_(memset)((void*)p, 0, size);
+  s_start_using_mem_callback(p, p + size);
 
-   // Only update this stat if allocation succeeded.
-   cmalloc_bs_mallocd += size;
+  // Only update this stat if allocation succeeded.
+  cmalloc_bs_mallocd += size;
 
-   VG_(HT_add_node)(drd_malloc_list, create_DRD_Chunk(tid, p, size));
+  VG_(HT_add_node)(drd_malloc_list, create_DRD_Chunk(tid, p, size));
 
-   return (void*)p;
+  return (void*)p;
 }
 
 static
 void* drd_malloc(ThreadId tid, SizeT n)
 {
-   return drd_new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
+  return drd_new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
 }
 
 static
 void* drd_memalign(ThreadId tid, SizeT align, SizeT n)
 {
-   return drd_new_block(tid, n, align, /*is_zeroed*/False);
+  return drd_new_block(tid, n, align, /*is_zeroed*/False);
 }
 
 static
 void* drd_calloc(ThreadId tid, SizeT nmemb, SizeT size1)
 {
-   return drd_new_block(tid, nmemb*size1, VG_(clo_alignment),
-                        /*is_zeroed*/True);
+  return drd_new_block(tid, nmemb*size1, VG_(clo_alignment),
+                       /*is_zeroed*/True);
 }
 
 static
 __inline__
 void drd_handle_free(ThreadId tid, Addr p)
 {
-   DRD_Chunk* mc;
+  DRD_Chunk* mc;
 
-   cmalloc_n_frees++;
+  cmalloc_n_frees++;
 
-   mc = VG_(HT_remove)(drd_malloc_list, (UWord)p);
-   if (mc == NULL)
-   {
-      tl_assert(0);
-   }
-   else
-   {
-      s_stop_using_mem_callback(mc->data, mc->size);
-      VG_(free)(mc);
-   }
+  mc = VG_(HT_remove)(drd_malloc_list, (UWord)p);
+  if (mc == NULL)
+  {
+    tl_assert(0);
+  }
+  else
+  {
+    s_stop_using_mem_callback(mc->data, mc->size);
+    VG_(free)(mc);
+  }
 }
 
 static
 void drd_free(ThreadId tid, void* p)
 {
-   drd_handle_free(tid, (Addr)p);
+  drd_handle_free(tid, (Addr)p);
 }
 
 static
 void* drd_realloc(ThreadId tid, void* p_old, SizeT new_size)
 {
-   DRD_Chunk* mc;
-   void*     p_new;
-   SizeT     old_size;
+  DRD_Chunk* mc;
+  void*     p_new;
+  SizeT     old_size;
 
-   cmalloc_n_frees ++;
-   cmalloc_n_mallocs ++;
-   cmalloc_bs_mallocd += new_size;
+  cmalloc_n_frees ++;
+  cmalloc_n_mallocs ++;
+  cmalloc_bs_mallocd += new_size;
 
-   /* Remove the old block */
-   mc = VG_(HT_remove)(drd_malloc_list, (UWord)p_old);
-   if (mc == NULL) {
-      tl_assert(0);
-      return NULL;
-   }
+  /* Remove the old block */
+  mc = VG_(HT_remove)(drd_malloc_list, (UWord)p_old);
+  if (mc == NULL) {
+    tl_assert(0);
+    return NULL;
+  }
 
-   old_size = mc->size;
+  old_size = mc->size;
 
-   if (old_size == new_size)
-   {
-      /* size unchanged */
-      mc->where = VG_(record_ExeContext)(tid, 0);
-      p_new = p_old;
+  if (old_size == new_size)
+  {
+    /* size unchanged */
+    mc->where = VG_(record_ExeContext)(tid, 0);
+    p_new = p_old;
       
-   }
-   else if (old_size > new_size)
-   {
-      /* new size is smaller */
-      s_stop_using_mem_callback(mc->data + new_size, old_size);
-      mc->size = new_size;
-      mc->where = VG_(record_ExeContext)(tid, 0);
-      p_new = p_old;
+  }
+  else if (old_size > new_size)
+  {
+    /* new size is smaller */
+    s_stop_using_mem_callback(mc->data + new_size, old_size);
+    mc->size = new_size;
+    mc->where = VG_(record_ExeContext)(tid, 0);
+    p_new = p_old;
 
-   }
-   else
-   {
-      /* new size is bigger */
-      /* Get new memory */
-      const Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
+  }
+  else
+  {
+    /* new size is bigger */
+    /* Get new memory */
+    const Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
 
-      if (a_new)
-      {
-         /* Copy from old to new */
-         VG_(memcpy)((void*)a_new, p_old, mc->size);
+    if (a_new)
+    {
+      /* Copy from old to new */
+      VG_(memcpy)((void*)a_new, p_old, mc->size);
 
-         /* Free old memory */
-         s_stop_using_mem_callback(mc->data, mc->size);
-         VG_(free)(mc);
+      /* Free old memory */
+      s_stop_using_mem_callback(mc->data, mc->size);
+      VG_(free)(mc);
 
-         // Allocate a new chunk.
-         mc = create_DRD_Chunk(tid, a_new, new_size);
-         s_start_using_mem_callback(a_new, a_new + new_size);
-      }
-      else
-      {
-         /* Allocation failed -- leave original block untouched. */
-      }
+      // Allocate a new chunk.
+      mc = create_DRD_Chunk(tid, a_new, new_size);
+      s_start_using_mem_callback(a_new, a_new + new_size);
+    }
+    else
+    {
+      /* Allocation failed -- leave original block untouched. */
+    }
 
-      p_new = (void*)a_new;
-   }  
+    p_new = (void*)a_new;
+  }  
 
-   // Now insert the new mc (with a possibly new 'data' field) into
-   // malloc_list.  If this realloc() did not increase the memory size, we
-   // will have removed and then re-added mc unnecessarily.  But that's ok
-   // because shrinking a block with realloc() is (presumably) much rarer
-   // than growing it, and this way simplifies the growing case.
-   VG_(HT_add_node)(drd_malloc_list, mc);
+  // Now insert the new mc (with a possibly new 'data' field) into
+  // malloc_list.  If this realloc() did not increase the memory size, we
+  // will have removed and then re-added mc unnecessarily.  But that's ok
+  // because shrinking a block with realloc() is (presumably) much rarer
+  // than growing it, and this way simplifies the growing case.
+  VG_(HT_add_node)(drd_malloc_list, mc);
 
-   return p_new;
+  return p_new;
 }
 
 static
 void* drd___builtin_new(ThreadId tid, SizeT n)
 {
-   void* const result = drd_new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
-   //VG_(message)(Vg_DebugMsg, "__builtin_new(%d, %d) = %p", tid, n, result);
-   return result;
+  void* const result = drd_new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
+  //VG_(message)(Vg_DebugMsg, "__builtin_new(%d, %d) = %p", tid, n, result);
+  return result;
 }
 
 static
 void drd___builtin_delete(ThreadId tid, void* p)
 {
-   //VG_(message)(Vg_DebugMsg, "__builtin_delete(%d, %p)", tid, p);
-   drd_handle_free(tid, (Addr)p);
+  //VG_(message)(Vg_DebugMsg, "__builtin_delete(%d, %p)", tid, p);
+  drd_handle_free(tid, (Addr)p);
 }
 
 static
 void* drd___builtin_vec_new(ThreadId tid, SizeT n)
 {
-   return drd_new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
+  return drd_new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
 }
 
 static
 void drd___builtin_vec_delete(ThreadId tid, void* p)
 {
-   drd_handle_free(tid, (Addr)p);
+  drd_handle_free(tid, (Addr)p);
 }
 
 void drd_register_malloc_wrappers(const StartUsingMem start_using_mem_callback,
                                   const StopUsingMem stop_using_mem_callback)
 {
-   tl_assert(drd_malloc_list == 0);
-   drd_malloc_list = VG_(HT_construct)("drd_malloc_list");   // a big prime
-   tl_assert(drd_malloc_list != 0);
-   tl_assert(stop_using_mem_callback);
+  tl_assert(drd_malloc_list == 0);
+  drd_malloc_list = VG_(HT_construct)("drd_malloc_list");   // a big prime
+  tl_assert(drd_malloc_list != 0);
+  tl_assert(stop_using_mem_callback);
 
-   s_start_using_mem_callback = start_using_mem_callback;
-   s_stop_using_mem_callback  = stop_using_mem_callback;
+  s_start_using_mem_callback = start_using_mem_callback;
+  s_stop_using_mem_callback  = stop_using_mem_callback;
 
-   VG_(needs_malloc_replacement)(drd_malloc,
-                                 drd___builtin_new,
-                                 drd___builtin_vec_new,
-                                 drd_memalign,
-                                 drd_calloc,
-                                 drd_free,
-                                 drd___builtin_delete,
-                                 drd___builtin_vec_delete,
-                                 drd_realloc,
-                                 0);
+  VG_(needs_malloc_replacement)(drd_malloc,
+                                drd___builtin_new,
+                                drd___builtin_vec_new,
+                                drd_memalign,
+                                drd_calloc,
+                                drd_free,
+                                drd___builtin_delete,
+                                drd___builtin_vec_delete,
+                                drd_realloc,
+                                0);
 }
 
 Bool drd_heap_addrinfo(Addr const a,
@@ -282,24 +282,24 @@
                        SizeT* const size,
                        ExeContext** const where)
 {
-   DRD_Chunk* mc;
+  DRD_Chunk* mc;
 
-   tl_assert(data);
-   tl_assert(size);
-   tl_assert(where);
+  tl_assert(data);
+  tl_assert(size);
+  tl_assert(where);
 
-   VG_(HT_ResetIter)(drd_malloc_list);
-   while ((mc = VG_(HT_Next)(drd_malloc_list)))
-   {
-      if (mc->data <= a && a < mc->data + mc->size)
-      {
-         *data  = mc->data;
-         *size  = mc->size;
-         *where = mc->where;
-         return True;
-      }
-   }
-   return False;
+  VG_(HT_ResetIter)(drd_malloc_list);
+  while ((mc = VG_(HT_Next)(drd_malloc_list)))
+  {
+    if (mc->data <= a && a < mc->data + mc->size)
+    {
+      *data  = mc->data;
+      *size  = mc->size;
+      *where = mc->where;
+      return True;
+    }
+  }
+  return False;
 }
 
 /*------------------------------------------------------------*/
@@ -308,40 +308,34 @@
 
 void drd_print_malloc_stats(void)
 {
-   DRD_Chunk* mc;
-   SizeT     nblocks = 0;
-   SizeT     nbytes  = 0;
+  DRD_Chunk* mc;
+  SizeT     nblocks = 0;
+  SizeT     nbytes  = 0;
    
-   if (VG_(clo_verbosity) == 0)
-      return;
-   if (VG_(clo_xml))
-      return;
+  if (VG_(clo_verbosity) == 0)
+    return;
+  if (VG_(clo_xml))
+    return;
 
-   /* Count memory still in use. */
-   VG_(HT_ResetIter)(drd_malloc_list);
-   while ((mc = VG_(HT_Next)(drd_malloc_list)))
-   {
-      nblocks++;
-      nbytes += mc->size;
-   }
+  /* Count memory still in use. */
+  VG_(HT_ResetIter)(drd_malloc_list);
+  while ((mc = VG_(HT_Next)(drd_malloc_list)))
+  {
+    nblocks++;
+    nbytes += mc->size;
+  }
 
-   VG_(message)(Vg_DebugMsg, 
-                "malloc/free: in use at exit: %lu bytes in %lu blocks.",
-                nbytes, nblocks);
-   VG_(message)(Vg_DebugMsg, 
-                "malloc/free: %lu allocs, %lu frees, %lu bytes allocated.",
-                cmalloc_n_mallocs,
-                cmalloc_n_frees, cmalloc_bs_mallocd);
-   if (VG_(clo_verbosity) > 1)
-      VG_(message)(Vg_DebugMsg, " ");
+  VG_(message)(Vg_DebugMsg, 
+               "malloc/free: in use at exit: %lu bytes in %lu blocks.",
+               nbytes, nblocks);
+  VG_(message)(Vg_DebugMsg, 
+               "malloc/free: %lu allocs, %lu frees, %lu bytes allocated.",
+               cmalloc_n_mallocs,
+               cmalloc_n_frees, cmalloc_bs_mallocd);
+  if (VG_(clo_verbosity) > 1)
+    VG_(message)(Vg_DebugMsg, " ");
 }
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
 /*--------------------------------------------------------------------*/
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/drd_mutex.c b/exp-drd/drd_mutex.c
index df79cbe..ba37e31 100644
--- a/exp-drd/drd_mutex.c
+++ b/exp-drd/drd_mutex.c
@@ -464,10 +464,3 @@
 {
   return s_mutex_lock_count;
 }
-
-
-/*
- * Local variables:
- * c-basic-offset: 2
- * End:
- */
diff --git a/exp-drd/drd_pthread_intercepts.c b/exp-drd/drd_pthread_intercepts.c
index 0d132cb..4a36af2 100644
--- a/exp-drd/drd_pthread_intercepts.c
+++ b/exp-drd/drd_pthread_intercepts.c
@@ -59,23 +59,23 @@
 
 // Defines.
 
-#define PTH_FUNC(ret_ty, f, args...) \
-   ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args); \
-   ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args)
+#define PTH_FUNC(ret_ty, f, args...)                            \
+  ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args);        \
+  ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args)
 
 
 // Local data structures.
 
 typedef struct
 {
-   void* (*start)(void*);
-   void* arg;
-   int   detachstate;
+  void* (*start)(void*);
+  void* arg;
+  int   detachstate;
 #if 0
-   pthread_mutex_t mutex;
-   pthread_cond_t  cond;
+  pthread_mutex_t mutex;
+  pthread_cond_t  cond;
 #else
-   int wrapper_started;
+  int wrapper_started;
 #endif
 } VgPosixThreadArgs;
 
@@ -89,730 +89,730 @@
 
 static MutexT pthread_to_drd_mutex_type(const int kind)
 {
-   switch (kind)
-   {
-   /* PTHREAD_MUTEX_RECURSIVE_NP */
-   case PTHREAD_MUTEX_RECURSIVE:
-      return mutex_type_recursive_mutex;
-   /* PTHREAD_MUTEX_ERRORCHECK_NP */
-   case PTHREAD_MUTEX_ERRORCHECK:
-      return mutex_type_errorcheck_mutex;
-   /* PTHREAD_MUTEX_TIMED_NP */
-   /* PTHREAD_MUTEX_NORMAL */
-   case PTHREAD_MUTEX_DEFAULT:
-   case PTHREAD_MUTEX_ADAPTIVE_NP:
-      return mutex_type_default_mutex;
-   }
-   return mutex_type_invalid_mutex;
+  switch (kind)
+  {
+    /* PTHREAD_MUTEX_RECURSIVE_NP */
+  case PTHREAD_MUTEX_RECURSIVE:
+    return mutex_type_recursive_mutex;
+    /* PTHREAD_MUTEX_ERRORCHECK_NP */
+  case PTHREAD_MUTEX_ERRORCHECK:
+    return mutex_type_errorcheck_mutex;
+    /* PTHREAD_MUTEX_TIMED_NP */
+    /* PTHREAD_MUTEX_NORMAL */
+  case PTHREAD_MUTEX_DEFAULT:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
+    return mutex_type_default_mutex;
+  }
+  return mutex_type_invalid_mutex;
 }
 
 static MutexT mutex_type(pthread_mutex_t* mutex)
 {
 #if defined(_PTHREAD_DESCR_DEFINED)
-   // Linuxthreads.
-   const int kind = mutex->__m_kind;
+  // Linuxthreads.
+  const int kind = mutex->__m_kind;
 #elif defined(__SIZEOF_PTHREAD_MUTEX_T)
-   // NPTL.
-   const int kind = mutex->__data.__kind;
+  // NPTL.
+  const int kind = mutex->__data.__kind;
 #else
-   // Another POSIX threads implementation. Regression tests will fail.
-   const int kind = PTHREAD_MUTEX_DEFAULT;
+  // Another POSIX threads implementation. Regression tests will fail.
+  const int kind = PTHREAD_MUTEX_DEFAULT;
 #endif
-   return pthread_to_drd_mutex_type(kind);
+  return pthread_to_drd_mutex_type(kind);
 }
 
 static void vg_start_suppression(const void* const p, size_t const size)
 {
-   int res;
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_START_SUPPRESSION,
-                              p, (char*)p + size, 0, 0, 0);
+  int res;
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_START_SUPPRESSION,
+                             p, (char*)p + size, 0, 0, 0);
 }
 
 static void vg_set_joinable(const pthread_t tid, const int joinable)
 {
-   int res;
-   assert(joinable == 0 || joinable == 1);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__SET_JOINABLE,
-                              tid, joinable, 0, 0, 0);
+  int res;
+  assert(joinable == 0 || joinable == 1);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__SET_JOINABLE,
+                             tid, joinable, 0, 0, 0);
 }
 
 static void* vg_thread_wrapper(void* arg)
 {
-   int res;
+  int res;
 
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
-                              0, 0, 0, 0, 0);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
+                             0, 0, 0, 0, 0);
 
-   {
-      VgPosixThreadArgs* const arg_ptr = (VgPosixThreadArgs*)arg;
-      VgPosixThreadArgs const arg_copy = *arg_ptr;
-      void* result;
+  {
+    VgPosixThreadArgs* const arg_ptr = (VgPosixThreadArgs*)arg;
+    VgPosixThreadArgs const arg_copy = *arg_ptr;
+    void* result;
 
 #if 0
-      pthread_mutex_lock(arg_ptr->mutex);
-      pthread_cond_signal(arg_ptr->cond);
-      pthread_mutex_unlock(arg_ptr->mutex);
+    pthread_mutex_lock(arg_ptr->mutex);
+    pthread_cond_signal(arg_ptr->cond);
+    pthread_mutex_unlock(arg_ptr->mutex);
 #else
-      arg_ptr->wrapper_started = 1;
+    arg_ptr->wrapper_started = 1;
 #endif
 
-      VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
-                                 pthread_self(), 0, 0, 0, 0);
-      vg_set_joinable(pthread_self(),
-                      arg_copy.detachstate == PTHREAD_CREATE_JOINABLE);
-      result = (arg_copy.start)(arg_copy.arg);
-      return result;
-   }
+    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
+                               pthread_self(), 0, 0, 0, 0);
+    vg_set_joinable(pthread_self(),
+                    arg_copy.detachstate == PTHREAD_CREATE_JOINABLE);
+    result = (arg_copy.start)(arg_copy.arg);
+    return result;
+  }
 }
 
 static int detected_linuxthreads(void)
 {
 #if defined(linux)
 #if defined(_CS_GNU_LIBPTHREAD_VERSION)
-   /* Linux with a recent glibc. */
-   char buffer[256];
-   unsigned len;
-   len = confstr(_CS_GNU_LIBPTHREAD_VERSION, buffer, sizeof(buffer));
-   assert(len <= sizeof(buffer));
-   return len > 0 && buffer[0] == 'l';
+  /* Linux with a recent glibc. */
+  char buffer[256];
+  unsigned len;
+  len = confstr(_CS_GNU_LIBPTHREAD_VERSION, buffer, sizeof(buffer));
+  assert(len <= sizeof(buffer));
+  return len > 0 && buffer[0] == 'l';
 #else
-   /* Linux without _CS_GNU_LIBPTHREAD_VERSION: most likely LinuxThreads. */
-   return 1;
+  /* Linux without _CS_GNU_LIBPTHREAD_VERSION: most likely LinuxThreads. */
+  return 1;
 #endif
 #else
-   /* Another OS than Linux, hence no LinuxThreads. */
-   return 0;
+  /* Another OS than Linux, hence no LinuxThreads. */
+  return 0;
 #endif
 }
 
 static void vg_set_main_thread_state(void)
 {
-   int res;
+  int res;
 
-   if (detected_linuxthreads())
-   {
-      if (getenv("LD_ASSUME_KERNEL"))
-      {
-         fprintf(stderr,
-"Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
-"the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
-"after having unset the environment variable LD_ASSUME_KERNEL. Giving up.\n"
-                 );
-      }
-      else
-      {
-         fprintf(stderr,
-"Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
-"the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
-"after having upgraded to a newer version of your Linux distribution.\n"
-"Giving up.\n"
-                 );
-      }
-      abort();
-   }
+  if (detected_linuxthreads())
+  {
+    if (getenv("LD_ASSUME_KERNEL"))
+    {
+      fprintf(stderr,
+              "Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
+              "the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
+              "after having unset the environment variable LD_ASSUME_KERNEL. Giving up.\n"
+              );
+    }
+    else
+    {
+      fprintf(stderr,
+              "Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
+              "the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
+              "after having upgraded to a newer version of your Linux distribution.\n"
+              "Giving up.\n"
+              );
+    }
+    abort();
+  }
 
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
-                              0, 0, 0, 0, 0);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
+                             0, 0, 0, 0, 0);
 
-   // Make sure that DRD knows about the main thread's POSIX thread ID.
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
-                              pthread_self(), 0, 0, 0, 0);
+  // Make sure that DRD knows about the main thread's POSIX thread ID.
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
+                             pthread_self(), 0, 0, 0, 0);
 
 }
 
 // pthread_create
 PTH_FUNC(int, pthreadZucreateZa, // pthread_create*
-              pthread_t *thread, const pthread_attr_t *attr,
-              void *(*start) (void *), void *arg)
+         pthread_t *thread, const pthread_attr_t *attr,
+         void *(*start) (void *), void *arg)
 {
-   int    ret;
-   OrigFn fn;
-   VgPosixThreadArgs vgargs;
+  int    ret;
+  OrigFn fn;
+  VgPosixThreadArgs vgargs;
 
-   VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_GET_ORIG_FN(fn);
 
-   if (vg_main_thread_state_is_set == 0)
-   {
-      vg_set_main_thread_state();
-      vg_main_thread_state_is_set = 1;
-   }
-   vg_start_suppression(&vgargs.wrapper_started,
-                        sizeof(vgargs.wrapper_started));
-   vgargs.start = start;
-   vgargs.arg   = arg;
-   vgargs.wrapper_started = 0;
-   vgargs.detachstate = PTHREAD_CREATE_JOINABLE;
-   if (attr)
-   {
-      if (pthread_attr_getdetachstate(attr, &vgargs.detachstate) != 0)
-      {
-         assert(0);
-      }
-   }
-   assert(vgargs.detachstate == PTHREAD_CREATE_JOINABLE
-          || vgargs.detachstate == PTHREAD_CREATE_DETACHED);
+  if (vg_main_thread_state_is_set == 0)
+  {
+    vg_set_main_thread_state();
+    vg_main_thread_state_is_set = 1;
+  }
+  vg_start_suppression(&vgargs.wrapper_started,
+                       sizeof(vgargs.wrapper_started));
+  vgargs.start = start;
+  vgargs.arg   = arg;
+  vgargs.wrapper_started = 0;
+  vgargs.detachstate = PTHREAD_CREATE_JOINABLE;
+  if (attr)
+  {
+    if (pthread_attr_getdetachstate(attr, &vgargs.detachstate) != 0)
+    {
+      assert(0);
+    }
+  }
+  assert(vgargs.detachstate == PTHREAD_CREATE_JOINABLE
+         || vgargs.detachstate == PTHREAD_CREATE_DETACHED);
 #if 0
-   pthread_mutex_init(&vgargs.mutex, 0);
-   pthread_cond_init(&vgargs.cond, 0);
-   pthread_mutex_lock(&vgargs.mutex);
+  pthread_mutex_init(&vgargs.mutex, 0);
+  pthread_cond_init(&vgargs.cond, 0);
+  pthread_mutex_lock(&vgargs.mutex);
 #endif
-   CALL_FN_W_WWWW(ret, fn, thread, attr, vg_thread_wrapper, &vgargs);
+  CALL_FN_W_WWWW(ret, fn, thread, attr, vg_thread_wrapper, &vgargs);
 #if 0
-   pthread_cond_wait(&vgargs.cond, &vgargs.mutex);
-   pthread_mutex_unlock(&vgargs.mutex);
-   pthread_cond_destroy(&vgargs.cond);
-   pthread_mutex_destroy(&vgargs.mutex);
+  pthread_cond_wait(&vgargs.cond, &vgargs.mutex);
+  pthread_mutex_unlock(&vgargs.mutex);
+  pthread_cond_destroy(&vgargs.cond);
+  pthread_mutex_destroy(&vgargs.mutex);
 #else
-   // Yes, you see it correctly, busy waiting ... The problem is that
-   // POSIX threads functions cannot be called here -- the functions defined
-   // in this file (drd_intercepts.c) would be called instead of those in
-   // libpthread.so. This loop is necessary because vgargs is allocated on the
-   // stack, and the created thread reads it.
-   if (ret == 0)
-   {
-      while (! vgargs.wrapper_started)
-      {
-         sched_yield();
-      }
-   }
+  // Yes, you see it correctly, busy waiting ... The problem is that
+  // POSIX threads functions cannot be called here -- the functions defined
+  // in this file (drd_intercepts.c) would be called instead of those in
+  // libpthread.so. This loop is necessary because vgargs is allocated on the
+  // stack, and the created thread reads it.
+  if (ret == 0)
+  {
+    while (! vgargs.wrapper_started)
+    {
+      sched_yield();
+    }
+  }
 #endif
-   return ret;
+  return ret;
 }
 
 // pthread_join
 PTH_FUNC(int, pthreadZujoin, // pthread_join
-              pthread_t pt_joinee, void **thread_return)
+         pthread_t pt_joinee, void **thread_return)
 {
-   int      ret;
-   int      res;
-   OrigFn   fn;
+  int      ret;
+  int      res;
+  OrigFn   fn;
 
-   VALGRIND_GET_ORIG_FN(fn);
-   CALL_FN_W_WW(ret, fn, pt_joinee, thread_return);
-   if (ret == 0)
-   {
-      VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_JOIN,
-                                 pt_joinee, 0, 0, 0, 0);
-   }
-   return ret;
+  VALGRIND_GET_ORIG_FN(fn);
+  CALL_FN_W_WW(ret, fn, pt_joinee, thread_return);
+  if (ret == 0)
+  {
+    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_JOIN,
+                               pt_joinee, 0, 0, 0, 0);
+  }
+  return ret;
 }
 
 // pthread_detach
 PTH_FUNC(int, pthreadZudetach, pthread_t pt_thread)
 {
-   int ret;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   {
-      CALL_FN_W_W(ret, fn, pt_thread);
-      if (ret == 0)
-      {
-         vg_set_joinable(pt_thread, 0);
-      }
-   }
-   return ret;
+  int ret;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  {
+    CALL_FN_W_W(ret, fn, pt_thread);
+    if (ret == 0)
+    {
+      vg_set_joinable(pt_thread, 0);
+    }
+  }
+  return ret;
 }
 
 // pthread_mutex_init
 PTH_FUNC(int, pthreadZumutexZuinit,
-              pthread_mutex_t *mutex,
-              const pthread_mutexattr_t* attr)
+         pthread_mutex_t *mutex,
+         const pthread_mutexattr_t* attr)
 {
-   int ret;
-   int res;
-   OrigFn fn;
-   int mt;
-   VALGRIND_GET_ORIG_FN(fn);
-   mt = PTHREAD_MUTEX_DEFAULT;
-   if (attr)
-      pthread_mutexattr_gettype(attr, &mt);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
-                              mutex, pthread_to_drd_mutex_type(mt), 0, 0, 0);
-   CALL_FN_W_WW(ret, fn, mutex, attr);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
-                              mutex, 0, 0, 0, 0);
-   return ret;
+  int ret;
+  int res;
+  OrigFn fn;
+  int mt;
+  VALGRIND_GET_ORIG_FN(fn);
+  mt = PTHREAD_MUTEX_DEFAULT;
+  if (attr)
+    pthread_mutexattr_gettype(attr, &mt);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
+                             mutex, pthread_to_drd_mutex_type(mt), 0, 0, 0);
+  CALL_FN_W_WW(ret, fn, mutex, attr);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
+                             mutex, 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_mutex_destroy
 PTH_FUNC(int, pthreadZumutexZudestroy,
-              pthread_mutex_t *mutex)
+         pthread_mutex_t *mutex)
 {
-   int ret;
-   int res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
-                              mutex, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, mutex);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
-                              mutex, mutex_type(mutex), 0, 0, 0);
-   return ret;
+  int ret;
+  int res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
+                             mutex, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, mutex);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
+                             mutex, mutex_type(mutex), 0, 0, 0);
+  return ret;
 }
 
 // pthread_mutex_lock
 PTH_FUNC(int, pthreadZumutexZulock, // pthread_mutex_lock
-              pthread_mutex_t *mutex)
+         pthread_mutex_t *mutex)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                              mutex, mutex_type(mutex), 0, 0, 0);
-   CALL_FN_W_W(ret, fn, mutex);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_MUTEX_LOCK,
-                              mutex, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                             mutex, mutex_type(mutex), 0, 0, 0);
+  CALL_FN_W_W(ret, fn, mutex);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_MUTEX_LOCK,
+                             mutex, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_mutex_trylock
 PTH_FUNC(int, pthreadZumutexZutrylock, // pthread_mutex_trylock
-              pthread_mutex_t *mutex)
+         pthread_mutex_t *mutex)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                              mutex, mutex_type(mutex), 0, 0, 0);
-   CALL_FN_W_W(ret, fn, mutex);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                              mutex, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                             mutex, mutex_type(mutex), 0, 0, 0);
+  CALL_FN_W_W(ret, fn, mutex);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                             mutex, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_mutex_timedlock
 PTH_FUNC(int, pthreadZumutexZutimedlock, // pthread_mutex_timedlock
-              pthread_mutex_t *mutex,
-              const struct timespec *abs_timeout)
+         pthread_mutex_t *mutex,
+         const struct timespec *abs_timeout)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                              mutex, mutex_type(mutex), 0, 0, 0);
-   CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                              mutex, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                             mutex, mutex_type(mutex), 0, 0, 0);
+  CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                             mutex, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_mutex_unlock
 PTH_FUNC(int, pthreadZumutexZuunlock, // pthread_mutex_unlock
-              pthread_mutex_t *mutex)
+         pthread_mutex_t *mutex)
 {
-   int ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1,
-                              VG_USERREQ__PRE_MUTEX_UNLOCK,
-                              mutex, mutex_type(mutex), 0, 0, 0);
-   CALL_FN_W_W(ret, fn, mutex);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1,
-                              VG_USERREQ__POST_MUTEX_UNLOCK,
-                              mutex, 0, 0, 0, 0);
-   return ret;
+  int ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1,
+                             VG_USERREQ__PRE_MUTEX_UNLOCK,
+                             mutex, mutex_type(mutex), 0, 0, 0);
+  CALL_FN_W_W(ret, fn, mutex);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1,
+                             VG_USERREQ__POST_MUTEX_UNLOCK,
+                             mutex, 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_cond_init
 PTH_FUNC(int, pthreadZucondZuinitZa, // pthread_cond_init*
-              pthread_cond_t* cond,
-              const pthread_condattr_t* attr)
+         pthread_cond_t* cond,
+         const pthread_condattr_t* attr)
 {
-   int ret;
-   int res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_INIT,
-                              cond, 0, 0, 0, 0);
-   CALL_FN_W_WW(ret, fn, cond, attr);
-   return ret;
+  int ret;
+  int res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_INIT,
+                             cond, 0, 0, 0, 0);
+  CALL_FN_W_WW(ret, fn, cond, attr);
+  return ret;
 }
 
 // pthread_cond_destroy
 PTH_FUNC(int, pthreadZucondZudestroyZa, // pthread_cond_destroy*
-              pthread_cond_t* cond)
+         pthread_cond_t* cond)
 {
-   int ret;
-   int res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   CALL_FN_W_W(ret, fn, cond);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_DESTROY,
-                              cond, 0, 0, 0, 0);
-   return ret;
+  int ret;
+  int res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  CALL_FN_W_W(ret, fn, cond);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_DESTROY,
+                             cond, 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_cond_wait
 PTH_FUNC(int, pthreadZucondZuwaitZa, // pthread_cond_wait*
-              pthread_cond_t *cond,
-              pthread_mutex_t *mutex)
+         pthread_cond_t *cond,
+         pthread_mutex_t *mutex)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
-                              cond, mutex, mutex_type(mutex), 0, 0);
-   CALL_FN_W_WW(ret, fn, cond, mutex);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
-                              cond, mutex, ret == 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
+                             cond, mutex, mutex_type(mutex), 0, 0);
+  CALL_FN_W_WW(ret, fn, cond, mutex);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
+                             cond, mutex, ret == 0, 0, 0);
+  return ret;
 }
 
 // pthread_cond_timedwait
 PTH_FUNC(int, pthreadZucondZutimedwaitZa, // pthread_cond_timedwait*
-              pthread_cond_t *cond,
-              pthread_mutex_t *mutex,
-              const struct timespec* abstime)
+         pthread_cond_t *cond,
+         pthread_mutex_t *mutex,
+         const struct timespec* abstime)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
-                              cond, mutex, mutex_type(mutex), 0, 0);
-   CALL_FN_W_WWW(ret, fn, cond, mutex, abstime);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
-                              cond, mutex, ret == 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
+                             cond, mutex, mutex_type(mutex), 0, 0);
+  CALL_FN_W_WWW(ret, fn, cond, mutex, abstime);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
+                             cond, mutex, ret == 0, 0, 0);
+  return ret;
 }
 
 // pthread_cond_signal
 PTH_FUNC(int, pthreadZucondZusignalZa, // pthread_cond_signal*
-              pthread_cond_t* cond)
+         pthread_cond_t* cond)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_SIGNAL,
-                              cond, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, cond);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_SIGNAL,
+                             cond, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, cond);
+  return ret;
 }
 
 // pthread_cond_broadcast
 PTH_FUNC(int, pthreadZucondZubroadcastZa, // pthread_cond_broadcast*
-              pthread_cond_t* cond)
+         pthread_cond_t* cond)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_BROADCAST,
-                              cond, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, cond);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_BROADCAST,
+                             cond, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, cond);
+  return ret;
 }
 
 
 // pthread_spin_init
 PTH_FUNC(int, pthreadZuspinZuinit, // pthread_spin_init
-              pthread_spinlock_t *spinlock,
-              int pshared)
+         pthread_spinlock_t *spinlock,
+         int pshared)
 {
-   int ret;
-   int res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SPIN_INIT_OR_UNLOCK,
-                              spinlock, mutex_type_spinlock, 0, 0, 0);
-   CALL_FN_W_WW(ret, fn, spinlock, pshared);
-   return ret;
+  int ret;
+  int res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SPIN_INIT_OR_UNLOCK,
+                             spinlock, mutex_type_spinlock, 0, 0, 0);
+  CALL_FN_W_WW(ret, fn, spinlock, pshared);
+  return ret;
 }
 
 // pthread_spin_destroy
 PTH_FUNC(int, pthreadZuspinZudestroy, // pthread_spin_destroy
-              pthread_spinlock_t *spinlock)
+         pthread_spinlock_t *spinlock)
 {
-   int ret;
-   int res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   CALL_FN_W_W(ret, fn, spinlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
-                              spinlock, mutex_type_spinlock, 0, 0, 0);
-   return ret;
+  int ret;
+  int res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  CALL_FN_W_W(ret, fn, spinlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
+                             spinlock, mutex_type_spinlock, 0, 0, 0);
+  return ret;
 }
 
 // pthread_spin_lock
 PTH_FUNC(int, pthreadZuspinZulock, // pthread_spin_lock
-              pthread_spinlock_t *spinlock)
+         pthread_spinlock_t *spinlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                              spinlock, mutex_type_spinlock, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, spinlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                              spinlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                             spinlock, mutex_type_spinlock, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, spinlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                             spinlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_spin_trylock
 PTH_FUNC(int, pthreadZuspinZutrylock, // pthread_spin_trylock
-              pthread_spinlock_t *spinlock)
+         pthread_spinlock_t *spinlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                              spinlock, mutex_type_spinlock, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, spinlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                              spinlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                             spinlock, mutex_type_spinlock, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, spinlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                             spinlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_spin_unlock
 PTH_FUNC(int, pthreadZuspinZuunlock, // pthread_spin_unlock
-              pthread_spinlock_t *spinlock)
+         pthread_spinlock_t *spinlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SPIN_INIT_OR_UNLOCK,
-                              spinlock, mutex_type_spinlock, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, spinlock);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SPIN_INIT_OR_UNLOCK,
+                             spinlock, mutex_type_spinlock, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, spinlock);
+  return ret;
 }
 
 // pthread_barrier_init
 PTH_FUNC(int, pthreadZubarrierZuinit, // pthread_barrier_init
-              pthread_barrier_t* barrier,
-              const pthread_barrierattr_t* attr,
-              unsigned count)
+         pthread_barrier_t* barrier,
+         const pthread_barrierattr_t* attr,
+         unsigned count)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
-                              barrier, pthread_barrier, count, 0, 0);
-   CALL_FN_W_WWW(ret, fn, barrier, attr, count);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
-                              barrier, pthread_barrier, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
+                             barrier, pthread_barrier, count, 0, 0);
+  CALL_FN_W_WWW(ret, fn, barrier, attr, count);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
+                             barrier, pthread_barrier, 0, 0, 0);
+  return ret;
 }
 
 // pthread_barrier_destroy
 PTH_FUNC(int, pthreadZubarrierZudestroy, // pthread_barrier_destroy
-              pthread_barrier_t* barrier)
+         pthread_barrier_t* barrier)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_DESTROY,
-                              barrier, pthread_barrier, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, barrier);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_DESTROY,
-                              barrier, pthread_barrier, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_DESTROY,
+                             barrier, pthread_barrier, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, barrier);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_DESTROY,
+                             barrier, pthread_barrier, 0, 0, 0);
+  return ret;
 }
 
 // pthread_barrier_wait
 PTH_FUNC(int, pthreadZubarrierZuwait, // pthread_barrier_wait
-              pthread_barrier_t* barrier)
+         pthread_barrier_t* barrier)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_WAIT,
-                              barrier, pthread_barrier, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, barrier);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_WAIT,
-                              barrier, pthread_barrier,
-                              ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD,
-                              0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_WAIT,
+                             barrier, pthread_barrier, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, barrier);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_WAIT,
+                             barrier, pthread_barrier,
+                             ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD,
+                             0, 0);
+  return ret;
 }
 
 
 // sem_init
 PTH_FUNC(int, semZuinitZAGLIBCZu2Zd0, // sem_init@GLIBC_2.0
-              sem_t *sem,
-              int pshared,
-              unsigned int value)
+         sem_t *sem,
+         int pshared,
+         unsigned int value)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_INIT,
-                              sem, pshared, value, 0, 0);
-   CALL_FN_W_WWW(ret, fn, sem, pshared, value);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_INIT,
-                              sem, 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_INIT,
+                             sem, pshared, value, 0, 0);
+  CALL_FN_W_WWW(ret, fn, sem, pshared, value);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_INIT,
+                             sem, 0, 0, 0, 0);
+  return ret;
 }
 
 PTH_FUNC(int, semZuinitZa, // sem_init*
-              sem_t *sem,
-              int pshared,
-              unsigned int value)
+         sem_t *sem,
+         int pshared,
+         unsigned int value)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_INIT,
-                              sem, pshared, value, 0, 0);
-   CALL_FN_W_WWW(ret, fn, sem, pshared, value);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_INIT,
-                              sem, 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_INIT,
+                             sem, pshared, value, 0, 0);
+  CALL_FN_W_WWW(ret, fn, sem, pshared, value);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_INIT,
+                             sem, 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_destroy
 PTH_FUNC(int, semZudestroyZAGLIBCZu2Zd0, // sem_destroy@GLIBC_2.0
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_DESTROY,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_DESTROY,
-                              sem, 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_DESTROY,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_DESTROY,
+                             sem, 0, 0, 0, 0);
+  return ret;
 }
 
 PTH_FUNC(int, semZudestroyZa, // sem_destroy*
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_DESTROY,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_DESTROY,
-                              sem, 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_DESTROY,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_DESTROY,
+                             sem, 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_wait
 PTH_FUNC(int, semZuwaitZAGLIBCZu2Zd0, // sem_wait@GLIBC_2.0
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_wait
 PTH_FUNC(int, semZuwaitZa, // sem_wait*
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_trywait
 PTH_FUNC(int, semZutrywaitZAGLIBCZu2Zd0, // sem_trywait@GLIBC_2.0
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 PTH_FUNC(int, semZutrywaitZa, // sem_trywait*
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_timedwait
 PTH_FUNC(int, semZutimedwait, // sem_timedwait
-              sem_t *sem, const struct timespec *abs_timeout)
+         sem_t *sem, const struct timespec *abs_timeout)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_WW(ret, fn, sem, abs_timeout);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_WW(ret, fn, sem, abs_timeout);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_post
 PTH_FUNC(int, semZupostZAGLIBCZu2Zd0, // sem_post@GLIBC_2.0
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_POST,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_POST,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_POST,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_POST,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // sem_post
 PTH_FUNC(int, semZupostZa, // sem_post*
-              sem_t *sem)
+         sem_t *sem)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_POST,
-                              sem, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, sem);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_POST,
-                              sem, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_POST,
+                             sem, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, sem);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_POST,
+                             sem, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_init
@@ -821,14 +821,14 @@
          pthread_rwlock_t* rwlock,
          const pthread_rwlockattr_t* attr)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_INIT,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_WW(ret, fn, rwlock, attr);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_INIT,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_WW(ret, fn, rwlock, attr);
+  return ret;
 }
 
 // pthread_rwlock_destroy
@@ -836,14 +836,14 @@
          pthreadZurwlockZudestroyZa, // pthread_rwlock_destroy*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_DESTROY,
-                              rwlock, 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_DESTROY,
+                             rwlock, 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_rdlock
@@ -851,16 +851,16 @@
          pthreadZurwlockZurdlockZa, // pthread_rwlock_rdlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_wrlock
@@ -868,16 +868,16 @@
          pthreadZurwlockZuwrlockZa, // pthread_rwlock_wrlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_timedrdlock
@@ -885,16 +885,16 @@
          pthreadZurwlockZutimedrdlockZa, // pthread_rwlock_timedrdlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_timedwrlock
@@ -902,16 +902,16 @@
          pthreadZurwlockZutimedwrlockZa, // pthread_rwlock_timedwrlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_tryrdlock
@@ -919,16 +919,16 @@
          pthreadZurwlockZutryrdlockZa, // pthread_rwlock_tryrdlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_trywrlock
@@ -936,16 +936,16 @@
          pthreadZurwlockZutrywrlockZa, // pthread_rwlock_trywrlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
 
 // pthread_rwlock_unlock
@@ -953,21 +953,14 @@
          pthreadZurwlockZuunlockZa, // pthread_rwlock_unlock*
          pthread_rwlock_t* rwlock)
 {
-   int   ret;
-   int   res;
-   OrigFn fn;
-   VALGRIND_GET_ORIG_FN(fn);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_UNLOCK,
-                              rwlock, 0, 0, 0, 0);
-   CALL_FN_W_W(ret, fn, rwlock);
-   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_UNLOCK,
-                              rwlock, ret == 0, 0, 0, 0);
-   return ret;
+  int   ret;
+  int   res;
+  OrigFn fn;
+  VALGRIND_GET_ORIG_FN(fn);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_UNLOCK,
+                             rwlock, 0, 0, 0, 0);
+  CALL_FN_W_W(ret, fn, rwlock);
+  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_UNLOCK,
+                             rwlock, ret == 0, 0, 0, 0);
+  return ret;
 }
-
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
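
(Editorial note, not part of the commit.) The wrappers reindented above all share the same general three-step shape: VALGRIND_GET_ORIG_FN fetches the real pthread/semaphore routine, a PRE_* client request tells the DRD core which synchronization operation is about to happen, CALL_FN_W_* forwards the call, and a POST_* client request reports whether it succeeded (a few wrappers, such as the rwlock init/destroy ones, only need one of the two notifications). The following standalone C sketch illustrates that shape only; notify_pre()/notify_post() and wrapped_sem_wait() are made-up placeholders for the client-request machinery, not part of DRD or Valgrind.

/* Illustrative sketch of the pre/call/post wrapper structure.
 * notify_pre()/notify_post() stand in for VALGRIND_DO_CLIENT_REQUEST,
 * and the direct sem_wait() call stands in for CALL_FN_W_W(ret, fn, sem). */
#include <semaphore.h>
#include <stdio.h>

static void notify_pre(const char* op, void* obj)
{
  printf("pre  %s %p\n", op, obj);
}

static void notify_post(const char* op, void* obj, int succeeded)
{
  printf("post %s %p ok=%d\n", op, obj, succeeded);
}

/* Same shape as the PTH_FUNC wrappers above: announce, forward, report. */
static int wrapped_sem_wait(sem_t* sem)
{
  int ret;
  notify_pre("sem_wait", sem);
  ret = sem_wait(sem);
  notify_post("sem_wait", sem, ret == 0);
  return ret;
}

int main(void)
{
  sem_t sem;
  sem_init(&sem, 0, 1);   /* count 1, so the wrapped wait does not block */
  wrapped_sem_wait(&sem);
  sem_destroy(&sem);
  return 0;
}
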
diff --git a/exp-drd/drd_rwlock.c b/exp-drd/drd_rwlock.c
index d3a5ed9..5ba6706 100644
--- a/exp-drd/drd_rwlock.c
+++ b/exp-drd/drd_rwlock.c
@@ -509,10 +509,3 @@
     }
   }
 }
-
-
-/*
- * Local variables:
- * c-basic-offset: 2
- * End:
- */
diff --git a/exp-drd/drd_segment.c b/exp-drd/drd_segment.c
index 575e8bd..d8f1943 100644
--- a/exp-drd/drd_segment.c
+++ b/exp-drd/drd_segment.c
@@ -115,15 +115,15 @@
 void sg_delete(Segment* const sg)
 {
 #if 1
-   if (sg_get_trace())
-   {
-      char msg[256];
-      VG_(snprintf)(msg, sizeof(msg),
-                    "Discarding the segment with vector clock ");
-      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                 &sg->vc);
-      VG_(message)(Vg_UserMsg, "%s", msg);
-   }
+  if (sg_get_trace())
+  {
+    char msg[256];
+    VG_(snprintf)(msg, sizeof(msg),
+                  "Discarding the segment with vector clock ");
+    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+               &sg->vc);
+    VG_(message)(Vg_UserMsg, "%s", msg);
+  }
 #endif
 
   s_segments_alive_count--;
diff --git a/exp-drd/drd_suppression.c b/exp-drd/drd_suppression.c
index 907b73a..cad09c4 100644
--- a/exp-drd/drd_suppression.c
+++ b/exp-drd/drd_suppression.c
@@ -123,9 +123,3 @@
   tl_assert(a1 < a2);
   bm_clear(s_suppressed, a1, a2);
 }
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
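
(Editorial note, not part of the commit.) The drd_thread.c hunks below reindent the vector-clock bookkeeping: thread_compute_minimum_vc()/thread_compute_maximum_vc() fold vc_min()/vc_combine() over the newest segment of every thread, and the conflict scan breaks out early once vc_lte(&q->vc, &p->vc) holds. As a minimal standalone refresher on those three operations, here is a fixed-size sketch; the VC type and helper names are illustrative stand-ins, not DRD's actual dynamically sized VectorClock API.

/* Illustrative sketch of the vector-clock operations used below. */
#include <stdbool.h>
#include <stdio.h>

#define N_THREADS 4

typedef struct { unsigned c[N_THREADS]; } VC;

/* a happened before or at b: every component of a is <= that of b. */
static bool vc_lte(const VC* a, const VC* b)
{
  for (int i = 0; i < N_THREADS; i++)
    if (a->c[i] > b->c[i])
      return false;
  return true;
}

/* Component-wise minimum: segments whose clock is <= this minimum are
 * ordered before every thread and can be discarded. */
static void vc_min(VC* acc, const VC* vc)
{
  for (int i = 0; i < N_THREADS; i++)
    if (vc->c[i] < acc->c[i])
      acc->c[i] = vc->c[i];
}

/* Component-wise maximum: what combining clocks after a join amounts to. */
static void vc_combine(VC* acc, const VC* vc)
{
  for (int i = 0; i < N_THREADS; i++)
    if (vc->c[i] > acc->c[i])
      acc->c[i] = vc->c[i];
}

int main(void)
{
  VC a = {{2, 1, 0, 0}}, b = {{3, 1, 4, 0}}, m;
  printf("a <= b: %d\n", vc_lte(&a, &b));  /* prints 1 */
  m = a;
  vc_min(&m, &b);                           /* m stays {2,1,0,0} */
  vc_combine(&a, &b);                       /* a becomes {3,1,4,0} */
  printf("min[0]=%u combined[2]=%u\n", m.c[0], a.c[2]);
  return 0;
}
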
diff --git a/exp-drd/drd_thread.c b/exp-drd/drd_thread.c
index 19a8fcc..d8c5d19 100644
--- a/exp-drd/drd_thread.c
+++ b/exp-drd/drd_thread.c
@@ -64,20 +64,20 @@
 
 void thread_trace_context_switches(const Bool t)
 {
-   s_trace_context_switches = t;
+  s_trace_context_switches = t;
 }
 
 void thread_trace_danger_set(const Bool t)
 {
-   s_trace_danger_set = t;
+  s_trace_danger_set = t;
 }
 
 __inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
 {
-   return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
-           && ! (s_threadinfo[tid].vg_thread_exists == False
-                 && s_threadinfo[tid].posix_thread_exists == False
-                 && s_threadinfo[tid].detached_posix_thread == False));
+  return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
+          && ! (s_threadinfo[tid].vg_thread_exists == False
+                && s_threadinfo[tid].posix_thread_exists == False
+                && s_threadinfo[tid].detached_posix_thread == False));
 }
 
 /**
@@ -86,84 +86,84 @@
  **/
 DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
 {
-   int i;
+  int i;
 
-   if (tid == VG_INVALID_THREADID)
-      return DRD_INVALID_THREADID;
+  if (tid == VG_INVALID_THREADID)
+    return DRD_INVALID_THREADID;
 
-   for (i = 1; i < DRD_N_THREADS; i++)
-   {
-      if (s_threadinfo[i].vg_thread_exists == True
-          && s_threadinfo[i].vg_threadid == tid)
-      {
-         return i;
-      }
-   }
+  for (i = 1; i < DRD_N_THREADS; i++)
+  {
+    if (s_threadinfo[i].vg_thread_exists == True
+        && s_threadinfo[i].vg_threadid == tid)
+    {
+      return i;
+    }
+  }
 
-   return DRD_INVALID_THREADID;
+  return DRD_INVALID_THREADID;
 }
 
 static
 DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
 {
-   int i;
+  int i;
 
-   tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);
+  tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);
 
-   for (i = 1; i < DRD_N_THREADS; i++)
-   {
-      if (s_threadinfo[i].vg_thread_exists == False
-          && s_threadinfo[i].posix_thread_exists == False
-          && s_threadinfo[i].detached_posix_thread == False)
-      {
-         s_threadinfo[i].vg_thread_exists = True;
-         s_threadinfo[i].vg_threadid   = tid;
-         s_threadinfo[i].pt_threadid   = INVALID_POSIX_THREADID;
-         s_threadinfo[i].stack_min_min = 0;
-         s_threadinfo[i].stack_min     = 0;
-         s_threadinfo[i].stack_startup = 0;
-         s_threadinfo[i].stack_max     = 0;
-         VG_(snprintf)(s_threadinfo[i].name, sizeof(s_threadinfo[i].name),
-                       "thread %d", tid);
-         s_threadinfo[i].name[sizeof(s_threadinfo[i].name) - 1] = 0;
-         s_threadinfo[i].is_recording  = True;
-         s_threadinfo[i].synchr_nesting = 0;
-         if (s_threadinfo[i].first != 0)
-            VG_(printf)("drd thread id = %d\n", i);
-         tl_assert(s_threadinfo[i].first == 0);
-         tl_assert(s_threadinfo[i].last == 0);
-         return i;
-      }
-   }
+  for (i = 1; i < DRD_N_THREADS; i++)
+  {
+    if (s_threadinfo[i].vg_thread_exists == False
+        && s_threadinfo[i].posix_thread_exists == False
+        && s_threadinfo[i].detached_posix_thread == False)
+    {
+      s_threadinfo[i].vg_thread_exists = True;
+      s_threadinfo[i].vg_threadid   = tid;
+      s_threadinfo[i].pt_threadid   = INVALID_POSIX_THREADID;
+      s_threadinfo[i].stack_min_min = 0;
+      s_threadinfo[i].stack_min     = 0;
+      s_threadinfo[i].stack_startup = 0;
+      s_threadinfo[i].stack_max     = 0;
+      VG_(snprintf)(s_threadinfo[i].name, sizeof(s_threadinfo[i].name),
+                    "thread %d", tid);
+      s_threadinfo[i].name[sizeof(s_threadinfo[i].name) - 1] = 0;
+      s_threadinfo[i].is_recording  = True;
+      s_threadinfo[i].synchr_nesting = 0;
+      if (s_threadinfo[i].first != 0)
+        VG_(printf)("drd thread id = %d\n", i);
+      tl_assert(s_threadinfo[i].first == 0);
+      tl_assert(s_threadinfo[i].last == 0);
+      return i;
+    }
+  }
 
-   tl_assert(False);
+  tl_assert(False);
 
-   return DRD_INVALID_THREADID;
+  return DRD_INVALID_THREADID;
 }
 
 DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
 {
-   int i;
+  int i;
 
-   tl_assert(tid != INVALID_POSIX_THREADID);
+  tl_assert(tid != INVALID_POSIX_THREADID);
 
-   for (i = 1; i < DRD_N_THREADS; i++)
-   {
-      if (s_threadinfo[i].posix_thread_exists
-          && s_threadinfo[i].pt_threadid == tid)
-      {
-         return i;
-      }
-   }
-   return DRD_INVALID_THREADID;
+  for (i = 1; i < DRD_N_THREADS; i++)
+  {
+    if (s_threadinfo[i].posix_thread_exists
+        && s_threadinfo[i].pt_threadid == tid)
+    {
+      return i;
+    }
+  }
+  return DRD_INVALID_THREADID;
 }
 
 ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   return (s_threadinfo[tid].vg_thread_exists
-           ? s_threadinfo[tid].vg_threadid
-           : VG_INVALID_THREADID);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  return (s_threadinfo[tid].vg_thread_exists
+          ? s_threadinfo[tid].vg_threadid
+          : VG_INVALID_THREADID);
 }
 
 /** Sanity check of the doubly linked list of segments referenced by a
@@ -172,37 +172,37 @@
  */
 static Bool sane_ThreadInfo(const ThreadInfo* const ti)
 {
-   Segment* p;
-   for (p = ti->first; p; p = p->next) {
-      if (p->next && p->next->prev != p)
-         return False;
-      if (p->next == 0 && p != ti->last)
-         return False;
-   }
-   for (p = ti->last; p; p = p->prev) {
-      if (p->prev && p->prev->next != p)
-         return False;
-      if (p->prev == 0 && p != ti->first)
-         return False;
-   }
-   return True;
+  Segment* p;
+  for (p = ti->first; p; p = p->next) {
+    if (p->next && p->next->prev != p)
+      return False;
+    if (p->next == 0 && p != ti->last)
+      return False;
+  }
+  for (p = ti->last; p; p = p->prev) {
+    if (p->prev && p->prev->next != p)
+      return False;
+    if (p->prev == 0 && p != ti->first)
+      return False;
+  }
+  return True;
 }
 
 DrdThreadId thread_pre_create(const DrdThreadId creator,
                               const ThreadId vg_created)
 {
-   DrdThreadId created;
+  DrdThreadId created;
 
-   tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
-   created = VgThreadIdToNewDrdThreadId(vg_created);
-   tl_assert(0 <= created && created < DRD_N_THREADS
-             && created != DRD_INVALID_THREADID);
+  tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
+  created = VgThreadIdToNewDrdThreadId(vg_created);
+  tl_assert(0 <= created && created < DRD_N_THREADS
+            && created != DRD_INVALID_THREADID);
 
-   tl_assert(s_threadinfo[created].first == 0);
-   tl_assert(s_threadinfo[created].last == 0);
-   thread_append_segment(created, sg_new(creator, created));
+  tl_assert(s_threadinfo[created].first == 0);
+  tl_assert(s_threadinfo[created].last == 0);
+  thread_append_segment(created, sg_new(creator, created));
 
-   return created;
+  return created;
 }
 
 /** Allocate the first segment for a thread. Call this just after
@@ -210,18 +210,18 @@
  */
 DrdThreadId thread_post_create(const ThreadId vg_created)
 {
-   const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);
+  const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);
 
-   tl_assert(0 <= created && created < DRD_N_THREADS
-             && created != DRD_INVALID_THREADID);
+  tl_assert(0 <= created && created < DRD_N_THREADS
+            && created != DRD_INVALID_THREADID);
 
-   s_threadinfo[created].stack_max     = VG_(thread_get_stack_max)(vg_created);
-   s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
-   s_threadinfo[created].stack_min     = s_threadinfo[created].stack_max;
-   s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
-   tl_assert(s_threadinfo[created].stack_max != 0);
+  s_threadinfo[created].stack_max     = VG_(thread_get_stack_max)(vg_created);
+  s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
+  s_threadinfo[created].stack_min     = s_threadinfo[created].stack_max;
+  s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
+  tl_assert(s_threadinfo[created].stack_max != 0);
 
-   return created;
+  return created;
 }
 
 /* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,     */
@@ -231,66 +231,66 @@
 void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
 {
 #if 0
-   VG_(message)(Vg_DebugMsg, "thread_set_stack_startup: thread %d (%d)"
-                " stack 0x%x .. 0x%lx (size %d)",
-                s_threadinfo[tid].vg_threadid, tid,
-                stack_startup,
-                s_threadinfo[tid].stack_max,
-                s_threadinfo[tid].stack_max - stack_startup);
+  VG_(message)(Vg_DebugMsg, "thread_set_stack_startup: thread %d (%d)"
+               " stack 0x%x .. 0x%lx (size %d)",
+               s_threadinfo[tid].vg_threadid, tid,
+               stack_startup,
+               s_threadinfo[tid].stack_max,
+               s_threadinfo[tid].stack_max - stack_startup);
 #endif
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
-   tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
-   s_threadinfo[tid].stack_startup = stack_startup;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
+  tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
+  s_threadinfo[tid].stack_startup = stack_startup;
 }
 
 Addr thread_get_stack_min(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   return s_threadinfo[tid].stack_min;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  return s_threadinfo[tid].stack_min;
 }
 
 void thread_set_stack_min(const DrdThreadId tid, const Addr stack_min)
 {
 #if 0
-   VG_(message)(Vg_DebugMsg, "thread %d (%d) stack_min = 0x%x"
-                " (size %d, max %d, delta %d)",
-                s_threadinfo[tid].vg_threadid, tid,
-                stack_min,
-                s_threadinfo[tid].stack_max - stack_min,
-                s_threadinfo[tid].stack_max - s_threadinfo[tid].stack_min_min,
-                s_threadinfo[tid].stack_min - stack_min);
+  VG_(message)(Vg_DebugMsg, "thread %d (%d) stack_min = 0x%x"
+               " (size %d, max %d, delta %d)",
+               s_threadinfo[tid].vg_threadid, tid,
+               stack_min,
+               s_threadinfo[tid].stack_max - stack_min,
+               s_threadinfo[tid].stack_max - s_threadinfo[tid].stack_min_min,
+               s_threadinfo[tid].stack_min - stack_min);
 #endif
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   if (s_threadinfo[tid].stack_max)
-   {
-      s_threadinfo[tid].stack_min = stack_min;
-      if (stack_min < s_threadinfo[tid].stack_min_min)
-      {
-         s_threadinfo[tid].stack_min_min = stack_min;
-      }
-      tl_assert(s_threadinfo[tid].stack_min_min
-                <= s_threadinfo[tid].stack_min);
-      tl_assert(s_threadinfo[tid].stack_min < s_threadinfo[tid].stack_max);
-   }
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  if (s_threadinfo[tid].stack_max)
+  {
+    s_threadinfo[tid].stack_min = stack_min;
+    if (stack_min < s_threadinfo[tid].stack_min_min)
+    {
+      s_threadinfo[tid].stack_min_min = stack_min;
+    }
+    tl_assert(s_threadinfo[tid].stack_min_min
+              <= s_threadinfo[tid].stack_min);
+    tl_assert(s_threadinfo[tid].stack_min < s_threadinfo[tid].stack_max);
+  }
 }
 
 DrdThreadId thread_lookup_stackaddr(const Addr a,
                                     Addr* const stack_min,
                                     Addr* const stack_max)
 {
-   unsigned i;
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      if (s_threadinfo[i].stack_min <= a && a <= s_threadinfo[i].stack_max)
-      {
-         *stack_min = s_threadinfo[i].stack_min;
-         *stack_max = s_threadinfo[i].stack_max;
-         return i;
-      }
-   }
-   return DRD_INVALID_THREADID;
+  unsigned i;
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    if (s_threadinfo[i].stack_min <= a && a <= s_threadinfo[i].stack_max)
+    {
+      *stack_min = s_threadinfo[i].stack_min;
+      *stack_max = s_threadinfo[i].stack_max;
+      return i;
+    }
+  }
+  return DRD_INVALID_THREADID;
 }
 
 /**
@@ -299,22 +299,22 @@
  */
 void thread_delete(const DrdThreadId tid)
 {
-   Segment* sg;
-   Segment* sg_prev;
+  Segment* sg;
+  Segment* sg_prev;
 
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[tid].synchr_nesting == 0);
-   for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
-   {
-      sg_prev = sg->prev;
-      sg_delete(sg);
-   }
-   s_threadinfo[tid].vg_thread_exists = False;
-   s_threadinfo[tid].posix_thread_exists = False;
-   tl_assert(s_threadinfo[tid].detached_posix_thread == False);
-   s_threadinfo[tid].first = 0;
-   s_threadinfo[tid].last = 0;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[tid].synchr_nesting == 0);
+  for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
+  {
+    sg_prev = sg->prev;
+    sg_delete(sg);
+  }
+  s_threadinfo[tid].vg_thread_exists = False;
+  s_threadinfo[tid].posix_thread_exists = False;
+  tl_assert(s_threadinfo[tid].detached_posix_thread == False);
+  s_threadinfo[tid].first = 0;
+  s_threadinfo[tid].last = 0;
 }
 
 /* Called after a thread performed its last memory access and before   */
@@ -322,163 +322,163 @@
 /* joinable threads, not for detached threads.                         */
 void thread_finished(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
 
-   thread_stop_using_mem(s_threadinfo[tid].stack_min,
-                         s_threadinfo[tid].stack_max);
+  thread_stop_using_mem(s_threadinfo[tid].stack_min,
+                        s_threadinfo[tid].stack_max);
 
-   s_threadinfo[tid].vg_thread_exists = False;
+  s_threadinfo[tid].vg_thread_exists = False;
 
-   if (s_threadinfo[tid].detached_posix_thread)
-   {
-      /* Once a detached thread has finished, its stack is deallocated and   */
-      /* should no longer be taken into account when computing the danger set*/
-      s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;
+  if (s_threadinfo[tid].detached_posix_thread)
+  {
+    /* Once a detached thread has finished, its stack is deallocated and   */
+    /* should no longer be taken into account when computing the danger set*/
+    s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;
 
-      /* For a detached thread, calling pthread_exit() invalidates the     */
-      /* POSIX thread ID associated with the detached thread. For joinable */
-      /* POSIX threads however, the POSIX thread ID remains live after the */
-      /* pthread_exit() call until pthread_join() is called.               */
-      s_threadinfo[tid].posix_thread_exists = False;
-   }
+    /* For a detached thread, calling pthread_exit() invalidates the     */
+    /* POSIX thread ID associated with the detached thread. For joinable */
+    /* POSIX threads however, the POSIX thread ID remains live after the */
+    /* pthread_exit() call until pthread_join() is called.               */
+    s_threadinfo[tid].posix_thread_exists = False;
+  }
 }
 
 void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
-   tl_assert(ptid != INVALID_POSIX_THREADID);
-   s_threadinfo[tid].posix_thread_exists = True;
-   s_threadinfo[tid].pt_threadid         = ptid;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
+  tl_assert(ptid != INVALID_POSIX_THREADID);
+  s_threadinfo[tid].posix_thread_exists = True;
+  s_threadinfo[tid].pt_threadid         = ptid;
 }
 
 Bool thread_get_joinable(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   return ! s_threadinfo[tid].detached_posix_thread;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  return ! s_threadinfo[tid].detached_posix_thread;
 }
 
 void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(!! joinable == joinable);
-   tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(!! joinable == joinable);
+  tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
 #if 0
-   VG_(message)(Vg_DebugMsg,
-                "thread_set_joinable(%d/%d, %s)",
-                tid,
-                s_threadinfo[tid].vg_threadid,
-                joinable ? "joinable" : "detached");
+  VG_(message)(Vg_DebugMsg,
+               "thread_set_joinable(%d/%d, %s)",
+               tid,
+               s_threadinfo[tid].vg_threadid,
+               joinable ? "joinable" : "detached");
 #endif
-   s_threadinfo[tid].detached_posix_thread = ! joinable;
+  s_threadinfo[tid].detached_posix_thread = ! joinable;
 }
 
 const char* thread_get_name(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   return s_threadinfo[tid].name;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  return s_threadinfo[tid].name;
 }
 
 void thread_set_name(const DrdThreadId tid, const char* const name)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   VG_(strncpy)(s_threadinfo[tid].name, name,
-                sizeof(s_threadinfo[tid].name));
-   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  VG_(strncpy)(s_threadinfo[tid].name, name,
+               sizeof(s_threadinfo[tid].name));
+  s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
 }
 
 void thread_set_name_fmt(const DrdThreadId tid, const char* const fmt,
                          const UWord arg)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   VG_(snprintf)(s_threadinfo[tid].name, sizeof(s_threadinfo[tid].name),
-                 fmt, arg);
-   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  VG_(snprintf)(s_threadinfo[tid].name, sizeof(s_threadinfo[tid].name),
+                fmt, arg);
+  s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
 }
 
 DrdThreadId thread_get_running_tid(void)
 {
-   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
-   return s_drd_running_tid;
+  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
+  return s_drd_running_tid;
 }
 
 void thread_set_vg_running_tid(const ThreadId vg_tid)
 {
-   tl_assert(vg_tid != VG_INVALID_THREADID);
+  tl_assert(vg_tid != VG_INVALID_THREADID);
 
-   if (vg_tid != s_vg_running_tid)
-   {
-      thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
-   }
+  if (vg_tid != s_vg_running_tid)
+  {
+    thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
+  }
 
-   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
-   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
+  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
+  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
 }
 
 void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
 {
-   tl_assert(vg_tid != VG_INVALID_THREADID);
-   tl_assert(drd_tid != DRD_INVALID_THREADID);
+  tl_assert(vg_tid != VG_INVALID_THREADID);
+  tl_assert(drd_tid != DRD_INVALID_THREADID);
    
-   if (vg_tid != s_vg_running_tid)
-   {
-      if (s_trace_context_switches
-          && s_drd_running_tid != DRD_INVALID_THREADID)
-      {
-         VG_(message)(Vg_DebugMsg,
-                      "Context switch from thread %d to thread %d",
-                      s_drd_running_tid, drd_tid);
-      }
-      s_vg_running_tid = vg_tid;
-      s_drd_running_tid = drd_tid;
-      thread_update_danger_set(drd_tid);
-      s_context_switch_count++;
-   }
+  if (vg_tid != s_vg_running_tid)
+  {
+    if (s_trace_context_switches
+        && s_drd_running_tid != DRD_INVALID_THREADID)
+    {
+      VG_(message)(Vg_DebugMsg,
+                   "Context switch from thread %d to thread %d",
+                   s_drd_running_tid, drd_tid);
+    }
+    s_vg_running_tid = vg_tid;
+    s_drd_running_tid = drd_tid;
+    thread_update_danger_set(drd_tid);
+    s_context_switch_count++;
+  }
 
-   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
-   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
+  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
+  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
 }
 
 int thread_enter_synchr(const DrdThreadId tid)
 {
-   tl_assert(IsValidDrdThreadId(tid));
-   return s_threadinfo[tid].synchr_nesting++;
+  tl_assert(IsValidDrdThreadId(tid));
+  return s_threadinfo[tid].synchr_nesting++;
 }
 
 int thread_leave_synchr(const DrdThreadId tid)
 {
-   tl_assert(IsValidDrdThreadId(tid));
-   tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
-   return --s_threadinfo[tid].synchr_nesting;
+  tl_assert(IsValidDrdThreadId(tid));
+  tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
+  return --s_threadinfo[tid].synchr_nesting;
 }
 
 int thread_get_synchr_nesting_count(const DrdThreadId tid)
 {
-   tl_assert(IsValidDrdThreadId(tid));
-   return s_threadinfo[tid].synchr_nesting;
+  tl_assert(IsValidDrdThreadId(tid));
+  return s_threadinfo[tid].synchr_nesting;
 }
 
 /** Append a new segment at the end of the segment list. */
 static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
-   sg->prev = s_threadinfo[tid].last;
-   sg->next = 0;
-   if (s_threadinfo[tid].last)
-      s_threadinfo[tid].last->next = sg;
-   s_threadinfo[tid].last = sg;
-   if (s_threadinfo[tid].first == 0)
-      s_threadinfo[tid].first = sg;
-   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
+  sg->prev = s_threadinfo[tid].last;
+  sg->next = 0;
+  if (s_threadinfo[tid].last)
+    s_threadinfo[tid].last->next = sg;
+  s_threadinfo[tid].last = sg;
+  if (s_threadinfo[tid].first == 0)
+    s_threadinfo[tid].first = sg;
+  tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
 }
 
 /** Remove a segment from the segment list of thread threadid, and free the
@@ -486,28 +486,28 @@
  */
 static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
 
-   if (sg->prev)
-      sg->prev->next = sg->next;
-   if (sg->next)
-      sg->next->prev = sg->prev;
-   if (sg == s_threadinfo[tid].first)
-      s_threadinfo[tid].first = sg->next;
-   if (sg == s_threadinfo[tid].last)
-      s_threadinfo[tid].last = sg->prev;
-   sg_delete(sg);
-   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
+  if (sg->prev)
+    sg->prev->next = sg->next;
+  if (sg->next)
+    sg->next->prev = sg->prev;
+  if (sg == s_threadinfo[tid].first)
+    s_threadinfo[tid].first = sg->next;
+  if (sg == s_threadinfo[tid].last)
+    s_threadinfo[tid].last = sg->prev;
+  sg_delete(sg);
+  tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
 }
 
 VectorClock* thread_get_vc(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[tid].last);
-   return &s_threadinfo[tid].last->vc;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[tid].last);
+  return &s_threadinfo[tid].last->vc;
 }
 
 /**
@@ -517,44 +517,44 @@
  */
 static void thread_compute_minimum_vc(VectorClock* vc)
 {
-   unsigned i;
-   Bool first;
-   Segment* latest_sg;
+  unsigned i;
+  Bool first;
+  Segment* latest_sg;
 
-   first = True;
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      latest_sg = s_threadinfo[i].last;
-      if (latest_sg)
-      {
-         if (first)
-            vc_assign(vc, &latest_sg->vc);
-         else
-            vc_min(vc, &latest_sg->vc);
-         first = False;
-      }
-   }
+  first = True;
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    latest_sg = s_threadinfo[i].last;
+    if (latest_sg)
+    {
+      if (first)
+        vc_assign(vc, &latest_sg->vc);
+      else
+        vc_min(vc, &latest_sg->vc);
+      first = False;
+    }
+  }
 }
 
 static void thread_compute_maximum_vc(VectorClock* vc)
 {
-   unsigned i;
-   Bool first;
-   Segment* latest_sg;
+  unsigned i;
+  Bool first;
+  Segment* latest_sg;
 
-   first = True;
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      latest_sg = s_threadinfo[i].last;
-      if (latest_sg)
-      {
-         if (first)
-            vc_assign(vc, &latest_sg->vc);
-         else
-            vc_combine(vc, &latest_sg->vc);
-         first = False;
-      }
-   }
+  first = True;
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    latest_sg = s_threadinfo[i].last;
+    if (latest_sg)
+    {
+      if (first)
+        vc_assign(vc, &latest_sg->vc);
+      else
+        vc_combine(vc, &latest_sg->vc);
+      first = False;
+    }
+  }
 }
 
 /**
@@ -564,44 +564,44 @@
  */
 static void thread_discard_ordered_segments(void)
 {
-   unsigned i;
-   VectorClock thread_vc_min;
+  unsigned i;
+  VectorClock thread_vc_min;
 
-   s_discard_ordered_segments_count++;
+  s_discard_ordered_segments_count++;
 
-   vc_init(&thread_vc_min, 0, 0);
-   thread_compute_minimum_vc(&thread_vc_min);
-   if (sg_get_trace())
-   {
-      char msg[256];
-      VectorClock thread_vc_max;
+  vc_init(&thread_vc_min, 0, 0);
+  thread_compute_minimum_vc(&thread_vc_min);
+  if (sg_get_trace())
+  {
+    char msg[256];
+    VectorClock thread_vc_max;
 
-      vc_init(&thread_vc_max, 0, 0);
-      thread_compute_maximum_vc(&thread_vc_max);
-      VG_(snprintf)(msg, sizeof(msg),
-                    "Discarding ordered segments -- min vc is ");
-      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                 &thread_vc_min);
-      VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                    ", max vc is ");
-      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                 &thread_vc_max);
-      VG_(message)(Vg_DebugMsg, "%s", msg);
-      vc_cleanup(&thread_vc_max);
-   }
+    vc_init(&thread_vc_max, 0, 0);
+    thread_compute_maximum_vc(&thread_vc_max);
+    VG_(snprintf)(msg, sizeof(msg),
+                  "Discarding ordered segments -- min vc is ");
+    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+               &thread_vc_min);
+    VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                  ", max vc is ");
+    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+               &thread_vc_max);
+    VG_(message)(Vg_DebugMsg, "%s", msg);
+    vc_cleanup(&thread_vc_max);
+  }
 
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      Segment* sg;
-      Segment* sg_next;
-      for (sg = s_threadinfo[i].first;
-           sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
-           sg = sg_next)
-      {
-         thread_discard_segment(i, sg);
-      }
-   }
-   vc_cleanup(&thread_vc_min);
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    Segment* sg;
+    Segment* sg_next;
+    for (sg = s_threadinfo[i].first;
+         sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
+         sg = sg_next)
+    {
+      thread_discard_segment(i, sg);
+    }
+  }
+  vc_cleanup(&thread_vc_min);
 }
 
 /**
@@ -610,43 +610,43 @@
  */
 void thread_new_segment(const DrdThreadId tid)
 {
-   Segment* sg;
+  Segment* sg;
 
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
 
-   sg = sg_new(tid, tid);
-   thread_append_segment(tid, sg);
+  sg = sg_new(tid, tid);
+  thread_append_segment(tid, sg);
 
-   thread_discard_ordered_segments();
+  thread_discard_ordered_segments();
 
-   if (tid == s_drd_running_tid)
-   {
-      /* Every change in the vector clock of the current thread may cause */
-      /* segments that were previously ordered to this thread to become   */
-      /* unordered. Hence, recalculate the danger set if the vector clock */
-      /* of the current thread is updated.                                */
-      thread_update_danger_set(tid);
-   }
+  if (tid == s_drd_running_tid)
+  {
+    /* Every change in the vector clock of the current thread may cause */
+    /* segments that were previously ordered to this thread to become   */
+    /* unordered. Hence, recalculate the danger set if the vector clock */
+    /* of the current thread is updated.                                */
+    thread_update_danger_set(tid);
+  }
 }
 
 /** Call this function after thread 'joiner' joined thread 'joinee'. */
 void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
 {
-   tl_assert(joiner != joinee);
-   tl_assert(0 <= joiner && joiner < DRD_N_THREADS
-             && joiner != DRD_INVALID_THREADID);
-   tl_assert(0 <= joinee && joinee < DRD_N_THREADS
-             && joinee != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[joiner].last);
-   tl_assert(s_threadinfo[joinee].last);
-   vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
-   thread_discard_ordered_segments();
+  tl_assert(joiner != joinee);
+  tl_assert(0 <= joiner && joiner < DRD_N_THREADS
+            && joiner != DRD_INVALID_THREADID);
+  tl_assert(0 <= joinee && joinee < DRD_N_THREADS
+            && joinee != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[joiner].last);
+  tl_assert(s_threadinfo[joinee].last);
+  vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
+  thread_discard_ordered_segments();
 
-   if (joiner == s_drd_running_tid)
-   {
-      thread_update_danger_set(joiner);
-   }
+  if (joiner == s_drd_running_tid)
+  {
+    thread_update_danger_set(joiner);
+  }
 }
 
 /** Call this function after thread 'tid' had to wait because of thread
@@ -655,11 +655,11 @@
  */
 void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[tid].last);
-   tl_assert(vc);
-   vc_combine(&s_threadinfo[tid].last->vc, vc);
-   thread_discard_ordered_segments();
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[tid].last);
+  tl_assert(vc);
+  vc_combine(&s_threadinfo[tid].last->vc, vc);
+  thread_discard_ordered_segments();
 }
 
 /** Call this function whenever a thread is no longer using the memory
@@ -668,103 +668,103 @@
  */
 void thread_stop_using_mem(const Addr a1, const Addr a2)
 {
-   DrdThreadId other_user = DRD_INVALID_THREADID;
+  DrdThreadId other_user = DRD_INVALID_THREADID;
 
-   /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
+  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
 
-   unsigned i;
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      Segment* p;
-      for (p = s_threadinfo[i].first; p; p = p->next)
+  unsigned i;
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    Segment* p;
+    for (p = s_threadinfo[i].first; p; p = p->next)
+    {
+      if (other_user == DRD_INVALID_THREADID
+          && i != s_drd_running_tid
+          && bm_has_any_access(p->bm, a1, a2))
       {
-         if (other_user == DRD_INVALID_THREADID
-             && i != s_drd_running_tid
-             && bm_has_any_access(p->bm, a1, a2))
-         {
-            other_user = i;
-         }
-         bm_clear(p->bm, a1, a2);
+        other_user = i;
       }
-   }
+      bm_clear(p->bm, a1, a2);
+    }
+  }
 
-   /* If any other thread had accessed memory in [ a1, a2 [, update the */
-   /* danger set. */
-   if (other_user != DRD_INVALID_THREADID
-       && bm_has_any_access(s_danger_set, a1, a2))
-   {
-      thread_update_danger_set(thread_get_running_tid());
-   }
+  /* If any other thread had accessed memory in [ a1, a2 [, update the */
+  /* danger set. */
+  if (other_user != DRD_INVALID_THREADID
+      && bm_has_any_access(s_danger_set, a1, a2))
+  {
+    thread_update_danger_set(thread_get_running_tid());
+  }
 }
 
 void thread_start_recording(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   tl_assert(! s_threadinfo[tid].is_recording);
-   s_threadinfo[tid].is_recording = True;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  tl_assert(! s_threadinfo[tid].is_recording);
+  s_threadinfo[tid].is_recording = True;
 }
 
 void thread_stop_recording(const DrdThreadId tid)
 {
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   tl_assert(s_threadinfo[tid].is_recording);
-   s_threadinfo[tid].is_recording = False;
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  tl_assert(s_threadinfo[tid].is_recording);
+  s_threadinfo[tid].is_recording = False;
 }
 
 void thread_print_all(void)
 {
-   unsigned i;
-   Segment* p;
+  unsigned i;
+  Segment* p;
 
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      if (s_threadinfo[i].first)
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    if (s_threadinfo[i].first)
+    {
+      VG_(printf)("**************\n"
+                  "* thread %3d (%d/%d/%d/0x%x/%d/%s) *\n"
+                  "**************\n",
+                  i,
+                  s_threadinfo[i].vg_thread_exists,
+                  s_threadinfo[i].vg_threadid,
+                  s_threadinfo[i].posix_thread_exists,
+                  s_threadinfo[i].pt_threadid,
+                  s_threadinfo[i].detached_posix_thread,
+                  s_threadinfo[i].name);
+      for (p = s_threadinfo[i].first; p; p = p->next)
       {
-         VG_(printf)("**************\n"
-                     "* thread %3d (%d/%d/%d/0x%x/%d/%s) *\n"
-                     "**************\n",
-                     i,
-                     s_threadinfo[i].vg_thread_exists,
-                     s_threadinfo[i].vg_threadid,
-                     s_threadinfo[i].posix_thread_exists,
-                     s_threadinfo[i].pt_threadid,
-                     s_threadinfo[i].detached_posix_thread,
-                     s_threadinfo[i].name);
-         for (p = s_threadinfo[i].first; p; p = p->next)
-         {
-            sg_print(p);
-         }
+        sg_print(p);
       }
-   }
+    }
+  }
 }
 
 static void show_call_stack(const DrdThreadId tid,
                             const Char* const msg,
                             ExeContext* const callstack)
 {
-   const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);
+  const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);
 
-   VG_(message)(Vg_UserMsg,
-                "%s (%s)",
-                msg,
-                thread_get_name(tid));
+  VG_(message)(Vg_UserMsg,
+               "%s (%s)",
+               msg,
+               thread_get_name(tid));
 
-   if (vg_tid != VG_INVALID_THREADID)
-   {
-      if (callstack)
-      {
-         VG_(pp_ExeContext)(callstack);
-      }
-      else
-      {
-         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
-      }
-   }
-   else
-   {
-      VG_(message)(Vg_UserMsg,
-                   "   (thread finished, call stack no longer available)");
-   }
+  if (vg_tid != VG_INVALID_THREADID)
+  {
+    if (callstack)
+    {
+      VG_(pp_ExeContext)(callstack);
+    }
+    else
+    {
+      VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
+    }
+  }
+  else
+  {
+    VG_(message)(Vg_UserMsg,
+                 "   (thread finished, call stack no longer available)");
+  }
 }
 
 static void
@@ -774,39 +774,39 @@
                                            const BmAccessTypeT access_type,
                                            const Segment* const p)
 {
-   unsigned i;
+  unsigned i;
 
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
-   tl_assert(p);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
+  tl_assert(p);
 
-   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
-   {
-      if (i != tid)
+  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
+  {
+    if (i != tid)
+    {
+      Segment* q;
+      for (q = s_threadinfo[i].last; q; q = q->prev)
       {
-         Segment* q;
-         for (q = s_threadinfo[i].last; q; q = q->prev)
-         {
-            // Since q iterates over the segments of thread i in order of 
-            // decreasing vector clocks, if q->vc <= p->vc, then 
-            // q->next->vc <= p->vc will also hold. Hence, break out of the
-            // loop once this condition is met.
-            if (vc_lte(&q->vc, &p->vc))
-               break;
-            if (! vc_lte(&p->vc, &q->vc))
-            {
-               if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
-               {
-                  tl_assert(q->stacktrace);
-                  show_call_stack(i,        "Other segment start",
-                                  q->stacktrace);
-                  show_call_stack(i,        "Other segment end",
-                                  q->next ? q->next->stacktrace : 0);
-               }
-            }
-         }
+        // Since q iterates over the segments of thread i in order of 
+        // decreasing vector clocks, if q->vc <= p->vc, then 
+        // q->next->vc <= p->vc will also hold. Hence, break out of the
+        // loop once this condition is met.
+        if (vc_lte(&q->vc, &p->vc))
+          break;
+        if (! vc_lte(&p->vc, &q->vc))
+        {
+          if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
+          {
+            tl_assert(q->stacktrace);
+            show_call_stack(i,        "Other segment start",
+                            q->stacktrace);
+            show_call_stack(i,        "Other segment end",
+                            q->next ? q->next->stacktrace : 0);
+          }
+        }
       }
-   }
+    }
+  }
 }
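
The early break in thread_report_conflicting_segments_segment() relies on vc_lte() being a partial-order test on vector clocks: each other thread's segment list is walked from the newest segment backwards, so as soon as one segment's clock is ordered before p->vc, all remaining (older) segments are ordered before it as well, and only segments whose clocks are incomparable with p->vc, i.e. concurrent segments, are checked for overlapping accesses. The following sketch illustrates that comparison with a simplified dense clock layout; it is an assumption for illustration, not the actual drd_vc.c representation or signatures.

/* Simplified, hypothetical vector clock: one counter per thread id. */
typedef struct { unsigned size; unsigned count[16]; } VcSketch;

/* "a happened before or equals b": every counter of a is <= that of b. */
static int vc_lte_sketch(const VcSketch* const a, const VcSketch* const b)
{
  unsigned i;
  for (i = 0; i < a->size; i++)
    if (a->count[i] > (i < b->size ? b->count[i] : 0))
      return 0;
  return 1;
}

/* Two segments may race exactly when neither clock is ordered before the
   other, which is what the two vc_lte() calls in the loop above test. */
static int vc_concurrent_sketch(const VcSketch* const a, const VcSketch* const b)
{
  return !vc_lte_sketch(a, b) && !vc_lte_sketch(b, a);
}
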
 
 void thread_report_conflicting_segments(const DrdThreadId tid,
@@ -814,19 +814,19 @@
                                         const SizeT size,
                                         const BmAccessTypeT access_type)
 {
-   Segment* p;
+  Segment* p;
 
-   tl_assert(0 <= tid && tid < DRD_N_THREADS
-             && tid != DRD_INVALID_THREADID);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS
+            && tid != DRD_INVALID_THREADID);
 
-   for (p = s_threadinfo[tid].first; p; p = p->next)
-   {
-      if (bm_has(p->bm, addr, addr + size, access_type))
-      {
-         thread_report_conflicting_segments_segment(tid, addr, size,
-                                                    access_type, p);
-      }
-   }
+  for (p = s_threadinfo[tid].first; p; p = p->next)
+  {
+    if (bm_has(p->bm, addr, addr + size, access_type))
+    {
+      thread_report_conflicting_segments_segment(tid, addr, size,
+                                                 access_type, p);
+    }
+  }
 }
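
thread_report_conflicting_segments() is the outer driver: it only descends into the per-segment reporting above for those segments of the racing thread whose bitmap actually covers [addr, addr+size). A hedged usage sketch of a call site follows; the surrounding error-reporting context, and eStore as the store value of BmAccessTypeT, are assumptions, not taken from the actual drd error-reporting code.

/* Hypothetical call site: after a race on a store to [addr, addr+size) has
   been detected for DRD thread tid, list the conflicting segments of the
   other threads alongside the primary error report. */
static void report_store_race_sketch(const DrdThreadId tid,
                                     const Addr addr, const SizeT size)
{
  thread_report_conflicting_segments(tid, addr, size, eStore);
}
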
 
 /** Compute a bitmap that represents the union of all memory accesses of all
@@ -834,150 +834,144 @@
  */
 static void thread_update_danger_set(const DrdThreadId tid)
 {
-   Segment* p;
+  Segment* p;
 
-   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
-   tl_assert(tid == s_drd_running_tid);
+  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+  tl_assert(tid == s_drd_running_tid);
 
-   s_update_danger_set_count++;
-   s_danger_set_bitmap_creation_count  -= bm_get_bitmap_creation_count();
-   s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();
+  s_update_danger_set_count++;
+  s_danger_set_bitmap_creation_count  -= bm_get_bitmap_creation_count();
+  s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();
 
-   if (s_danger_set)
-   {
-      bm_clear_all(s_danger_set);
-   }
-   else
-   {
-      s_danger_set = bm_new();
-   }
+  if (s_danger_set)
+  {
+    bm_clear_all(s_danger_set);
+  }
+  else
+  {
+    s_danger_set = bm_new();
+  }
 
-   if (s_trace_danger_set)
-   {
+  if (s_trace_danger_set)
+  {
+    char msg[256];
+
+    VG_(snprintf)(msg, sizeof(msg),
+                  "computing danger set for thread %d with vc ",
+                  tid);
+    vc_snprint(msg + VG_(strlen)(msg),
+               sizeof(msg) - VG_(strlen)(msg),
+               &s_threadinfo[tid].last->vc);
+    VG_(message)(Vg_DebugMsg, "%s", msg);
+  }
+
+  p = s_threadinfo[tid].last;
+  {
+    unsigned j;
+
+    if (s_trace_danger_set)
+    {
       char msg[256];
 
       VG_(snprintf)(msg, sizeof(msg),
-                    "computing danger set for thread %d with vc ",
+                    "danger set: thread [%d] at vc ",
                     tid);
       vc_snprint(msg + VG_(strlen)(msg),
                  sizeof(msg) - VG_(strlen)(msg),
-                 &s_threadinfo[tid].last->vc);
+                 &p->vc);
       VG_(message)(Vg_DebugMsg, "%s", msg);
-   }
+    }
 
-   p = s_threadinfo[tid].last;
-   {
-      unsigned j;
-
-      if (s_trace_danger_set)
+    for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
+    {
+      if (IsValidDrdThreadId(j))
       {
-         char msg[256];
-
-         VG_(snprintf)(msg, sizeof(msg),
-                       "danger set: thread [%d] at vc ",
-                       tid);
-         vc_snprint(msg + VG_(strlen)(msg),
-                    sizeof(msg) - VG_(strlen)(msg),
-                    &p->vc);
-         VG_(message)(Vg_DebugMsg, "%s", msg);
+        const Segment* q;
+        for (q = s_threadinfo[j].last; q; q = q->prev)
+          if (j != tid && q != 0
+              && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
+          {
+            if (s_trace_danger_set)
+            {
+              char msg[256];
+              VG_(snprintf)(msg, sizeof(msg),
+                            "danger set: [%d] merging segment ", j);
+              vc_snprint(msg + VG_(strlen)(msg),
+                         sizeof(msg) - VG_(strlen)(msg),
+                         &q->vc);
+              VG_(message)(Vg_DebugMsg, "%s", msg);
+            }
+            bm_merge2(s_danger_set, q->bm);
+          }
+          else
+          {
+            if (s_trace_danger_set)
+            {
+              char msg[256];
+              VG_(snprintf)(msg, sizeof(msg),
+                            "danger set: [%d] ignoring segment ", j);
+              vc_snprint(msg + VG_(strlen)(msg),
+                         sizeof(msg) - VG_(strlen)(msg),
+                         &q->vc);
+              VG_(message)(Vg_DebugMsg, "%s", msg);
+            }
+          }
       }
+    }
 
-      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
+    for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
+    {
+      if (IsValidDrdThreadId(j))
       {
-         if (IsValidDrdThreadId(j))
-         {
-            const Segment* q;
-            for (q = s_threadinfo[j].last; q; q = q->prev)
-            if (j != tid && q != 0
-                && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
-            {
-               if (s_trace_danger_set)
-               {
-                  char msg[256];
-                  VG_(snprintf)(msg, sizeof(msg),
-                                "danger set: [%d] merging segment ", j);
-                  vc_snprint(msg + VG_(strlen)(msg),
-                             sizeof(msg) - VG_(strlen)(msg),
-                             &q->vc);
-                  VG_(message)(Vg_DebugMsg, "%s", msg);
-               }
-               bm_merge2(s_danger_set, q->bm);
-            }
-            else
-            {
-               if (s_trace_danger_set)
-               {
-                  char msg[256];
-                  VG_(snprintf)(msg, sizeof(msg),
-                                "danger set: [%d] ignoring segment ", j);
-                  vc_snprint(msg + VG_(strlen)(msg),
-                             sizeof(msg) - VG_(strlen)(msg),
-                             &q->vc);
-                  VG_(message)(Vg_DebugMsg, "%s", msg);
-               }
-            }
-         }
+        // NPTL hack: don't report data races on sizeof(struct pthread)
+        // bytes at the top of the stack, since the NPTL functions access
+        // this data without locking.
+        if (s_threadinfo[j].stack_min != 0)
+        {
+          tl_assert(s_threadinfo[j].stack_startup != 0);
+          if (s_threadinfo[j].stack_min < s_threadinfo[j].stack_startup)
+          {
+            bm_clear(s_danger_set,
+                     s_threadinfo[j].stack_min,
+                     s_threadinfo[j].stack_startup);
+          }
+        }
       }
+    }
+  }
 
-      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
-      {
-         if (IsValidDrdThreadId(j))
-         {
-            // NPTL hack: don't report data races on sizeof(struct pthread)
-            // bytes at the top of the stack, since the NPTL functions access
-            // this data without locking.
-            if (s_threadinfo[j].stack_min != 0)
-            {
-               tl_assert(s_threadinfo[j].stack_startup != 0);
-               if (s_threadinfo[j].stack_min < s_threadinfo[j].stack_startup)
-               {
-                  bm_clear(s_danger_set,
-                           s_threadinfo[j].stack_min,
-                           s_threadinfo[j].stack_startup);
-               }
-            }
-         }
-      }
-   }
+  s_danger_set_bitmap_creation_count  += bm_get_bitmap_creation_count();
+  s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();
 
-   s_danger_set_bitmap_creation_count  += bm_get_bitmap_creation_count();
-   s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();
-
-   if (0 && s_trace_danger_set)
-   {
-      VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
-      bm_print(s_danger_set);
-      VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
-   }
+  if (0 && s_trace_danger_set)
+  {
+    VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
+    bm_print(s_danger_set);
+    VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
+  }
 }
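
thread_update_danger_set() thus builds the union of the access bitmaps of every segment that is concurrent with the running thread's newest segment, and then removes the top-of-stack bytes of each thread again because NPTL accesses the struct pthread data there without locking. The toy model below condenses that idea into a self-contained form; the types and helper functions are illustrative stand-ins for DRD's real vector clocks and for bm_merge2()/bm_clear(), not the actual implementation.

#include <string.h>

/* Toy model: each "segment" carries a two-counter vector clock and a
   byte-granular access bitmap over a tiny 64-byte address range. */
enum { RANGE = 64, NCLK = 2 };

typedef struct { unsigned vc[NCLK]; unsigned char bm[RANGE]; } ToySegment;

static int toy_vc_lte(const unsigned a[NCLK], const unsigned b[NCLK])
{
  return a[0] <= b[0] && a[1] <= b[1];
}

/* Union the bitmaps of all segments that are concurrent with 'current';
   segments ordered before or after it cannot contribute to a race. */
static void toy_danger_set(unsigned char danger[RANGE],
                           const ToySegment* const current,
                           const ToySegment* const others,
                           const unsigned n_others)
{
  unsigned i, b;

  memset(danger, 0, RANGE);                       /* cf. bm_clear_all() */
  for (i = 0; i < n_others; i++)
  {
    if (toy_vc_lte(others[i].vc, current->vc)
        || toy_vc_lte(current->vc, others[i].vc))
      continue;                                   /* ordered, hence harmless */
    for (b = 0; b < RANGE; b++)
      danger[b] |= others[i].bm[b];               /* cf. bm_merge2() */
  }
}

/* cf. bm_clear(): drop the [lo, hi) top-of-stack bytes that NPTL touches
   without locking, so that they never show up in the danger set. */
static void toy_clear_range(unsigned char danger[RANGE],
                            const unsigned lo, const unsigned hi)
{
  unsigned b;
  for (b = lo; b < hi && b < RANGE; b++)
    danger[b] = 0;
}
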
 
 ULong thread_get_context_switch_count(void)
 {
-   return s_context_switch_count;
+  return s_context_switch_count;
 }
 
 ULong thread_get_discard_ordered_segments_count(void)
 {
-   return s_discard_ordered_segments_count;
+  return s_discard_ordered_segments_count;
 }
 
 ULong thread_get_update_danger_set_count(void)
 {
-   return s_update_danger_set_count;
+  return s_update_danger_set_count;
 }
 
 ULong thread_get_danger_set_bitmap_creation_count(void)
 {
-   return s_danger_set_bitmap_creation_count;
+  return s_danger_set_bitmap_creation_count;
 }
 
 ULong thread_get_danger_set_bitmap2_creation_count(void)
 {
-   return s_danger_set_bitmap2_creation_count;
+  return s_danger_set_bitmap2_creation_count;
 }
-
-/*
- * Local variables:
- * c-basic-offset: 3
- * End:
- */
diff --git a/exp-drd/tests/pth_create_chain.c b/exp-drd/tests/pth_create_chain.c
index 1bdf9e7..f9636ac 100644
--- a/exp-drd/tests/pth_create_chain.c
+++ b/exp-drd/tests/pth_create_chain.c
@@ -52,7 +52,3 @@
   }
   return 0;
 }
-
-// Local variables:
-// compile-command: "g++ -o pthread_create-chain -g -Wall -Wextra -Werror -Wno-sign-compare -Wno-unused pthread_create-chain.cpp -lpthread"
-// End: