- Reindented code such that it uses three spaces for indentation instead
  of two. The indentation of the DRD source code is now consistent with
  the other Valgrind source files.
- Added an Emacs mode line with indentation settings.


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@9496 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/drd/drd.h b/drd/drd.h
index 76c60b5..011fe38 100644
--- a/drd/drd.h
+++ b/drd/drd.h
@@ -1,60 +1,61 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 
 /*
-   ----------------------------------------------------------------
+  ----------------------------------------------------------------
 
-   Notice that the following BSD-style license applies to this one
-   file (drd.h) only.  The rest of Valgrind is licensed under the
-   terms of the GNU General Public License, version 2, unless
-   otherwise indicated.  See the COPYING file in the source
-   distribution for details.
+  Notice that the following BSD-style license applies to this one
+  file (drd.h) only.  The rest of Valgrind is licensed under the
+  terms of the GNU General Public License, version 2, unless
+  otherwise indicated.  See the COPYING file in the source
+  distribution for details.
 
-   ----------------------------------------------------------------
+  ----------------------------------------------------------------
 
-   This file is part of drd, a Valgrind tool for verification of
-   multithreaded programs.
+  This file is part of drd, a Valgrind tool for verification of
+  multithreaded programs.
 
-   Copyright (C) 2006-2009 Bart Van Assche <bart.vanassche@gmail.com>.
-   All rights reserved.
+  Copyright (C) 2006-2009 Bart Van Assche <bart.vanassche@gmail.com>.
+  All rights reserved.
 
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions
-   are met:
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
 
-   1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
+  1. Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
 
-   2. The origin of this software must not be misrepresented; you must
-      not claim that you wrote the original software.  If you use this
-      software in a product, an acknowledgment in the product
-      documentation would be appreciated but is not required.
+  2. The origin of this software must not be misrepresented; you must
+  not claim that you wrote the original software.  If you use this
+  software in a product, an acknowledgment in the product
+  documentation would be appreciated but is not required.
 
-   3. Altered source versions must be plainly marked as such, and must
-      not be misrepresented as being the original software.
+  3. Altered source versions must be plainly marked as such, and must
+  not be misrepresented as being the original software.
 
-   4. The name of the author may not be used to endorse or promote
-      products derived from this software without specific prior written
-      permission.
+  4. The name of the author may not be used to endorse or promote
+  products derived from this software without specific prior written
+  permission.
 
-   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
-   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-   ----------------------------------------------------------------
+  ----------------------------------------------------------------
 
-   Notice that the above BSD-style license applies to this one file
-   (drd.h) only.  The entire rest of Valgrind is licensed under
-   the terms of the GNU General Public License, version 2.  See the
-   COPYING file in the source distribution for details.
+  Notice that the above BSD-style license applies to this one file
+  (drd.h) only.  The entire rest of Valgrind is licensed under
+  the terms of the GNU General Public License, version 2.  See the
+  COPYING file in the source distribution for details.
 
-   ----------------------------------------------------------------
+  ----------------------------------------------------------------
 */
 
 #ifndef __VALGRIND_DRD_H
@@ -68,38 +69,38 @@
    This enum comprises an ABI exported by Valgrind to programs
    which use client requests.  DO NOT CHANGE THE ORDER OF THESE
    ENTRIES, NOR DELETE ANY -- add new ones at the end.
- */
+*/
 enum
-{
-  /* Ask the core the thread ID assigned by Valgrind. */
-  VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID = VG_USERREQ_TOOL_BASE('D','R'),
-  /* args: none. */
-  /* Ask the core the thread ID assigned by DRD. */
-  VG_USERREQ__DRD_GET_DRD_THREAD_ID,
-  /* args: none. */
+   {
+      /* Ask the core the thread ID assigned by Valgrind. */
+      VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID = VG_USERREQ_TOOL_BASE('D','R'),
+      /* args: none. */
+      /* Ask the core the thread ID assigned by DRD. */
+      VG_USERREQ__DRD_GET_DRD_THREAD_ID,
+      /* args: none. */
 
-  /* To tell the drd tool to suppress data race detection on the specified */
-  /* address range. */
-  VG_USERREQ__DRD_START_SUPPRESSION,
-  /* args: start address, size in bytes */
-  /* To tell the drd tool no longer to suppress data race detection on the */
-  /* specified address range. */
-  VG_USERREQ__DRD_FINISH_SUPPRESSION,
-  /* args: start address, size in bytes */
+      /* To tell the drd tool to suppress data race detection on the */
+      /* specified address range. */
+      VG_USERREQ__DRD_START_SUPPRESSION,
+      /* args: start address, size in bytes */
+      /* To tell the drd tool no longer to suppress data race detection on */
+      /* the specified address range. */
+      VG_USERREQ__DRD_FINISH_SUPPRESSION,
+      /* args: start address, size in bytes */
 
-  /* To ask the drd tool to trace all accesses to the specified range. */
-  VG_USERREQ__DRD_START_TRACE_ADDR,
-  /* args: Addr, SizeT. */
-  /* To ask the drd tool to stop tracing accesses to the specified range. */
-  VG_USERREQ__DRD_STOP_TRACE_ADDR,
-  /* args: Addr, SizeT. */
+      /* To ask the drd tool to trace all accesses to the specified range. */
+      VG_USERREQ__DRD_START_TRACE_ADDR,
+      /* args: Addr, SizeT. */
+      /* To ask the drd tool to stop tracing accesses to the specified range. */
+      VG_USERREQ__DRD_STOP_TRACE_ADDR,
+      /* args: Addr, SizeT. */
 
-  /* To ask the drd tool to discard all information about memory accesses */
-  /* and client objects for the specified range. This client request is   */
-  /* binary compatible with the similarly named Helgrind client request.  */
-  VG_USERREQ__DRD_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),
-  /* args: Addr, SizeT. */
-};
+      /* To ask the drd tool to discard all information about memory accesses */
+      /* and client objects for the specified range. This client request is   */
+      /* binary compatible with the similarly named Helgrind client request.  */
+      VG_USERREQ__DRD_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),
+      /* args: Addr, SizeT. */
+   };
 
 
 /** Tell DRD to suppress data race detection on the specified variable. */
@@ -114,35 +115,35 @@
 static __inline__
 int vg_get_valgrind_threadid(void)
 {
-  int res;
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID,
-                             0, 0, 0, 0, 0);
-  return res;
+   int res;
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID,
+                              0, 0, 0, 0, 0);
+   return res;
 }
 
 static __inline__
 int vg_get_drd_threadid(void)
 {
-  int res;
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_GET_DRD_THREAD_ID,
-                             0, 0, 0, 0, 0);
-  return res;
+   int res;
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_GET_DRD_THREAD_ID,
+                              0, 0, 0, 0, 0);
+   return res;
 }
 
 static __inline__
 void vg_drd_ignore_range(const void* const p, const int size)
 {
-  int res;
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_START_SUPPRESSION,
-                             p, size, 0, 0, 0);
+   int res;
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_START_SUPPRESSION,
+                              p, size, 0, 0, 0);
 }
 
 static __inline__
 void vg_drd_trace_range(const void* const p, const int size)
 {
-  int res;
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_START_TRACE_ADDR,
-                             p, size, 0, 0, 0);
+   int res;
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_START_TRACE_ADDR,
+                              p, size, 0, 0, 0);
 }
 
 
diff --git a/drd/drd_barrier.c b/drd/drd_barrier.c
index d2f477f..55c64e4 100644
--- a/drd/drd_barrier.c
+++ b/drd/drd_barrier.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -40,14 +41,14 @@
 /** Information associated with one thread participating in a barrier. */
 struct barrier_thread_info
 {
-  UWord       tid;           // A DrdThreadId declared as UWord because
-                             // this member variable is the key of an OSet.
-  Word        iteration;     // iteration of last pthread_barrier_wait()
-                             // call thread tid participated in.
-  Segment*    sg[2];         // Segments of the last two
-                             // pthread_barrier() calls by thread tid.
-  ExeContext* wait_call_ctxt;// call stack for *_barrier_wait() call.
-  Segment*    post_wait_sg;  // Segment created after *_barrier_wait() finished
+   UWord       tid;           // A DrdThreadId declared as UWord because
+   // this member variable is the key of an OSet.
+   Word        iteration;     // iteration of last pthread_barrier_wait()
+   // call thread tid participated in.
+   Segment*    sg[2];         // Segments of the last two
+   // pthread_barrier() calls by thread tid.
+   ExeContext* wait_call_ctxt;// call stack for *_barrier_wait() call.
+   Segment*    post_wait_sg;  // Segment created after *_barrier_wait() finished
 };
 
 
@@ -60,7 +61,7 @@
 static const char* barrier_type_name(const BarrierT bt);
 static
 void barrier_report_wait_delete_race(const struct barrier_info* const p,
-                                    const struct barrier_thread_info* const q);
+                                     const struct barrier_thread_info* const q);
 
 
 /* Local variables. */
@@ -73,7 +74,7 @@
 
 void DRD_(barrier_set_trace)(const Bool trace_barrier)
 {
-  s_trace_barrier = trace_barrier;
+   s_trace_barrier = trace_barrier;
 }
 
 /**
@@ -85,12 +86,12 @@
                                      const DrdThreadId tid,
                                      const Word iteration)
 {
-  p->tid            = tid;
-  p->iteration      = iteration;
-  p->sg[0]          = 0;
-  p->sg[1]          = 0;
-  p->wait_call_ctxt = 0;
-  p->post_wait_sg   = 0;
+   p->tid            = tid;
+   p->iteration      = iteration;
+   p->sg[0]          = 0;
+   p->sg[1]          = 0;
+   p->wait_call_ctxt = 0;
+   p->post_wait_sg   = 0;
 }
 
 /**
@@ -99,10 +100,10 @@
  */
 static void DRD_(barrier_thread_destroy)(struct barrier_thread_info* const p)
 {
-  tl_assert(p);
-  DRD_(sg_put)(p->sg[0]);
-  DRD_(sg_put)(p->sg[1]);
-  DRD_(sg_put)(p->post_wait_sg);
+   tl_assert(p);
+   DRD_(sg_put)(p->sg[0]);
+   DRD_(sg_put)(p->sg[1]);
+   DRD_(sg_put)(p->post_wait_sg);
 }
 
 /**
@@ -115,25 +116,25 @@
                               const BarrierT barrier_type,
                               const Word count)
 {
-  tl_assert(barrier != 0);
-  tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);
-  tl_assert(p->a1 == barrier);
+   tl_assert(barrier != 0);
+   tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);
+   tl_assert(p->a1 == barrier);
 
-  p->cleanup           = (void(*)(DrdClientobj*))barrier_cleanup;
-  p->delete_thread
-    = (void(*)(DrdClientobj*, DrdThreadId))barrier_delete_thread;
-  p->barrier_type      = barrier_type;
-  p->count             = count;
-  p->pre_iteration     = 0;
-  p->post_iteration    = 0;
-  p->pre_waiters_left  = count;
-  p->post_waiters_left = count;
+   p->cleanup           = (void(*)(DrdClientobj*))barrier_cleanup;
+   p->delete_thread
+      = (void(*)(DrdClientobj*, DrdThreadId))barrier_delete_thread;
+   p->barrier_type      = barrier_type;
+   p->count             = count;
+   p->pre_iteration     = 0;
+   p->post_iteration    = 0;
+   p->pre_waiters_left  = count;
+   p->post_waiters_left = count;
 
-  tl_assert(sizeof(((struct barrier_thread_info*)0)->tid) == sizeof(Word));
-  tl_assert(sizeof(((struct barrier_thread_info*)0)->tid)
-            >= sizeof(DrdThreadId));
-  p->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), "drd.barrier.bi.1",
-                                      VG_(free));
+   tl_assert(sizeof(((struct barrier_thread_info*)0)->tid) == sizeof(Word));
+   tl_assert(sizeof(((struct barrier_thread_info*)0)->tid)
+             >= sizeof(DrdThreadId));
+   p->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), "drd.barrier.bi.1",
+                                 VG_(free));
 }
 
 /**
@@ -144,39 +145,39 @@
  */
 static void barrier_cleanup(struct barrier_info* p)
 {
-  struct barrier_thread_info* q;
-    Segment* latest_sg = 0;
+   struct barrier_thread_info* q;
+   Segment* latest_sg = 0;
 
-  tl_assert(p);
+   tl_assert(p);
 
-  if (p->pre_waiters_left != p->count)
-  {
-    BarrierErrInfo bei = { p->a1, 0, 0 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            BarrierErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Destruction of barrier that is being waited"
-                            " upon",
-                            &bei);
-  }
+   if (p->pre_waiters_left != p->count)
+   {
+      BarrierErrInfo bei = { p->a1, 0, 0 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              BarrierErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Destruction of barrier that is being waited"
+                              " upon",
+                              &bei);
+   }
 
-  DRD_(thread_get_latest_segment)(&latest_sg, DRD_(thread_get_running_tid)());
-  tl_assert(latest_sg);
+   DRD_(thread_get_latest_segment)(&latest_sg, DRD_(thread_get_running_tid)());
+   tl_assert(latest_sg);
 
-  VG_(OSetGen_ResetIter)(p->oset);
-  for ( ; (q = VG_(OSetGen_Next)(p->oset)) != 0; )
-  {
-    if (q->post_wait_sg
-        && ! DRD_(vc_lte)(&q->post_wait_sg->vc, &latest_sg->vc))
-    {
-      barrier_report_wait_delete_race(p, q);
-    }
+   VG_(OSetGen_ResetIter)(p->oset);
+   for ( ; (q = VG_(OSetGen_Next)(p->oset)) != 0; )
+   {
+      if (q->post_wait_sg
+          && ! DRD_(vc_lte)(&q->post_wait_sg->vc, &latest_sg->vc))
+      {
+         barrier_report_wait_delete_race(p, q);
+      }
 
-    DRD_(barrier_thread_destroy)(q);
-  }
-  VG_(OSetGen_Destroy)(p->oset);
+      DRD_(barrier_thread_destroy)(q);
+   }
+   VG_(OSetGen_Destroy)(p->oset);
 
-  DRD_(sg_put)(latest_sg);
+   DRD_(sg_put)(latest_sg);
 }
 
 /**
@@ -188,18 +189,18 @@
 DRD_(barrier_get_or_allocate)(const Addr barrier,
                               const BarrierT barrier_type, const Word count)
 {
-  struct barrier_info *p;
+   struct barrier_info *p;
 
-  tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);
+   tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);
 
-  tl_assert(offsetof(DrdClientobj, barrier) == 0);
-  p = &(DRD_(clientobj_get)(barrier, ClientBarrier)->barrier);
-  if (p == 0)
-  {
-    p = &(DRD_(clientobj_add)(barrier, ClientBarrier)->barrier);
-    DRD_(barrier_initialize)(p, barrier, barrier_type, count);
-  }
-  return p;
+   tl_assert(offsetof(DrdClientobj, barrier) == 0);
+   p = &(DRD_(clientobj_get)(barrier, ClientBarrier)->barrier);
+   if (p == 0)
+   {
+      p = &(DRD_(clientobj_add)(barrier, ClientBarrier)->barrier);
+      DRD_(barrier_initialize)(p, barrier, barrier_type, count);
+   }
+   return p;
 }
 
 /**
@@ -208,8 +209,8 @@
  */
 static struct barrier_info* DRD_(barrier_get)(const Addr barrier)
 {
-  tl_assert(offsetof(DrdClientobj, barrier) == 0);
-  return &(DRD_(clientobj_get)(barrier, ClientBarrier)->barrier);
+   tl_assert(offsetof(DrdClientobj, barrier) == 0);
+   return &(DRD_(clientobj_get)(barrier, ClientBarrier)->barrier);
 }
 
 /**
@@ -222,182 +223,182 @@
                         const BarrierT barrier_type, const Word count,
                         const Bool reinitialization)
 {
-  struct barrier_info* p;
+   struct barrier_info* p;
 
-  tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);
+   tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);
 
-  if (count == 0)
-  {
-    BarrierErrInfo bei = { barrier, 0, 0 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            BarrierErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "pthread_barrier_init: 'count' argument is zero",
-                            &bei);
-  }
-
-  if (! reinitialization && barrier_type == pthread_barrier)
-  {
-    p = DRD_(barrier_get)(barrier);
-    if (p)
-    {
+   if (count == 0)
+   {
       BarrierErrInfo bei = { barrier, 0, 0 };
       VG_(maybe_record_error)(VG_(get_running_tid)(),
                               BarrierErr,
                               VG_(get_IP)(VG_(get_running_tid)()),
-                              "Barrier reinitialization",
+                              "pthread_barrier_init: 'count' argument is zero",
                               &bei);
-    }
-  }
-  p = DRD_(barrier_get_or_allocate)(barrier, barrier_type, count);
+   }
 
-  if (s_trace_barrier)
-  {
-    if (reinitialization)
-    {
-      VG_(message)(Vg_UserMsg,
-                   "[%d/%d] barrier_reinit    %s 0x%lx count %ld -> %ld",
-                   VG_(get_running_tid)(),
-                   DRD_(thread_get_running_tid)(),
-                   barrier_get_typename(p),
-                   barrier,
-                   p->count,
-                   count);
-    }
-    else
-    {
-      VG_(message)(Vg_UserMsg,
-                   "[%d/%d] barrier_init      %s 0x%lx",
-                   VG_(get_running_tid)(),
-                   DRD_(thread_get_running_tid)(),
-                   barrier_get_typename(p),
-                   barrier);
-    }
-  }
+   if (! reinitialization && barrier_type == pthread_barrier)
+   {
+      p = DRD_(barrier_get)(barrier);
+      if (p)
+      {
+         BarrierErrInfo bei = { barrier, 0, 0 };
+         VG_(maybe_record_error)(VG_(get_running_tid)(),
+                                 BarrierErr,
+                                 VG_(get_IP)(VG_(get_running_tid)()),
+                                 "Barrier reinitialization",
+                                 &bei);
+      }
+   }
+   p = DRD_(barrier_get_or_allocate)(barrier, barrier_type, count);
 
-  if (reinitialization && p->count != count)
-  {
-    if (p->pre_waiters_left != p->count || p->post_waiters_left != p->count)
-    {
-      BarrierErrInfo bei = { p->a1, 0, 0 };
-      VG_(maybe_record_error)(VG_(get_running_tid)(),
-                              BarrierErr,
-                              VG_(get_IP)(VG_(get_running_tid)()),
-                              "Reinitialization of barrier with active"
-                              " waiters",
-                              &bei);
-    }
-    p->count = count;
-  }
+   if (s_trace_barrier)
+   {
+      if (reinitialization)
+      {
+         VG_(message)(Vg_UserMsg,
+                      "[%d/%d] barrier_reinit    %s 0x%lx count %ld -> %ld",
+                      VG_(get_running_tid)(),
+                      DRD_(thread_get_running_tid)(),
+                      barrier_get_typename(p),
+                      barrier,
+                      p->count,
+                      count);
+      }
+      else
+      {
+         VG_(message)(Vg_UserMsg,
+                      "[%d/%d] barrier_init      %s 0x%lx",
+                      VG_(get_running_tid)(),
+                      DRD_(thread_get_running_tid)(),
+                      barrier_get_typename(p),
+                      barrier);
+      }
+   }
+
+   if (reinitialization && p->count != count)
+   {
+      if (p->pre_waiters_left != p->count || p->post_waiters_left != p->count)
+      {
+         BarrierErrInfo bei = { p->a1, 0, 0 };
+         VG_(maybe_record_error)(VG_(get_running_tid)(),
+                                 BarrierErr,
+                                 VG_(get_IP)(VG_(get_running_tid)()),
+                                 "Reinitialization of barrier with active"
+                                 " waiters",
+                                 &bei);
+      }
+      p->count = count;
+   }
 }
 
 /** Called after pthread_barrier_destroy() / gomp_barrier_destroy(). */
 void DRD_(barrier_destroy)(const Addr barrier, const BarrierT barrier_type)
 {
-  struct barrier_info* p;
+   struct barrier_info* p;
 
-  p = DRD_(barrier_get)(barrier);
+   p = DRD_(barrier_get)(barrier);
 
-  if (s_trace_barrier)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] barrier_destroy   %s 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 barrier_get_typename(p),
-                 barrier);
-  }
+   if (s_trace_barrier)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] barrier_destroy   %s 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   barrier_get_typename(p),
+                   barrier);
+   }
 
-  if (p == 0)
-  {
-    GenericErrInfo GEI;
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            GenericErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Not a barrier",
-                            &GEI);
-    return;
-  }
+   if (p == 0)
+   {
+      GenericErrInfo GEI;
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              GenericErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Not a barrier",
+                              &GEI);
+      return;
+   }
 
-  if (p->pre_waiters_left != p->count || p->post_waiters_left != p->count)
-  {
-    BarrierErrInfo bei = { p->a1, 0, 0 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            BarrierErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Destruction of a barrier with active waiters",
-                            &bei);
-  }
+   if (p->pre_waiters_left != p->count || p->post_waiters_left != p->count)
+   {
+      BarrierErrInfo bei = { p->a1, 0, 0 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              BarrierErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Destruction of a barrier with active waiters",
+                              &bei);
+   }
 
-  DRD_(clientobj_remove)(p->a1, ClientBarrier);
+   DRD_(clientobj_remove)(p->a1, ClientBarrier);
 }
 
 /** Called before pthread_barrier_wait() / gomp_barrier_wait(). */
 void DRD_(barrier_pre_wait)(const DrdThreadId tid, const Addr barrier,
                             const BarrierT barrier_type)
 {
-  struct barrier_info* p;
-  struct barrier_thread_info* q;
-  const UWord word_tid = tid;
+   struct barrier_info* p;
+   struct barrier_thread_info* q;
+   const UWord word_tid = tid;
 
-  p = DRD_(barrier_get)(barrier);
-  if (p == 0 && barrier_type == gomp_barrier)
-  {
-    /*
-     * gomp_barrier_wait() call has been intercepted but gomp_barrier_init()
-     * not. The only cause I know of that can trigger this is that libgomp.so
-     * has been compiled with --enable-linux-futex.
-     */
-    VG_(message)(Vg_UserMsg, "");
-    VG_(message)(Vg_UserMsg,
-                 "Please verify whether gcc has been configured"
-                 " with option --disable-linux-futex.");
-    VG_(message)(Vg_UserMsg,
-                 "See also the section about OpenMP in the DRD manual.");
-    VG_(message)(Vg_UserMsg, "");
-  }
-  tl_assert(p);
+   p = DRD_(barrier_get)(barrier);
+   if (p == 0 && barrier_type == gomp_barrier)
+   {
+      /*
+       * gomp_barrier_wait() call has been intercepted but gomp_barrier_init()
+       * not. The only cause I know of that can trigger this is that libgomp.so
+       * has been compiled with --enable-linux-futex.
+       */
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg,
+                   "Please verify whether gcc has been configured"
+                   " with option --disable-linux-futex.");
+      VG_(message)(Vg_UserMsg,
+                   "See also the section about OpenMP in the DRD manual.");
+      VG_(message)(Vg_UserMsg, "");
+   }
+   tl_assert(p);
 
-  if (s_trace_barrier)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] barrier_pre_wait  %s 0x%lx iteration %ld",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 barrier_get_typename(p),
-                 barrier,
-                 p->pre_iteration);
-  }
+   if (s_trace_barrier)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] barrier_pre_wait  %s 0x%lx iteration %ld",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   barrier_get_typename(p),
+                   barrier,
+                   p->pre_iteration);
+   }
 
-  /* Allocate the per-thread data structure if necessary. */
-  q = VG_(OSetGen_Lookup)(p->oset, &word_tid);
-  if (q == 0)
-  {
-    q = VG_(OSetGen_AllocNode)(p->oset, sizeof(*q));
-    DRD_(barrier_thread_initialize)(q, tid, p->pre_iteration);
-    VG_(OSetGen_Insert)(p->oset, q);
-    tl_assert(VG_(OSetGen_Lookup)(p->oset, &word_tid) == q);
-  }
+   /* Allocate the per-thread data structure if necessary. */
+   q = VG_(OSetGen_Lookup)(p->oset, &word_tid);
+   if (q == 0)
+   {
+      q = VG_(OSetGen_AllocNode)(p->oset, sizeof(*q));
+      DRD_(barrier_thread_initialize)(q, tid, p->pre_iteration);
+      VG_(OSetGen_Insert)(p->oset, q);
+      tl_assert(VG_(OSetGen_Lookup)(p->oset, &word_tid) == q);
+   }
 
-  /* Record *_barrier_wait() call context. */
-  q->wait_call_ctxt = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
+   /* Record *_barrier_wait() call context. */
+   q->wait_call_ctxt = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
 
-  /*
-   * Store a pointer to the latest segment of the current thread in the
-   * per-thread data structure.
-   */
-  DRD_(thread_get_latest_segment)(&q->sg[p->pre_iteration], tid);
+   /*
+    * Store a pointer to the latest segment of the current thread in the
+    * per-thread data structure.
+    */
+   DRD_(thread_get_latest_segment)(&q->sg[p->pre_iteration], tid);
 
-  /*
-   * If the same number of threads as the barrier count indicates have
-   * called the pre *_barrier_wait() wrapper, toggle p->pre_iteration and
-   * reset the p->pre_waiters_left counter.
-   */
-  if (--p->pre_waiters_left <= 0)
-  {
-    p->pre_iteration    = 1 - p->pre_iteration;
-    p->pre_waiters_left = p->count;
-  }
+   /*
+    * If the same number of threads as the barrier count indicates have
+    * called the pre *_barrier_wait() wrapper, toggle p->pre_iteration and
+    * reset the p->pre_waiters_left counter.
+    */
+   if (--p->pre_waiters_left <= 0)
+   {
+      p->pre_iteration    = 1 - p->pre_iteration;
+      p->pre_waiters_left = p->count;
+   }
 }
 
 /** Called after pthread_barrier_wait() / gomp_barrier_wait(). */
@@ -405,105 +406,105 @@
                              const BarrierT barrier_type, const Bool waited,
                              const Bool serializing)
 {
-  struct barrier_info* p;
-  const UWord word_tid = tid;
-  struct barrier_thread_info* q;
-  struct barrier_thread_info* r;
+   struct barrier_info* p;
+   const UWord word_tid = tid;
+   struct barrier_thread_info* q;
+   struct barrier_thread_info* r;
 
-  p = DRD_(barrier_get)(barrier);
+   p = DRD_(barrier_get)(barrier);
 
-  if (s_trace_barrier)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] barrier_post_wait %s 0x%lx iteration %ld%s",
-                 VG_(get_running_tid)(),
-                 tid,
-                 p ? barrier_get_typename(p) : "(?)",
-                 barrier,
-                 p ? p->post_iteration : -1,
-                 serializing ? " (serializing)" : "");
-  }
+   if (s_trace_barrier)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] barrier_post_wait %s 0x%lx iteration %ld%s",
+                   VG_(get_running_tid)(),
+                   tid,
+                   p ? barrier_get_typename(p) : "(?)",
+                   barrier,
+                   p ? p->post_iteration : -1,
+                   serializing ? " (serializing)" : "");
+   }
 
-  /*
-   * If p == 0, this means that the barrier has been destroyed after
-   * *_barrier_wait() returned and before this function was called. Just
-   * return in that case -- race conditions between *_barrier_wait()
-   * and *_barrier_destroy() are detected by the *_barrier_destroy() wrapper.
-   */
-  if (p == 0)
-    return;
+   /*
+    * If p == 0, this means that the barrier has been destroyed after
+    * *_barrier_wait() returned and before this function was called. Just
+    * return in that case -- race conditions between *_barrier_wait()
+    * and *_barrier_destroy() are detected by the *_barrier_destroy() wrapper.
+    */
+   if (p == 0)
+      return;
 
-  /* If the *_barrier_wait() call returned an error code, exit. */
-  if (! waited)
-    return;
+   /* If the *_barrier_wait() call returned an error code, exit. */
+   if (! waited)
+      return;
 
-  q = VG_(OSetGen_Lookup)(p->oset, &word_tid);
-  if (q == 0)
-  {
-    BarrierErrInfo bei = { p->a1, 0, 0 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            BarrierErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Error in barrier implementation"
-                            " -- barrier_wait() started before"
-                            " barrier_destroy() and finished after"
-                            " barrier_destroy()",
-                            &bei);
+   q = VG_(OSetGen_Lookup)(p->oset, &word_tid);
+   if (q == 0)
+   {
+      BarrierErrInfo bei = { p->a1, 0, 0 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              BarrierErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Error in barrier implementation"
+                              " -- barrier_wait() started before"
+                              " barrier_destroy() and finished after"
+                              " barrier_destroy()",
+                              &bei);
 
-    q = VG_(OSetGen_AllocNode)(p->oset, sizeof(*q));
-    DRD_(barrier_thread_initialize)(q, tid, p->pre_iteration);
-    VG_(OSetGen_Insert)(p->oset, q);
-    tl_assert(VG_(OSetGen_Lookup)(p->oset, &word_tid) == q);
-  }
-  /*
-   * Combine all vector clocks that were stored in the pre_barrier_wait
-   * wrapper with the vector clock of the current thread.
-   */
-  VG_(OSetGen_ResetIter)(p->oset);
-  for ( ; (r = VG_(OSetGen_Next)(p->oset)) != 0; )
-  {
-    if (r != q)
-    {
-      tl_assert(r->sg[p->post_iteration]);
-      DRD_(thread_combine_vc2)(tid, &r->sg[p->post_iteration]->vc);
-    }
-  }
+      q = VG_(OSetGen_AllocNode)(p->oset, sizeof(*q));
+      DRD_(barrier_thread_initialize)(q, tid, p->pre_iteration);
+      VG_(OSetGen_Insert)(p->oset, q);
+      tl_assert(VG_(OSetGen_Lookup)(p->oset, &word_tid) == q);
+   }
+   /*
+    * Combine all vector clocks that were stored in the pre_barrier_wait
+    * wrapper with the vector clock of the current thread.
+    */
+   VG_(OSetGen_ResetIter)(p->oset);
+   for ( ; (r = VG_(OSetGen_Next)(p->oset)) != 0; )
+   {
+      if (r != q)
+      {
+         tl_assert(r->sg[p->post_iteration]);
+         DRD_(thread_combine_vc2)(tid, &r->sg[p->post_iteration]->vc);
+      }
+   }
 
-  /* Create a new segment and store a pointer to that segment. */
-  DRD_(thread_new_segment)(tid);
-  DRD_(thread_get_latest_segment)(&q->post_wait_sg, tid);
-  s_barrier_segment_creation_count++;
+   /* Create a new segment and store a pointer to that segment. */
+   DRD_(thread_new_segment)(tid);
+   DRD_(thread_get_latest_segment)(&q->post_wait_sg, tid);
+   s_barrier_segment_creation_count++;
 
-  /*
-   * If the same number of threads as the barrier count indicates have
-   * called the post *_barrier_wait() wrapper, toggle p->post_iteration and
-   * reset the p->post_waiters_left counter.
-   */
-  if (--p->post_waiters_left <= 0)
-  {
-    p->post_iteration    = 1 - p->post_iteration;
-    p->post_waiters_left = p->count;
-  }
+   /*
+    * If the same number of threads as the barrier count indicates have
+    * called the post *_barrier_wait() wrapper, toggle p->post_iteration and
+    * reset the p->post_waiters_left counter.
+    */
+   if (--p->post_waiters_left <= 0)
+   {
+      p->post_iteration    = 1 - p->post_iteration;
+      p->post_waiters_left = p->count;
+   }
 }
 
 /** Called when thread tid stops to exist. */
 static void barrier_delete_thread(struct barrier_info* const p,
                                   const DrdThreadId tid)
 {
-  struct barrier_thread_info* q;
-  const UWord word_tid = tid;
+   struct barrier_thread_info* q;
+   const UWord word_tid = tid;
 
-  q = VG_(OSetGen_Remove)(p->oset, &word_tid);
+   q = VG_(OSetGen_Remove)(p->oset, &word_tid);
 
-  /*
-   * q is only non-zero if the barrier object has been used by thread tid
-   * after the barrier_init() call and before the thread finished.
-   */
-  if (q)
-  {
-    DRD_(barrier_thread_destroy)(q);
-    VG_(OSetGen_FreeNode)(p->oset, q);
-  }
+   /*
+    * q is only non-zero if the barrier object has been used by thread tid
+    * after the barrier_init() call and before the thread finished.
+    */
+   if (q)
+   {
+      DRD_(barrier_thread_destroy)(q);
+      VG_(OSetGen_FreeNode)(p->oset, q);
+   }
 }
 
 /**
@@ -518,41 +519,41 @@
 void barrier_report_wait_delete_race(const struct barrier_info* const p,
                                      const struct barrier_thread_info* const q)
 {
-  tl_assert(p);
-  tl_assert(q);
+   tl_assert(p);
+   tl_assert(q);
 
-  {
-    BarrierErrInfo bei
-      = { p->a1, q->tid, q->wait_call_ctxt };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            BarrierErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Destruction of barrier not synchronized with"
-                            " barrier wait call",
-                            &bei);
-  }
+   {
+      BarrierErrInfo bei
+         = { p->a1, q->tid, q->wait_call_ctxt };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              BarrierErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Destruction of barrier not synchronized with"
+                              " barrier wait call",
+                              &bei);
+   }
 }
 
 static const char* barrier_get_typename(struct barrier_info* const p)
 {
-  tl_assert(p);
+   tl_assert(p);
 
-  return barrier_type_name(p->barrier_type);
+   return barrier_type_name(p->barrier_type);
 }
 
 static const char* barrier_type_name(const BarrierT bt)
 {
-  switch (bt)
-  {
-  case pthread_barrier:
-    return "pthread barrier";
-  case gomp_barrier:
-    return "gomp barrier";
-  }
-  return "?";
+   switch (bt)
+   {
+   case pthread_barrier:
+      return "pthread barrier";
+   case gomp_barrier:
+      return "gomp barrier";
+   }
+   return "?";
 }
 
 ULong DRD_(get_barrier_segment_creation_count)(void)
 {
-  return s_barrier_segment_creation_count;
+   return s_barrier_segment_creation_count;
 }
diff --git a/drd/drd_barrier.h b/drd/drd_barrier.h
index 1acb4a6..5c019a3 100644
--- a/drd/drd_barrier.h
+++ b/drd/drd_barrier.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_basics.h b/drd/drd_basics.h
index cc01899..78b1794 100644
--- a/drd/drd_basics.h
+++ b/drd/drd_basics.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of DRD, a thread error detector.
 
diff --git a/drd/drd_bitmap.c b/drd/drd_bitmap.c
index 73dcb04..85cacd5 100644
--- a/drd/drd_bitmap.c
+++ b/drd/drd_bitmap.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -56,51 +57,51 @@
 
 struct bitmap* DRD_(bm_new)()
 {
-  unsigned i;
-  struct bitmap* bm;
+   unsigned i;
+   struct bitmap* bm;
 
-  /* If this assert fails, fix the definition of BITS_PER_BITS_PER_UWORD */
-  /* in drd_bitmap.h.                                                    */
-  tl_assert((1 << BITS_PER_BITS_PER_UWORD) == BITS_PER_UWORD);
+   /* If this assert fails, fix the definition of BITS_PER_BITS_PER_UWORD */
+   /* in drd_bitmap.h.                                                    */
+   tl_assert((1 << BITS_PER_BITS_PER_UWORD) == BITS_PER_UWORD);
 
-  bm = VG_(malloc)("drd.bitmap.bn.1", sizeof(*bm));
-  tl_assert(bm);
-  /* Cache initialization. a1 is initialized with a value that never can */
-  /* match any valid address: the upper ADDR0_BITS bits of a1 are always */
-  /* zero for a valid cache entry.                                       */
-  for (i = 0; i < N_CACHE_ELEM; i++)
-  {
-    bm->cache[i].a1  = ~(UWord)1;
-    bm->cache[i].bm2 = 0;
-  }
-  bm->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), "drd.bitmap.bn.2",
-                                       VG_(free));
+   bm = VG_(malloc)("drd.bitmap.bn.1", sizeof(*bm));
+   tl_assert(bm);
+   /* Cache initialization. a1 is initialized with a value that never can */
+   /* match any valid address: the upper ADDR0_BITS bits of a1 are always */
+   /* zero for a valid cache entry.                                       */
+   for (i = 0; i < N_CACHE_ELEM; i++)
+   {
+      bm->cache[i].a1  = ~(UWord)1;
+      bm->cache[i].bm2 = 0;
+   }
+   bm->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), "drd.bitmap.bn.2",
+                                  VG_(free));
 
-  s_bitmap_creation_count++;
+   s_bitmap_creation_count++;
 
-  return bm;
+   return bm;
 }
 
 void DRD_(bm_delete)(struct bitmap* const bm)
 {
-  struct bitmap2*    bm2;
-  struct bitmap2ref* bm2ref;
+   struct bitmap2*    bm2;
+   struct bitmap2ref* bm2ref;
 
-  tl_assert(bm);
+   tl_assert(bm);
 
-  VG_(OSetGen_ResetIter)(bm->oset);
-  for ( ; (bm2ref = VG_(OSetGen_Next)(bm->oset)) != 0; )
-  {
-    bm2 = bm2ref->bm2;
-    tl_assert(bm2->refcnt >= 1);
-    if (--bm2->refcnt == 0)
-    {
-      VG_(free)(bm2);
-    }
-  }
+   VG_(OSetGen_ResetIter)(bm->oset);
+   for ( ; (bm2ref = VG_(OSetGen_Next)(bm->oset)) != 0; )
+   {
+      bm2 = bm2ref->bm2;
+      tl_assert(bm2->refcnt >= 1);
+      if (--bm2->refcnt == 0)
+      {
+         VG_(free)(bm2);
+      }
+   }
 
-  VG_(OSetGen_Destroy)(bm->oset);
-  VG_(free)(bm);
+   VG_(OSetGen_Destroy)(bm->oset);
+   VG_(free)(bm);
 }
 
 /**
@@ -111,264 +112,264 @@
                            const Addr a1, const Addr a2,
                            const BmAccessTypeT access_type)
 {
-  Addr b, b_next;
+   Addr b, b_next;
 
-  tl_assert(bm);
-  tl_assert(a1 < a2);
-  /* The current implementation of bm_access_range does not work for the   */
-  /* ADDR0_COUNT highest addresses in the address range. At least on Linux */
-  /* this is not a problem since the upper part of the address space is    */
-  /* reserved for the kernel.                                              */
-  tl_assert(a2 + ADDR0_COUNT > a2);
+   tl_assert(bm);
+   tl_assert(a1 < a2);
+   /* The current implementation of bm_access_range does not work for the   */
+   /* ADDR0_COUNT highest addresses in the address range. At least on Linux */
+   /* this is not a problem since the upper part of the address space is    */
+   /* reserved for the kernel.                                              */
+   tl_assert(a2 + ADDR0_COUNT > a2);
 
-  for (b = a1; b < a2; b = b_next)
-  {
-    Addr b_start;
-    Addr b_end;
-    struct bitmap2* bm2;
-    SPLIT_ADDRESS(b);
+   for (b = a1; b < a2; b = b_next)
+   {
+      Addr b_start;
+      Addr b_end;
+      struct bitmap2* bm2;
+      SPLIT_ADDRESS(b);
 
-    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-    if (b_next > a2)
-    {
-      b_next = a2;
-    }
+      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+      if (b_next > a2)
+      {
+         b_next = a2;
+      }
 
-    bm2 = bm2_lookup_or_insert_exclusive(bm, b1);
-    tl_assert(bm2);
+      bm2 = bm2_lookup_or_insert_exclusive(bm, b1);
+      tl_assert(bm2);
 
-    if ((bm2->addr << ADDR0_BITS) < a1)
-      b_start = a1;
-    else
-      if ((bm2->addr << ADDR0_BITS) < a2)
-        b_start = (bm2->addr << ADDR0_BITS);
+      if ((bm2->addr << ADDR0_BITS) < a1)
+         b_start = a1;
       else
-        break;
-    tl_assert(a1 <= b_start && b_start <= a2);
+         if ((bm2->addr << ADDR0_BITS) < a2)
+            b_start = (bm2->addr << ADDR0_BITS);
+         else
+            break;
+      tl_assert(a1 <= b_start && b_start <= a2);
 
-    if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-      b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-    else
-      b_end = a2;
-    tl_assert(a1 <= b_end && b_end <= a2);
-    tl_assert(b_start < b_end);
-    tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+         b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+      else
+         b_end = a2;
+      tl_assert(a1 <= b_end && b_end <= a2);
+      tl_assert(b_start < b_end);
+      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
       
-    if (access_type == eLoad)
-    {
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
+      if (access_type == eLoad)
       {
-        bm0_set(bm2->bm1.bm0_r, b0);
+         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
+         {
+            bm0_set(bm2->bm1.bm0_r, b0);
+         }
       }
-    }
-    else
-    {
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
+      else
       {
-        bm0_set(bm2->bm1.bm0_w, b0);
+         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
+         {
+            bm0_set(bm2->bm1.bm0_w, b0);
+         }
       }
-    }
-  }
+   }
 }
 
 void DRD_(bm_access_range_load)(struct bitmap* const bm,
                                 const Addr a1, const Addr a2)
 {
-  DRD_(bm_access_range)(bm, a1, a2, eLoad);
+   DRD_(bm_access_range)(bm, a1, a2, eLoad);
 }
 
 void DRD_(bm_access_load_1)(struct bitmap* const bm, const Addr a1)
 {
-  bm_access_aligned_load(bm, a1, 1);
+   bm_access_aligned_load(bm, a1, 1);
 }
 
 void DRD_(bm_access_load_2)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 1) == 0)
-    bm_access_aligned_load(bm, a1, 2);
-  else
-    DRD_(bm_access_range)(bm, a1, a1 + 2, eLoad);
+   if ((a1 & 1) == 0)
+      bm_access_aligned_load(bm, a1, 2);
+   else
+      DRD_(bm_access_range)(bm, a1, a1 + 2, eLoad);
 }
 
 void DRD_(bm_access_load_4)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 3) == 0)
-    bm_access_aligned_load(bm, a1, 4);
-  else
-    DRD_(bm_access_range)(bm, a1, a1 + 4, eLoad);
+   if ((a1 & 3) == 0)
+      bm_access_aligned_load(bm, a1, 4);
+   else
+      DRD_(bm_access_range)(bm, a1, a1 + 4, eLoad);
 }
 
 void DRD_(bm_access_load_8)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 7) == 0)
-    bm_access_aligned_load(bm, a1, 8);
-  else if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_load(bm, a1 + 0, 4);
-    bm_access_aligned_load(bm, a1 + 4, 4);
-  }
-  else
-    DRD_(bm_access_range)(bm, a1, a1 + 8, eLoad);
+   if ((a1 & 7) == 0)
+      bm_access_aligned_load(bm, a1, 8);
+   else if ((a1 & 3) == 0)
+   {
+      bm_access_aligned_load(bm, a1 + 0, 4);
+      bm_access_aligned_load(bm, a1 + 4, 4);
+   }
+   else
+      DRD_(bm_access_range)(bm, a1, a1 + 8, eLoad);
 }
 
 void DRD_(bm_access_range_store)(struct bitmap* const bm,
                                  const Addr a1, const Addr a2)
 {
-  DRD_(bm_access_range)(bm, a1, a2, eStore);
+   DRD_(bm_access_range)(bm, a1, a2, eStore);
 }
 
 void DRD_(bm_access_store_1)(struct bitmap* const bm, const Addr a1)
 {
-  bm_access_aligned_store(bm, a1, 1);
+   bm_access_aligned_store(bm, a1, 1);
 }
 
 void DRD_(bm_access_store_2)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 1) == 0)
-    bm_access_aligned_store(bm, a1, 2);
-  else
-    DRD_(bm_access_range)(bm, a1, a1 + 2, eStore);
+   if ((a1 & 1) == 0)
+      bm_access_aligned_store(bm, a1, 2);
+   else
+      DRD_(bm_access_range)(bm, a1, a1 + 2, eStore);
 }
 
 void DRD_(bm_access_store_4)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 3) == 0)
-    bm_access_aligned_store(bm, a1, 4);
-  else
-    DRD_(bm_access_range)(bm, a1, a1 + 4, eStore);
+   if ((a1 & 3) == 0)
+      bm_access_aligned_store(bm, a1, 4);
+   else
+      DRD_(bm_access_range)(bm, a1, a1 + 4, eStore);
 }
 
 void DRD_(bm_access_store_8)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 7) == 0)
-    bm_access_aligned_store(bm, a1, 8);
-  else if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_store(bm, a1 + 0, 4);
-    bm_access_aligned_store(bm, a1 + 4, 4);
-  }
-  else
-    DRD_(bm_access_range)(bm, a1, a1 + 8, eStore);
+   if ((a1 & 7) == 0)
+      bm_access_aligned_store(bm, a1, 8);
+   else if ((a1 & 3) == 0)
+   {
+      bm_access_aligned_store(bm, a1 + 0, 4);
+      bm_access_aligned_store(bm, a1 + 4, 4);
+   }
+   else
+      DRD_(bm_access_range)(bm, a1, a1 + 8, eStore);
 }
 
 Bool DRD_(bm_has)(struct bitmap* const bm, const Addr a1, const Addr a2,
                   const BmAccessTypeT access_type)
 {
-  Addr b;
-  for (b = a1; b < a2; b++)
-  {
-    if (! DRD_(bm_has_1)(bm, b, access_type))
-    {
-      return False;
-    }
-  }
-  return True;
+   Addr b;
+   for (b = a1; b < a2; b++)
+   {
+      if (! DRD_(bm_has_1)(bm, b, access_type))
+      {
+         return False;
+      }
+   }
+   return True;
 }
 
 Bool
 DRD_(bm_has_any_load)(struct bitmap* const bm, const Addr a1, const Addr a2)
 {
-  Addr b, b_next;
+   Addr b, b_next;
 
-  tl_assert(bm);
+   tl_assert(bm);
 
-  for (b = a1; b < a2; b = b_next)
-  {
-    const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
+   for (b = a1; b < a2; b = b_next)
+   {
+      const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
 
-    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-    if (b_next > a2)
-    {
-      b_next = a2;
-    }
-
-    if (bm2)
-    {
-      Addr b_start;
-      Addr b_end;
-      UWord b0;
-      const struct bitmap1* const p1 = &bm2->bm1;
-
-      if ((bm2->addr << ADDR0_BITS) < a1)
-        b_start = a1;
-      else
-        if ((bm2->addr << ADDR0_BITS) < a2)
-          b_start = (bm2->addr << ADDR0_BITS);
-        else
-          break;
-      tl_assert(a1 <= b_start && b_start <= a2);
-
-      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-        b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-      else
-        b_end = a2;
-      tl_assert(a1 <= b_end && b_end <= a2);
-      tl_assert(b_start < b_end);
-      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
-      
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+      if (b_next > a2)
       {
-        if (bm0_is_set(p1->bm0_r, b0))
-        {
-          return True;
-        }
+         b_next = a2;
       }
-    }
-  }
-  return 0;
+
+      if (bm2)
+      {
+         Addr b_start;
+         Addr b_end;
+         UWord b0;
+         const struct bitmap1* const p1 = &bm2->bm1;
+
+         if ((bm2->addr << ADDR0_BITS) < a1)
+            b_start = a1;
+         else
+            if ((bm2->addr << ADDR0_BITS) < a2)
+               b_start = (bm2->addr << ADDR0_BITS);
+            else
+               break;
+         tl_assert(a1 <= b_start && b_start <= a2);
+
+         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+         else
+            b_end = a2;
+         tl_assert(a1 <= b_end && b_end <= a2);
+         tl_assert(b_start < b_end);
+         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      
+         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+         {
+            if (bm0_is_set(p1->bm0_r, b0))
+            {
+               return True;
+            }
+         }
+      }
+   }
+   return 0;
 }
 
 Bool DRD_(bm_has_any_store)(struct bitmap* const bm,
                             const Addr a1, const Addr a2)
 {
-  Addr b, b_next;
+   Addr b, b_next;
 
-  tl_assert(bm);
+   tl_assert(bm);
 
-  for (b = a1; b < a2; b = b_next)
-  {
-    const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
+   for (b = a1; b < a2; b = b_next)
+   {
+      const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
 
-    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-    if (b_next > a2)
-    {
-      b_next = a2;
-    }
-
-    if (bm2)
-    {
-      Addr b_start;
-      Addr b_end;
-      UWord b0;
-      const struct bitmap1* const p1 = &bm2->bm1;
-
-      if ((bm2->addr << ADDR0_BITS) < a1)
-        b_start = a1;
-      else
-        if ((bm2->addr << ADDR0_BITS) < a2)
-          b_start = (bm2->addr << ADDR0_BITS);
-        else
-          break;
-      tl_assert(a1 <= b_start && b_start <= a2);
-
-      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-        b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-      else
-        b_end = a2;
-      tl_assert(a1 <= b_end && b_end <= a2);
-      tl_assert(b_start < b_end);
-      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
-      
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+      if (b_next > a2)
       {
-        if (bm0_is_set(p1->bm0_w, b0))
-        {
-          return True;
-        }
+         b_next = a2;
       }
-    }
-  }
-  return 0;
+
+      if (bm2)
+      {
+         Addr b_start;
+         Addr b_end;
+         UWord b0;
+         const struct bitmap1* const p1 = &bm2->bm1;
+
+         if ((bm2->addr << ADDR0_BITS) < a1)
+            b_start = a1;
+         else
+            if ((bm2->addr << ADDR0_BITS) < a2)
+               b_start = (bm2->addr << ADDR0_BITS);
+            else
+               break;
+         tl_assert(a1 <= b_start && b_start <= a2);
+
+         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+         else
+            b_end = a2;
+         tl_assert(a1 <= b_end && b_end <= a2);
+         tl_assert(b_start < b_end);
+         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      
+         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+         {
+            if (bm0_is_set(p1->bm0_w, b0))
+            {
+               return True;
+            }
+         }
+      }
+   }
+   return 0;
 }
 
 /* Return True if there is a read access, write access or both   */
@@ -376,54 +377,54 @@
 Bool DRD_(bm_has_any_access)(struct bitmap* const bm,
                              const Addr a1, const Addr a2)
 {
-  Addr b, b_next;
+   Addr b, b_next;
 
-  tl_assert(bm);
+   tl_assert(bm);
 
-  for (b = a1; b < a2; b = b_next)
-  {
-    const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
+   for (b = a1; b < a2; b = b_next)
+   {
+      const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
 
-    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-    if (b_next > a2)
-    {
-      b_next = a2;
-    }
-
-    if (bm2)
-    {
-      Addr b_start;
-      Addr b_end;
-      UWord b0;
-      const struct bitmap1* const p1 = &bm2->bm1;
-
-      if ((bm2->addr << ADDR0_BITS) < a1)
-        b_start = a1;
-      else
-        if ((bm2->addr << ADDR0_BITS) < a2)
-          b_start = (bm2->addr << ADDR0_BITS);
-        else
-          break;
-      tl_assert(a1 <= b_start && b_start <= a2);
-
-      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-        b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-      else
-        b_end = a2;
-      tl_assert(a1 <= b_end && b_end <= a2);
-      tl_assert(b_start < b_end);
-      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
-      
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+      if (b_next > a2)
       {
-        if (bm0_is_set(p1->bm0_r, b0) | bm0_is_set(p1->bm0_w, b0))
-        {
-          return True;
-        }
+         b_next = a2;
       }
-    }
-  }
-  return False;
+
+      if (bm2)
+      {
+         Addr b_start;
+         Addr b_end;
+         UWord b0;
+         const struct bitmap1* const p1 = &bm2->bm1;
+
+         if ((bm2->addr << ADDR0_BITS) < a1)
+            b_start = a1;
+         else
+            if ((bm2->addr << ADDR0_BITS) < a2)
+               b_start = (bm2->addr << ADDR0_BITS);
+            else
+               break;
+         tl_assert(a1 <= b_start && b_start <= a2);
+
+         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+         else
+            b_end = a2;
+         tl_assert(a1 <= b_end && b_end <= a2);
+         tl_assert(b_start < b_end);
+         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      
+         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+         {
+            if (bm0_is_set(p1->bm0_r, b0) | bm0_is_set(p1->bm0_w, b0))
+            {
+               return True;
+            }
+         }
+      }
+   }
+   return False;
 }
 
 /**
@@ -433,78 +434,78 @@
 Bool DRD_(bm_has_1)(struct bitmap* const bm,
                     const Addr a, const BmAccessTypeT access_type)
 {
-  const struct bitmap2* p2;
-  const struct bitmap1* p1;
-  const UWord* p0;
-  const UWord a0 = a & ADDR0_MASK;
+   const struct bitmap2* p2;
+   const struct bitmap1* p1;
+   const UWord* p0;
+   const UWord a0 = a & ADDR0_MASK;
 
-  tl_assert(bm);
+   tl_assert(bm);
 
-  p2 = bm2_lookup(bm, a >> ADDR0_BITS);
-  if (p2)
-  {
-    p1 = &p2->bm1;
-    p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
-    return bm0_is_set(p0, a0) ? True : False;
-  }
-  return False;
+   p2 = bm2_lookup(bm, a >> ADDR0_BITS);
+   if (p2)
+   {
+      p1 = &p2->bm1;
+      p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
+      return bm0_is_set(p0, a0) ? True : False;
+   }
+   return False;
 }
 
 void DRD_(bm_clear)(struct bitmap* const bm, const Addr a1, const Addr a2)
 {
-  Addr b, b_next;
+   Addr b, b_next;
 
-  tl_assert(bm);
-  tl_assert(a1);
-  tl_assert(a1 <= a2);
+   tl_assert(bm);
+   tl_assert(a1);
+   tl_assert(a1 <= a2);
 
-  for (b = a1; b < a2; b = b_next)
-  {
-    struct bitmap2* const p2 = bm2_lookup_exclusive(bm, b >> ADDR0_BITS);
+   for (b = a1; b < a2; b = b_next)
+   {
+      struct bitmap2* const p2 = bm2_lookup_exclusive(bm, b >> ADDR0_BITS);
 
-    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-    if (b_next > a2)
-    {
-      b_next = a2;
-    }
-
-    if (p2)
-    {
-      Addr c = b;
-      /* If the first address in the bitmap that must be cleared does not */
-      /* start on an UWord boundary, start clearing the first addresses.  */
-      if (UWORD_LSB(c))
+      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+      if (b_next > a2)
       {
-        Addr c_next = UWORD_MSB(c) + BITS_PER_UWORD;
-        if (c_next > b_next)
-          c_next = b_next;
-        bm0_clear_range(p2->bm1.bm0_r, c & ADDR0_MASK, c_next - c);
-        bm0_clear_range(p2->bm1.bm0_w, c & ADDR0_MASK, c_next - c);
-        c = c_next;
+         b_next = a2;
       }
-      /* If some UWords have to be cleared entirely, do this now. */
-      if (UWORD_LSB(c) == 0)
+
+      if (p2)
       {
-        const Addr c_next = UWORD_MSB(b_next);
-        tl_assert(UWORD_LSB(c) == 0);
-        tl_assert(UWORD_LSB(c_next) == 0);
-        tl_assert(c_next <= b_next);
-        tl_assert(c <= c_next);
-        if (c_next > c)
-        {
-          UWord idx = (c & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
-          VG_(memset)(&p2->bm1.bm0_r[idx], 0, (c_next - c) / 8);
-          VG_(memset)(&p2->bm1.bm0_w[idx], 0, (c_next - c) / 8);
-          c = c_next;
-        }
+         Addr c = b;
+         /* If the first address in the bitmap that must be cleared does not */
+         /* start on an UWord boundary, start clearing the first addresses.  */
+         if (UWORD_LSB(c))
+         {
+            Addr c_next = UWORD_MSB(c) + BITS_PER_UWORD;
+            if (c_next > b_next)
+               c_next = b_next;
+            bm0_clear_range(p2->bm1.bm0_r, c & ADDR0_MASK, c_next - c);
+            bm0_clear_range(p2->bm1.bm0_w, c & ADDR0_MASK, c_next - c);
+            c = c_next;
+         }
+         /* If some UWords have to be cleared entirely, do this now. */
+         if (UWORD_LSB(c) == 0)
+         {
+            const Addr c_next = UWORD_MSB(b_next);
+            tl_assert(UWORD_LSB(c) == 0);
+            tl_assert(UWORD_LSB(c_next) == 0);
+            tl_assert(c_next <= b_next);
+            tl_assert(c <= c_next);
+            if (c_next > c)
+            {
+               UWord idx = (c & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
+               VG_(memset)(&p2->bm1.bm0_r[idx], 0, (c_next - c) / 8);
+               VG_(memset)(&p2->bm1.bm0_w[idx], 0, (c_next - c) / 8);
+               c = c_next;
+            }
+         }
+         /* If the last address in the bitmap that must be cleared does not */
+         /* fall on an UWord boundary, clear the last addresses.            */
+         /* tl_assert(c <= b_next); */
+         bm0_clear_range(p2->bm1.bm0_r, c & ADDR0_MASK, b_next - c);
+         bm0_clear_range(p2->bm1.bm0_w, c & ADDR0_MASK, b_next - c);
       }
-      /* If the last address in the bitmap that must be cleared does not */
-      /* fall on an UWord boundary, clear the last addresses.            */
-      /* tl_assert(c <= b_next); */
-      bm0_clear_range(p2->bm1.bm0_r, c & ADDR0_MASK, b_next - c);
-      bm0_clear_range(p2->bm1.bm0_w, c & ADDR0_MASK, b_next - c);
-    }
-  }
+   }
 }
 
 /**
@@ -513,16 +514,16 @@
  */
 void DRD_(bm_clear_load)(struct bitmap* const bm, const Addr a1, const Addr a2)
 {
-  Addr a;
+   Addr a;
 
-  for (a = a1; a < a2; a++)
-  {
-    struct bitmap2* const p2 = bm2_lookup_exclusive(bm, a >> ADDR0_BITS);
-    if (p2)
-    {
-      bm0_clear(p2->bm1.bm0_r, a & ADDR0_MASK);
-    }
-  }
+   for (a = a1; a < a2; a++)
+   {
+      struct bitmap2* const p2 = bm2_lookup_exclusive(bm, a >> ADDR0_BITS);
+      if (p2)
+      {
+         bm0_clear(p2->bm1.bm0_r, a & ADDR0_MASK);
+      }
+   }
 }
 
 /**
@@ -532,16 +533,16 @@
 void DRD_(bm_clear_store)(struct bitmap* const bm,
                           const Addr a1, const Addr a2)
 {
-  Addr a;
+   Addr a;
 
-  for (a = a1; a < a2; a++)
-  {
-    struct bitmap2* const p2 = bm2_lookup_exclusive(bm, a >> ADDR0_BITS);
-    if (p2)
-    {
-      bm0_clear(p2->bm1.bm0_w, a & ADDR0_MASK);
-    }
-  }
+   for (a = a1; a < a2; a++)
+   {
+      struct bitmap2* const p2 = bm2_lookup_exclusive(bm, a >> ADDR0_BITS);
+      if (p2)
+      {
+         bm0_clear(p2->bm1.bm0_w, a & ADDR0_MASK);
+      }
+   }
 }
 
 /**
@@ -552,147 +553,147 @@
 Bool DRD_(bm_test_and_clear)(struct bitmap* const bm,
                              const Addr a1, const Addr a2)
 {
-  Bool result;
+   Bool result;
 
-  result = DRD_(bm_has_any_access)(bm, a1, a2) != 0;
-  DRD_(bm_clear)(bm, a1, a2);
-  return result;
+   result = DRD_(bm_has_any_access)(bm, a1, a2) != 0;
+   DRD_(bm_clear)(bm, a1, a2);
+   return result;
 }
 
 Bool DRD_(bm_has_conflict_with)(struct bitmap* const bm,
                                 const Addr a1, const Addr a2,
                                 const BmAccessTypeT access_type)
 {
-  Addr b, b_next;
+   Addr b, b_next;
 
-  tl_assert(bm);
+   tl_assert(bm);
 
-  for (b = a1; b < a2; b = b_next)
-  {
-    const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
+   for (b = a1; b < a2; b = b_next)
+   {
+      const struct bitmap2* bm2 = bm2_lookup(bm, b >> ADDR0_BITS);
 
-    b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
-    if (b_next > a2)
-    {
-      b_next = a2;
-    }
-
-    if (bm2)
-    {
-      Addr b_start;
-      Addr b_end;
-      UWord b0;
-      const struct bitmap1* const p1 = &bm2->bm1;
-
-      if ((bm2->addr << ADDR0_BITS) < a1)
-        b_start = a1;
-      else
-        if ((bm2->addr << ADDR0_BITS) < a2)
-          b_start = (bm2->addr << ADDR0_BITS);
-        else
-          break;
-      tl_assert(a1 <= b_start && b_start <= a2);
-
-      if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
-        b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
-      else
-        b_end = a2;
-      tl_assert(a1 <= b_end && b_end <= a2);
-      tl_assert(b_start < b_end);
-      tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
-      
-      for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
+      if (b_next > a2)
       {
-        if (access_type == eLoad)
-        {
-          if (bm0_is_set(p1->bm0_w, b0))
-          {
-            return True;
-          }
-        }
-        else
-        {
-          tl_assert(access_type == eStore);
-          if (bm0_is_set(p1->bm0_r, b0)
-              | bm0_is_set(p1->bm0_w, b0))
-          {
-            return True;
-          }
-        }
+         b_next = a2;
       }
-    }
-  }
-  return False;
+
+      if (bm2)
+      {
+         Addr b_start;
+         Addr b_end;
+         UWord b0;
+         const struct bitmap1* const p1 = &bm2->bm1;
+
+         if ((bm2->addr << ADDR0_BITS) < a1)
+            b_start = a1;
+         else
+            if ((bm2->addr << ADDR0_BITS) < a2)
+               b_start = (bm2->addr << ADDR0_BITS);
+            else
+               break;
+         tl_assert(a1 <= b_start && b_start <= a2);
+
+         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
+            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
+         else
+            b_end = a2;
+         tl_assert(a1 <= b_end && b_end <= a2);
+         tl_assert(b_start < b_end);
+         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));
+      
+         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end-1) & ADDR0_MASK); b0++)
+         {
+            if (access_type == eLoad)
+            {
+               if (bm0_is_set(p1->bm0_w, b0))
+               {
+                  return True;
+               }
+            }
+            else
+            {
+               tl_assert(access_type == eStore);
+               if (bm0_is_set(p1->bm0_r, b0)
+                   | bm0_is_set(p1->bm0_w, b0))
+               {
+                  return True;
+               }
+            }
+         }
+      }
+   }
+   return False;
 }
 
 Bool DRD_(bm_load_has_conflict_with)(struct bitmap* const bm,
                                      const Addr a1, const Addr a2)
 {
-  return DRD_(bm_has_conflict_with)(bm, a1, a2, eLoad);
+   return DRD_(bm_has_conflict_with)(bm, a1, a2, eLoad);
 }
 
 Bool DRD_(bm_load_1_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  return bm_aligned_load_has_conflict_with(bm, a1, 1);
+   return bm_aligned_load_has_conflict_with(bm, a1, 1);
 }
 
 Bool DRD_(bm_load_2_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 1) == 0)
-    return bm_aligned_load_has_conflict_with(bm, a1, 2);
-  else
-    return DRD_(bm_has_conflict_with)(bm, a1, a1 + 2, eLoad);
+   if ((a1 & 1) == 0)
+      return bm_aligned_load_has_conflict_with(bm, a1, 2);
+   else
+      return DRD_(bm_has_conflict_with)(bm, a1, a1 + 2, eLoad);
 }
 
 Bool DRD_(bm_load_4_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 3) == 0)
-    return bm_aligned_load_has_conflict_with(bm, a1, 4);
-  else
-    return DRD_(bm_has_conflict_with)(bm, a1, a1 + 4, eLoad);
+   if ((a1 & 3) == 0)
+      return bm_aligned_load_has_conflict_with(bm, a1, 4);
+   else
+      return DRD_(bm_has_conflict_with)(bm, a1, a1 + 4, eLoad);
 }
 
 Bool DRD_(bm_load_8_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 7) == 0)
-    return bm_aligned_load_has_conflict_with(bm, a1, 8);
-  else
-    return DRD_(bm_has_conflict_with)(bm, a1, a1 + 8, eLoad);
+   if ((a1 & 7) == 0)
+      return bm_aligned_load_has_conflict_with(bm, a1, 8);
+   else
+      return DRD_(bm_has_conflict_with)(bm, a1, a1 + 8, eLoad);
 }
 
 Bool DRD_(bm_store_1_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  return bm_aligned_store_has_conflict_with(bm, a1, 1);
+   return bm_aligned_store_has_conflict_with(bm, a1, 1);
 }
 
 Bool DRD_(bm_store_2_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 1) == 0)
-    return bm_aligned_store_has_conflict_with(bm, a1, 2);
-  else
-    return DRD_(bm_has_conflict_with)(bm, a1, a1 + 2, eStore);
+   if ((a1 & 1) == 0)
+      return bm_aligned_store_has_conflict_with(bm, a1, 2);
+   else
+      return DRD_(bm_has_conflict_with)(bm, a1, a1 + 2, eStore);
 }
 
 Bool DRD_(bm_store_4_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 3) == 0)
-    return bm_aligned_store_has_conflict_with(bm, a1, 4);
-  else
-    return DRD_(bm_has_conflict_with)(bm, a1, a1 + 4, eStore);
+   if ((a1 & 3) == 0)
+      return bm_aligned_store_has_conflict_with(bm, a1, 4);
+   else
+      return DRD_(bm_has_conflict_with)(bm, a1, a1 + 4, eStore);
 }
 
 Bool DRD_(bm_store_8_has_conflict_with)(struct bitmap* const bm, const Addr a1)
 {
-  if ((a1 & 7) == 0)
-    return bm_aligned_store_has_conflict_with(bm, a1, 8);
-  else
-    return DRD_(bm_has_conflict_with)(bm, a1, a1 + 8, eStore);
+   if ((a1 & 7) == 0)
+      return bm_aligned_store_has_conflict_with(bm, a1, 8);
+   else
+      return DRD_(bm_has_conflict_with)(bm, a1, a1 + 8, eStore);
 }
 
 Bool DRD_(bm_store_has_conflict_with)(struct bitmap* const bm,
                                       const Addr a1, const Addr a2)
 {
-  return DRD_(bm_has_conflict_with)(bm, a1, a2, eStore);
+   return DRD_(bm_has_conflict_with)(bm, a1, a2, eStore);
 }
 
 /**
@@ -701,114 +702,114 @@
  */
 Bool DRD_(bm_equal)(struct bitmap* const lhs, struct bitmap* const rhs)
 {
-  struct bitmap2* bm2l;
-  struct bitmap2ref* bm2l_ref;
-  struct bitmap2* bm2r;
-  const struct bitmap2ref* bm2r_ref;
+   struct bitmap2* bm2l;
+   struct bitmap2ref* bm2l_ref;
+   struct bitmap2* bm2r;
+   const struct bitmap2ref* bm2r_ref;
 
-  /* It's not possible to have two independent iterators over the same OSet, */
-  /* so complain if lhs == rhs.                                              */
-  tl_assert(lhs != rhs);
+   /* It's not possible to have two independent iterators over the same OSet, */
+   /* so complain if lhs == rhs.                                              */
+   tl_assert(lhs != rhs);
 
-  VG_(OSetGen_ResetIter)(lhs->oset);
-  VG_(OSetGen_ResetIter)(rhs->oset);
+   VG_(OSetGen_ResetIter)(lhs->oset);
+   VG_(OSetGen_ResetIter)(rhs->oset);
 
-  for ( ; (bm2l_ref = VG_(OSetGen_Next)(lhs->oset)) != 0; )
-  {
-    while (bm2l_ref
-           && (bm2l = bm2l_ref->bm2)
-           && bm2l
-           && ! DRD_(bm_has_any_access)(lhs,
-                                  bm2l->addr << ADDR0_BITS,
-                                  (bm2l->addr + 1) << ADDR0_BITS))
-    {
-      bm2l_ref = VG_(OSetGen_Next)(lhs->oset);
-    }
-    if (bm2l_ref == 0)
-      break;
-    tl_assert(bm2l);
+   for ( ; (bm2l_ref = VG_(OSetGen_Next)(lhs->oset)) != 0; )
+   {
+      while (bm2l_ref
+             && (bm2l = bm2l_ref->bm2)
+             && bm2l
+             && ! DRD_(bm_has_any_access)(lhs,
+                                          bm2l->addr << ADDR0_BITS,
+                                          (bm2l->addr + 1) << ADDR0_BITS))
+      {
+         bm2l_ref = VG_(OSetGen_Next)(lhs->oset);
+      }
+      if (bm2l_ref == 0)
+         break;
+      tl_assert(bm2l);
 #if 0
-    VG_(message)(Vg_DebugMsg, "bm_equal: at 0x%lx", bm2l->addr << ADDR0_BITS);
+      VG_(message)(Vg_DebugMsg, "bm_equal: at 0x%lx", bm2l->addr << ADDR0_BITS);
 #endif
 
-    bm2r_ref = VG_(OSetGen_Next)(rhs->oset);
-    if (bm2r_ref == 0)
-    {
+      bm2r_ref = VG_(OSetGen_Next)(rhs->oset);
+      if (bm2r_ref == 0)
+      {
 #if 0
-      VG_(message)(Vg_DebugMsg, "bm_equal: no match found");
+         VG_(message)(Vg_DebugMsg, "bm_equal: no match found");
 #endif
-      return False;
-    }
-    bm2r = bm2r_ref->bm2;
-    tl_assert(bm2r);
-    tl_assert(DRD_(bm_has_any_access)(rhs,
-                                bm2r->addr << ADDR0_BITS,
-                                (bm2r->addr + 1) << ADDR0_BITS));
+         return False;
+      }
+      bm2r = bm2r_ref->bm2;
+      tl_assert(bm2r);
+      tl_assert(DRD_(bm_has_any_access)(rhs,
+                                        bm2r->addr << ADDR0_BITS,
+                                        (bm2r->addr + 1) << ADDR0_BITS));
 
-    if (bm2l != bm2r
-        && (bm2l->addr != bm2r->addr
-            || VG_(memcmp)(&bm2l->bm1, &bm2r->bm1, sizeof(bm2l->bm1)) != 0))
-    {
+      if (bm2l != bm2r
+          && (bm2l->addr != bm2r->addr
+              || VG_(memcmp)(&bm2l->bm1, &bm2r->bm1, sizeof(bm2l->bm1)) != 0))
+      {
 #if 0
-      VG_(message)(Vg_DebugMsg, "bm_equal: rhs 0x%lx -- returning false",
+         VG_(message)(Vg_DebugMsg, "bm_equal: rhs 0x%lx -- returning false",
+                      bm2r->addr << ADDR0_BITS);
+#endif
+         return False;
+      }
+   }
+   bm2r = VG_(OSetGen_Next)(rhs->oset);
+   if (bm2r)
+   {
+      tl_assert(DRD_(bm_has_any_access)(rhs,
+                                        bm2r->addr << ADDR0_BITS,
+                                        (bm2r->addr + 1) << ADDR0_BITS));
+#if 0
+      VG_(message)(Vg_DebugMsg,
+                   "bm_equal: remaining rhs 0x%lx -- returning false",
                    bm2r->addr << ADDR0_BITS);
 #endif
       return False;
-    }
-  }
-  bm2r = VG_(OSetGen_Next)(rhs->oset);
-  if (bm2r)
-  {
-    tl_assert(DRD_(bm_has_any_access)(rhs,
-                                bm2r->addr << ADDR0_BITS,
-                                (bm2r->addr + 1) << ADDR0_BITS));
-#if 0
-    VG_(message)(Vg_DebugMsg,
-                 "bm_equal: remaining rhs 0x%lx -- returning false",
-                 bm2r->addr << ADDR0_BITS);
-#endif
-    return False;
-  }
-  return True;
+   }
+   return True;
 }
 
 void DRD_(bm_swap)(struct bitmap* const bm1, struct bitmap* const bm2)
 {
-  OSet* const tmp = bm1->oset;
-  bm1->oset = bm2->oset;
-  bm2->oset = tmp;
+   OSet* const tmp = bm1->oset;
+   bm1->oset = bm2->oset;
+   bm2->oset = tmp;
 }
 
 /** Merge bitmaps *lhs and *rhs into *lhs. */
 void DRD_(bm_merge2)(struct bitmap* const lhs,
                      struct bitmap* const rhs)
 {
-  struct bitmap2* bm2l;
-  struct bitmap2ref* bm2l_ref;
-  struct bitmap2* bm2r;
-  const struct bitmap2ref* bm2r_ref;
+   struct bitmap2* bm2l;
+   struct bitmap2ref* bm2l_ref;
+   struct bitmap2* bm2r;
+   const struct bitmap2ref* bm2r_ref;
 
-  VG_(OSetGen_ResetIter)(rhs->oset);
+   VG_(OSetGen_ResetIter)(rhs->oset);
 
-  for ( ; (bm2r_ref = VG_(OSetGen_Next)(rhs->oset)) != 0; )
-  {
-    bm2r = bm2r_ref->bm2;
-    bm2l_ref = VG_(OSetGen_Lookup)(lhs->oset, &bm2r->addr);
-    if (bm2l_ref)
-    {
-      bm2l = bm2l_ref->bm2;
-      if (bm2l != bm2r)
+   for ( ; (bm2r_ref = VG_(OSetGen_Next)(rhs->oset)) != 0; )
+   {
+      bm2r = bm2r_ref->bm2;
+      bm2l_ref = VG_(OSetGen_Lookup)(lhs->oset, &bm2r->addr);
+      if (bm2l_ref)
       {
-        if (bm2l->refcnt > 1)
-          bm2l = bm2_make_exclusive(lhs, bm2l_ref);
-        bm2_merge(bm2l, bm2r);
+         bm2l = bm2l_ref->bm2;
+         if (bm2l != bm2r)
+         {
+            if (bm2l->refcnt > 1)
+               bm2l = bm2_make_exclusive(lhs, bm2l_ref);
+            bm2_merge(bm2l, bm2r);
+         }
       }
-    }
-    else
-    {
-      bm2_insert_addref(lhs, bm2r);
-    }
-  }
+      else
+      {
+         bm2_insert_addref(lhs, bm2r);
+      }
+   }
 }
 
 /**
@@ -819,114 +820,114 @@
  */
 int DRD_(bm_has_races)(struct bitmap* const lhs, struct bitmap* const rhs)
 {
-  VG_(OSetGen_ResetIter)(lhs->oset);
-  VG_(OSetGen_ResetIter)(rhs->oset);
+   VG_(OSetGen_ResetIter)(lhs->oset);
+   VG_(OSetGen_ResetIter)(rhs->oset);
 
-  for (;;)
-  {
-    const struct bitmap2ref* bm2l_ref;
-    const struct bitmap2ref* bm2r_ref;
-    const struct bitmap2* bm2l;
-    const struct bitmap2* bm2r;
-    const struct bitmap1* bm1l;
-    const struct bitmap1* bm1r;
-    unsigned k;
+   for (;;)
+   {
+      const struct bitmap2ref* bm2l_ref;
+      const struct bitmap2ref* bm2r_ref;
+      const struct bitmap2* bm2l;
+      const struct bitmap2* bm2r;
+      const struct bitmap1* bm1l;
+      const struct bitmap1* bm1r;
+      unsigned k;
 
-    bm2l_ref = VG_(OSetGen_Next)(lhs->oset);
-    bm2l = bm2l_ref->bm2;
-    bm2r_ref = VG_(OSetGen_Next)(rhs->oset);
-    bm2r = bm2r_ref->bm2;
-    while (bm2l && bm2r && bm2l->addr != bm2r->addr)
-    {
-      if (bm2l->addr < bm2r->addr)
-        bm2l = (bm2l_ref = VG_(OSetGen_Next)(lhs->oset))->bm2;
-      else
-        bm2r = (bm2r_ref = VG_(OSetGen_Next)(rhs->oset))->bm2;
-    }
-    if (bm2l == 0 || bm2r == 0)
-      break;
-
-    bm1l = &bm2l->bm1;
-    bm1r = &bm2r->bm1;
-
-    for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
-    {
-      unsigned b;
-      for (b = 0; b < BITS_PER_UWORD; b++)
+      bm2l_ref = VG_(OSetGen_Next)(lhs->oset);
+      bm2l = bm2l_ref->bm2;
+      bm2r_ref = VG_(OSetGen_Next)(rhs->oset);
+      bm2r = bm2r_ref->bm2;
+      while (bm2l && bm2r && bm2l->addr != bm2r->addr)
       {
-        UWord const access_mask
-          = ((bm1l->bm0_r[k] & bm0_mask(b)) ? LHS_R : 0)
-          | ((bm1l->bm0_w[k] & bm0_mask(b)) ? LHS_W : 0)
-          | ((bm1r->bm0_r[k] & bm0_mask(b)) ? RHS_R : 0)
-          | ((bm1r->bm0_w[k] & bm0_mask(b)) ? RHS_W : 0);
-        Addr const a = MAKE_ADDRESS(bm2l->addr, k * BITS_PER_UWORD | b);
-        if (HAS_RACE(access_mask) && ! DRD_(is_suppressed)(a, a + 1))
-        {
-          return 1;
-        }
+         if (bm2l->addr < bm2r->addr)
+            bm2l = (bm2l_ref = VG_(OSetGen_Next)(lhs->oset))->bm2;
+         else
+            bm2r = (bm2r_ref = VG_(OSetGen_Next)(rhs->oset))->bm2;
       }
-    }
-  }
-  return 0;
+      if (bm2l == 0 || bm2r == 0)
+         break;
+
+      bm1l = &bm2l->bm1;
+      bm1r = &bm2r->bm1;
+
+      for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+      {
+         unsigned b;
+         for (b = 0; b < BITS_PER_UWORD; b++)
+         {
+            UWord const access_mask
+               = ((bm1l->bm0_r[k] & bm0_mask(b)) ? LHS_R : 0)
+               | ((bm1l->bm0_w[k] & bm0_mask(b)) ? LHS_W : 0)
+               | ((bm1r->bm0_r[k] & bm0_mask(b)) ? RHS_R : 0)
+               | ((bm1r->bm0_w[k] & bm0_mask(b)) ? RHS_W : 0);
+            Addr const a = MAKE_ADDRESS(bm2l->addr, k * BITS_PER_UWORD | b);
+            if (HAS_RACE(access_mask) && ! DRD_(is_suppressed)(a, a + 1))
+            {
+               return 1;
+            }
+         }
+      }
+   }
+   return 0;
 }
 
 void DRD_(bm_print)(struct bitmap* const bm)
 {
-  struct bitmap2* bm2;
-  struct bitmap2ref* bm2ref;
+   struct bitmap2* bm2;
+   struct bitmap2ref* bm2ref;
 
-  VG_(OSetGen_ResetIter)(bm->oset);
+   VG_(OSetGen_ResetIter)(bm->oset);
 
-  for ( ; (bm2ref = VG_(OSetGen_Next)(bm->oset)) != 0; )
-  {
-    const struct bitmap1* bm1;
-    unsigned b;
+   for ( ; (bm2ref = VG_(OSetGen_Next)(bm->oset)) != 0; )
+   {
+      const struct bitmap1* bm1;
+      unsigned b;
 
-    bm2 = bm2ref->bm2;
-    bm1 = &bm2->bm1;
-    for (b = 0; b < ADDR0_COUNT; b++)
-    {
-      const Addr a = (bm2->addr << ADDR0_BITS) | b;
-      const Bool r = bm0_is_set(bm1->bm0_r, b) != 0;
-      const Bool w = bm0_is_set(bm1->bm0_w, b) != 0;
-      if (r || w)
+      bm2 = bm2ref->bm2;
+      bm1 = &bm2->bm1;
+      for (b = 0; b < ADDR0_COUNT; b++)
       {
-        VG_(printf)("0x%08lx %c %c\n",
-                    a,
-                    w ? 'W' : ' ',
-                    r ? 'R' : ' ');
+         const Addr a = (bm2->addr << ADDR0_BITS) | b;
+         const Bool r = bm0_is_set(bm1->bm0_r, b) != 0;
+         const Bool w = bm0_is_set(bm1->bm0_w, b) != 0;
+         if (r || w)
+         {
+            VG_(printf)("0x%08lx %c %c\n",
+                        a,
+                        w ? 'W' : ' ',
+                        r ? 'R' : ' ');
+         }
       }
-    }
-  }
+   }
 }
 
 ULong DRD_(bm_get_bitmap_creation_count)(void)
 {
-  return s_bitmap_creation_count;
+   return s_bitmap_creation_count;
 }
 
 ULong DRD_(bm_get_bitmap2_node_creation_count)(void)
 {
-  return s_bitmap2_node_creation_count;
+   return s_bitmap2_node_creation_count;
 }
 
 ULong DRD_(bm_get_bitmap2_creation_count)(void)
 {
-  return s_bitmap2_creation_count;
+   return s_bitmap2_creation_count;
 }
 
 /** Allocate and initialize a second level bitmap. */
 static struct bitmap2* bm2_new(const UWord a1)
 {
-  struct bitmap2* bm2;
+   struct bitmap2* bm2;
 
-  bm2 = VG_(malloc)("drd.bitmap.bm2n.1", sizeof(*bm2));
-  bm2->addr   = a1;
-  bm2->refcnt = 1;
+   bm2 = VG_(malloc)("drd.bitmap.bm2n.1", sizeof(*bm2));
+   bm2->addr   = a1;
+   bm2->refcnt = 1;
 
-  s_bitmap2_creation_count++;
+   s_bitmap2_creation_count++;
 
-  return bm2;
+   return bm2;
 }
 
 /** Make a copy of a shared second level bitmap such that the copy can be
@@ -938,46 +939,46 @@
 static struct bitmap2* bm2_make_exclusive(struct bitmap* const bm,
                                           struct bitmap2ref* const bm2ref)
 {
-  UWord a1;
-  struct bitmap2* bm2;
-  struct bitmap2* bm2_copy;
+   UWord a1;
+   struct bitmap2* bm2;
+   struct bitmap2* bm2_copy;
 
-  tl_assert(bm);
-  tl_assert(bm2ref);
-  bm2 = bm2ref->bm2;
-  tl_assert(bm2);
-  tl_assert(bm2->refcnt > 1);
-  bm2->refcnt--;
-  tl_assert(bm2->refcnt >= 1);
-  a1 = bm2->addr;
-  bm2_copy = bm2_new(a1);
-  tl_assert(bm2_copy);
-  tl_assert(bm2_copy->addr   == a1);
-  tl_assert(bm2_copy->refcnt == 1);
-  VG_(memcpy)(&bm2_copy->bm1, &bm2->bm1, sizeof(bm2->bm1));
-  bm2ref->bm2 = bm2_copy;
+   tl_assert(bm);
+   tl_assert(bm2ref);
+   bm2 = bm2ref->bm2;
+   tl_assert(bm2);
+   tl_assert(bm2->refcnt > 1);
+   bm2->refcnt--;
+   tl_assert(bm2->refcnt >= 1);
+   a1 = bm2->addr;
+   bm2_copy = bm2_new(a1);
+   tl_assert(bm2_copy);
+   tl_assert(bm2_copy->addr   == a1);
+   tl_assert(bm2_copy->refcnt == 1);
+   VG_(memcpy)(&bm2_copy->bm1, &bm2->bm1, sizeof(bm2->bm1));
+   bm2ref->bm2 = bm2_copy;
 
-  bm_update_cache(bm, a1, bm2_copy);
+   bm_update_cache(bm, a1, bm2_copy);
 
-  return bm2_copy;
+   return bm2_copy;
 }
 
 static void bm2_merge(struct bitmap2* const bm2l,
                       const struct bitmap2* const bm2r)
 {
-  unsigned k;
+   unsigned k;
 
-  tl_assert(bm2l);
-  tl_assert(bm2r);
-  tl_assert(bm2l->addr == bm2r->addr);
-  tl_assert(bm2l->refcnt == 1);
+   tl_assert(bm2l);
+   tl_assert(bm2r);
+   tl_assert(bm2l->addr == bm2r->addr);
+   tl_assert(bm2l->refcnt == 1);
 
-  for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
-  {
-    bm2l->bm1.bm0_r[k] |= bm2r->bm1.bm0_r[k];
-  }
-  for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
-  {
-    bm2l->bm1.bm0_w[k] |= bm2r->bm1.bm0_w[k];
-  }
+   for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+   {
+      bm2l->bm1.bm0_r[k] |= bm2r->bm1.bm0_r[k];
+   }
+   for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
+   {
+      bm2l->bm1.bm0_w[k] |= bm2r->bm1.bm0_w[k];
+   }
 }
diff --git a/drd/drd_bitmap.h b/drd/drd_bitmap.h
index 731bbcd..84f1eb5 100644
--- a/drd/drd_bitmap.h
+++ b/drd/drd_bitmap.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -47,13 +48,13 @@
 
 #define ADDR0_MASK (ADDR0_COUNT - 1)
 
-#define SPLIT_ADDRESS(a)            \
-  UWord a##0 = ((a) & ADDR0_MASK);  \
-  UWord a##1 = ((a) >> ADDR0_BITS);
+#define SPLIT_ADDRESS(a)                        \
+   UWord a##0 = ((a) & ADDR0_MASK);             \
+   UWord a##1 = ((a) >> ADDR0_BITS);
 
 // Assumption: sizeof(Addr) == sizeof(UWord).
-#define MAKE_ADDRESS(a1, a0)  \
-  (Addr)(((UWord)(a1) << (ADDR0_BITS)) | ((UWord)(a0)))
+#define MAKE_ADDRESS(a1, a0)                                    \
+   (Addr)(((UWord)(a1) << (ADDR0_BITS)) | ((UWord)(a0)))
 
 #define BITS_PER_UWORD (8UL*sizeof(UWord))
 #if defined(VGA_x86) || defined(VGA_ppc32)
@@ -91,21 +92,21 @@
 /* Lowest level, corresponding to the lowest ADDR0_BITS of an address. */
 struct bitmap1
 {
-  UWord bm0_r[BITMAP1_UWORD_COUNT];
-  UWord bm0_w[BITMAP1_UWORD_COUNT];
+   UWord bm0_r[BITMAP1_UWORD_COUNT];
+   UWord bm0_w[BITMAP1_UWORD_COUNT];
 };
 
 static __inline__ UWord bm0_mask(const Addr a)
 {
-  return ((UWord)1 << UWORD_LSB(a));
+   return ((UWord)1 << UWORD_LSB(a));
 }
 
 static __inline__ void bm0_set(UWord* bm0, const Addr a)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(a < ADDR0_COUNT);
+   tl_assert(a < ADDR0_COUNT);
 #endif
-  bm0[a >> BITS_PER_BITS_PER_UWORD] |= (UWord)1 << UWORD_LSB(a);
+   bm0[a >> BITS_PER_BITS_PER_UWORD] |= (UWord)1 << UWORD_LSB(a);
 }
 
 /** Set all of the addresses in range [ a1 .. a1 + size [ in bitmap bm0. */
@@ -113,21 +114,21 @@
                                      const Addr a1, const SizeT size)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(a1 < ADDR0_COUNT);
-  tl_assert(size > 0);
-  tl_assert(a1 + size <= ADDR0_COUNT);
-  tl_assert(UWORD_MSB(a1) == UWORD_MSB(a1 + size - 1));
+   tl_assert(a1 < ADDR0_COUNT);
+   tl_assert(size > 0);
+   tl_assert(a1 + size <= ADDR0_COUNT);
+   tl_assert(UWORD_MSB(a1) == UWORD_MSB(a1 + size - 1));
 #endif
-  bm0[a1 >> BITS_PER_BITS_PER_UWORD]
-    |= (((UWord)1 << size) - 1) << UWORD_LSB(a1);
+   bm0[a1 >> BITS_PER_BITS_PER_UWORD]
+      |= (((UWord)1 << size) - 1) << UWORD_LSB(a1);
 }
 
 static __inline__ void bm0_clear(UWord* bm0, const Addr a)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(a < ADDR0_COUNT);
+   tl_assert(a < ADDR0_COUNT);
 #endif
-  bm0[a >> BITS_PER_BITS_PER_UWORD] &= ~((UWord)1 << UWORD_LSB(a));
+   bm0[a >> BITS_PER_BITS_PER_UWORD] &= ~((UWord)1 << UWORD_LSB(a));
 }
 
 /** Clear all of the addresses in range [ a1 .. a1 + size [ in bitmap bm0. */
@@ -135,21 +136,21 @@
                                        const Addr a1, const SizeT size)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(a1 < ADDR0_COUNT);
-  tl_assert(size > 0);
-  tl_assert(a1 + size <= ADDR0_COUNT);
-  tl_assert(UWORD_MSB(a1) == UWORD_MSB(a1 + size - 1));
+   tl_assert(a1 < ADDR0_COUNT);
+   tl_assert(size > 0);
+   tl_assert(a1 + size <= ADDR0_COUNT);
+   tl_assert(UWORD_MSB(a1) == UWORD_MSB(a1 + size - 1));
 #endif
-  bm0[a1 >> BITS_PER_BITS_PER_UWORD]
-    &= ~(((UWord)1 << size) - 1) << UWORD_LSB(a1);
+   bm0[a1 >> BITS_PER_BITS_PER_UWORD]
+      &= ~(((UWord)1 << size) - 1) << UWORD_LSB(a1);
 }
 
 static __inline__ UWord bm0_is_set(const UWord* bm0, const Addr a)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(a < ADDR0_COUNT);
+   tl_assert(a < ADDR0_COUNT);
 #endif
-  return (bm0[a >> BITS_PER_BITS_PER_UWORD] & ((UWord)1 << UWORD_LSB(a)));
+   return (bm0[a >> BITS_PER_BITS_PER_UWORD] & ((UWord)1 << UWORD_LSB(a)));
 }
 
 /** Return true if any of the bits [ a1 .. a1+size [ are set in bm0. */
@@ -157,13 +158,13 @@
                                        const Addr a1, const SizeT size)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(a1 < ADDR0_COUNT);
-  tl_assert(size > 0);
-  tl_assert(a1 + size <= ADDR0_COUNT);
-  tl_assert(UWORD_MSB(a1) == UWORD_MSB(a1 + size - 1));
+   tl_assert(a1 < ADDR0_COUNT);
+   tl_assert(size > 0);
+   tl_assert(a1 + size <= ADDR0_COUNT);
+   tl_assert(UWORD_MSB(a1) == UWORD_MSB(a1 + size - 1));
 #endif
-  return (bm0[a1 >> BITS_PER_BITS_PER_UWORD]
-          & ((((UWord)1 << size) - 1) << UWORD_LSB(a1)));
+   return (bm0[a1 >> BITS_PER_BITS_PER_UWORD]
+           & ((((UWord)1 << size) - 1) << UWORD_LSB(a1)));
 }
 
 
@@ -176,22 +177,22 @@
 /* Second level bitmap. */
 struct bitmap2
 {
-  Addr           addr;   ///< address >> ADDR0_BITS
-  int            refcnt;
-  struct bitmap1 bm1;
+   Addr           addr;   ///< address >> ADDR0_BITS
+   int            refcnt;
+   struct bitmap1 bm1;
 };
 
 /* One node of bitmap::oset. */
 struct bitmap2ref
 {
-  Addr            addr; ///< address >> ADDR0_BITS
-  struct bitmap2* bm2;
+   Addr            addr; ///< address >> ADDR0_BITS
+   struct bitmap2* bm2;
 };
 
 struct bm_cache_elem
 {
-  Addr            a1;
-  struct bitmap2* bm2;
+   Addr            a1;
+   struct bitmap2* bm2;
 };
 
 #define N_CACHE_ELEM 4
@@ -199,8 +200,8 @@
 /* Complete bitmap. */
 struct bitmap
 {
-  struct bm_cache_elem cache[N_CACHE_ELEM];
-  OSet*                oset;
+   struct bm_cache_elem cache[N_CACHE_ELEM];
+   OSet*                oset;
 };
 
 
@@ -215,26 +216,26 @@
 void bm_cache_rotate(struct bm_cache_elem cache[], const int n)
 {
 #if 0
-  struct bm_cache_elem t;
+   struct bm_cache_elem t;
 
-  tl_assert(2 <= n && n <= 8);
+   tl_assert(2 <= n && n <= 8);
 
-  t = cache[0];
-  if (n > 1)
-    cache[0] = cache[1];
-  if (n > 2)
-    cache[1] = cache[2];
-  if (n > 3)
-    cache[2] = cache[3];
-  if (n > 4)
-    cache[3] = cache[4];
-  if (n > 5)
-    cache[4] = cache[5];
-  if (n > 6)
-    cache[5] = cache[6];
-  if (n > 7)
-    cache[6] = cache[7];
-  cache[n - 1] = t;
+   t = cache[0];
+   if (n > 1)
+      cache[0] = cache[1];
+   if (n > 2)
+      cache[1] = cache[2];
+   if (n > 3)
+      cache[2] = cache[3];
+   if (n > 4)
+      cache[3] = cache[4];
+   if (n > 5)
+      cache[4] = cache[5];
+   if (n > 6)
+      cache[5] = cache[6];
+   if (n > 7)
+      cache[6] = cache[7];
+   cache[n - 1] = t;
 #endif
 }
 
@@ -243,77 +244,77 @@
                      struct bitmap2** bm2)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm);
-  tl_assert(bm2);
+   tl_assert(bm);
+   tl_assert(bm2);
 #endif
 
 #if N_CACHE_ELEM > 8
 #error Please update the code below.
 #endif
 #if N_CACHE_ELEM >= 1
-  if (a1 == bm->cache[0].a1)
-  {
-    *bm2 = bm->cache[0].bm2;
-    return True;
-  }
+   if (a1 == bm->cache[0].a1)
+   {
+      *bm2 = bm->cache[0].bm2;
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 2
-  if (a1 == bm->cache[1].a1)
-  {
-    *bm2 = bm->cache[1].bm2;
-    return True;
-  }
+   if (a1 == bm->cache[1].a1)
+   {
+      *bm2 = bm->cache[1].bm2;
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 3
-  if (a1 == bm->cache[2].a1)
-  {
-    *bm2 = bm->cache[2].bm2;
-    bm_cache_rotate(bm->cache, 3);
-    return True;
-  }
+   if (a1 == bm->cache[2].a1)
+   {
+      *bm2 = bm->cache[2].bm2;
+      bm_cache_rotate(bm->cache, 3);
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 4
-  if (a1 == bm->cache[3].a1)
-  {
-    *bm2 = bm->cache[3].bm2;
-    bm_cache_rotate(bm->cache, 4);
-    return True;
-  }
+   if (a1 == bm->cache[3].a1)
+   {
+      *bm2 = bm->cache[3].bm2;
+      bm_cache_rotate(bm->cache, 4);
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 5
-  if (a1 == bm->cache[4].a1)
-  {
-    *bm2 = bm->cache[4].bm2;
-    bm_cache_rotate(bm->cache, 5);
-    return True;
-  }
+   if (a1 == bm->cache[4].a1)
+   {
+      *bm2 = bm->cache[4].bm2;
+      bm_cache_rotate(bm->cache, 5);
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 6
-  if (a1 == bm->cache[5].a1)
-  {
-    *bm2 = bm->cache[5].bm2;
-    bm_cache_rotate(bm->cache, 6);
-    return True;
-  }
+   if (a1 == bm->cache[5].a1)
+   {
+      *bm2 = bm->cache[5].bm2;
+      bm_cache_rotate(bm->cache, 6);
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 7
-  if (a1 == bm->cache[6].a1)
-  {
-    *bm2 = bm->cache[6].bm2;
-    bm_cache_rotate(bm->cache, 7);
-    return True;
-  }
+   if (a1 == bm->cache[6].a1)
+   {
+      *bm2 = bm->cache[6].bm2;
+      bm_cache_rotate(bm->cache, 7);
+      return True;
+   }
 #endif
 #if N_CACHE_ELEM >= 8
-  if (a1 == bm->cache[7].a1)
-  {
-    *bm2 = bm->cache[7].bm2;
-    bm_cache_rotate(bm->cache, 8);
-    return True;
-  }
+   if (a1 == bm->cache[7].a1)
+   {
+      *bm2 = bm->cache[7].bm2;
+      bm_cache_rotate(bm->cache, 8);
+      return True;
+   }
 #endif
-  *bm2 = 0;
-  return False;
+   *bm2 = 0;
+   return False;
 }
 
 static __inline__
@@ -322,35 +323,35 @@
                      struct bitmap2* const bm2)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm);
+   tl_assert(bm);
 #endif
 
 #if N_CACHE_ELEM > 8
 #error Please update the code below.
 #endif
 #if N_CACHE_ELEM >= 8
-  bm->cache[7] = bm->cache[6];
+   bm->cache[7] = bm->cache[6];
 #endif
 #if N_CACHE_ELEM >= 7
-  bm->cache[6] = bm->cache[5];
+   bm->cache[6] = bm->cache[5];
 #endif
 #if N_CACHE_ELEM >= 6
-  bm->cache[5] = bm->cache[4];
+   bm->cache[5] = bm->cache[4];
 #endif
 #if N_CACHE_ELEM >= 5
-  bm->cache[4] = bm->cache[3];
+   bm->cache[4] = bm->cache[3];
 #endif
 #if N_CACHE_ELEM >= 4
-  bm->cache[3] = bm->cache[2];
+   bm->cache[3] = bm->cache[2];
 #endif
 #if N_CACHE_ELEM >= 3
-  bm->cache[2] = bm->cache[1];
+   bm->cache[2] = bm->cache[1];
 #endif
 #if N_CACHE_ELEM >= 2
-  bm->cache[1] = bm->cache[0];
+   bm->cache[1] = bm->cache[0];
 #endif
-  bm->cache[0].a1  = a1;
-  bm->cache[0].bm2 = bm2;
+   bm->cache[0].a1  = a1;
+   bm->cache[0].bm2 = bm2;
 }
 
 /** Look up the address a1 in bitmap bm and return a pointer to a potentially
@@ -363,22 +364,22 @@
 static __inline__
 const struct bitmap2* bm2_lookup(struct bitmap* const bm, const UWord a1)
 {
-  struct bitmap2*    bm2;
-  struct bitmap2ref* bm2ref;
+   struct bitmap2*    bm2;
+   struct bitmap2ref* bm2ref;
 
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm);
+   tl_assert(bm);
 #endif
-  if (! bm_cache_lookup(bm, a1, &bm2))
-  {
-    bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
-    if (bm2ref)
-    {
-      bm2 = bm2ref->bm2;
-    }
-    bm_update_cache(*(struct bitmap**)&bm, a1, bm2);
-  }
-  return bm2;
+   if (! bm_cache_lookup(bm, a1, &bm2))
+   {
+      bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
+      if (bm2ref)
+      {
+         bm2 = bm2ref->bm2;
+      }
+      bm_update_cache(*(struct bitmap**)&bm, a1, bm2);
+   }
+   return bm2;
 }
 
 /** Look up the address a1 in bitmap bm and return a pointer to a second
@@ -391,40 +392,40 @@
 struct bitmap2*
 bm2_lookup_exclusive(struct bitmap* const bm, const UWord a1)
 {
-  struct bitmap2ref* bm2ref;
-  struct bitmap2* bm2;
+   struct bitmap2ref* bm2ref;
+   struct bitmap2* bm2;
 
-  bm2ref = 0;
-  if (bm_cache_lookup(bm, a1, &bm2))
-  {
-    if (bm2 == 0)
-      return 0;
-    if (bm2->refcnt > 1)
-    {
+   bm2ref = 0;
+   if (bm_cache_lookup(bm, a1, &bm2))
+   {
+      if (bm2 == 0)
+         return 0;
+      if (bm2->refcnt > 1)
+      {
+         bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
+      }
+   }
+   else
+   {
       bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
-    }
-  }
-  else
-  {
-    bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
-    if (bm2ref == 0)
-      return 0;
-    bm2 = bm2ref->bm2;
-  }
+      if (bm2ref == 0)
+         return 0;
+      bm2 = bm2ref->bm2;
+   }
 
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm2);
+   tl_assert(bm2);
 #endif
 
-  if (bm2->refcnt > 1)
-  {
+   if (bm2->refcnt > 1)
+   {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-    tl_assert(bm2ref);
+      tl_assert(bm2ref);
 #endif
-    bm2 = bm2_make_exclusive(*(struct bitmap**)&bm, bm2ref);
-  }
+      bm2 = bm2_make_exclusive(*(struct bitmap**)&bm, bm2ref);
+   }
 
-  return bm2;
+   return bm2;
 }
 
 /** Look up the address a1 in bitmap bm. The returned second level bitmap has
@@ -436,20 +437,20 @@
 static __inline__
 struct bitmap2* bm2_insert(struct bitmap* const bm, const UWord a1)
 {
-  struct bitmap2ref* bm2ref;
-  struct bitmap2* bm2;
+   struct bitmap2ref* bm2ref;
+   struct bitmap2* bm2;
 
-  s_bitmap2_node_creation_count++;
-  bm2ref       = VG_(OSetGen_AllocNode)(bm->oset, sizeof(*bm2ref));
-  bm2ref->addr = a1;
-  bm2          = bm2_new(a1);
-  bm2ref->bm2  = bm2;
-  VG_(memset)(&bm2->bm1, 0, sizeof(bm2->bm1));
-  VG_(OSetGen_Insert)(bm->oset, bm2ref);
+   s_bitmap2_node_creation_count++;
+   bm2ref       = VG_(OSetGen_AllocNode)(bm->oset, sizeof(*bm2ref));
+   bm2ref->addr = a1;
+   bm2          = bm2_new(a1);
+   bm2ref->bm2  = bm2;
+   VG_(memset)(&bm2->bm1, 0, sizeof(bm2->bm1));
+   VG_(OSetGen_Insert)(bm->oset, bm2ref);
   
-  bm_update_cache(*(struct bitmap**)&bm, a1, bm2);
+   bm_update_cache(*(struct bitmap**)&bm, a1, bm2);
 
-  return bm2;
+   return bm2;
 }
 
 /** Insert a new node in bitmap bm that points to the second level bitmap
@@ -459,23 +460,23 @@
 struct bitmap2* bm2_insert_addref(struct bitmap* const bm,
                                   struct bitmap2* const bm2)
 {
-  struct bitmap2ref* bm2ref;
+   struct bitmap2ref* bm2ref;
 
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm);
-  tl_assert(VG_(OSetGen_Lookup)(bm->oset, &bm2->addr) == 0);
+   tl_assert(bm);
+   tl_assert(VG_(OSetGen_Lookup)(bm->oset, &bm2->addr) == 0);
 #endif
 
-  s_bitmap2_node_creation_count++;
-  bm2ref       = VG_(OSetGen_AllocNode)(bm->oset, sizeof(*bm2ref));
-  bm2ref->addr = bm2->addr;
-  bm2ref->bm2  = bm2;
-  bm2->refcnt++;
-  VG_(OSetGen_Insert)(bm->oset, bm2ref);
+   s_bitmap2_node_creation_count++;
+   bm2ref       = VG_(OSetGen_AllocNode)(bm->oset, sizeof(*bm2ref));
+   bm2ref->addr = bm2->addr;
+   bm2ref->bm2  = bm2;
+   bm2->refcnt++;
+   VG_(OSetGen_Insert)(bm->oset, bm2ref);
   
-  bm_update_cache(*(struct bitmap**)&bm, bm2->addr, bm2);
+   bm_update_cache(*(struct bitmap**)&bm, bm2->addr, bm2);
 
-  return bm2;
+   return bm2;
 }
 
 /** Look up the address a1 in bitmap bm, and insert it if not found.
@@ -487,33 +488,33 @@
 static __inline__
 struct bitmap2* bm2_lookup_or_insert(struct bitmap* const bm, const UWord a1)
 {
-  struct bitmap2ref* bm2ref;
-  struct bitmap2* bm2;
+   struct bitmap2ref* bm2ref;
+   struct bitmap2* bm2;
 
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm);
+   tl_assert(bm);
 #endif
-  if (bm_cache_lookup(bm, a1, &bm2))
-  {
-    if (bm2 == 0)
-    {
-      bm2 = bm2_insert(bm, a1);
-    }
-  }
-  else
-  {
-    bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
-    if (bm2ref)
-    {
-      bm2 = bm2ref->bm2;
-    }
-    else
-    {
-      bm2 = bm2_insert(bm, a1);
-    }
-    bm_update_cache(*(struct bitmap**)&bm, a1, bm2);
-  }
-  return bm2;
+   if (bm_cache_lookup(bm, a1, &bm2))
+   {
+      if (bm2 == 0)
+      {
+         bm2 = bm2_insert(bm, a1);
+      }
+   }
+   else
+   {
+      bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
+      if (bm2ref)
+      {
+         bm2 = bm2ref->bm2;
+      }
+      else
+      {
+         bm2 = bm2_insert(bm, a1);
+      }
+      bm_update_cache(*(struct bitmap**)&bm, a1, bm2);
+   }
+   return bm2;
 }
 
 /** Look up the address a1 in bitmap bm, and insert it if not found.
@@ -526,72 +527,72 @@
 struct bitmap2* bm2_lookup_or_insert_exclusive(struct bitmap* const bm,
                                                const UWord a1)
 {
-  struct bitmap2* bm2;
+   struct bitmap2* bm2;
 
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm);
+   tl_assert(bm);
 #endif
-  bm2 = (struct bitmap2*)bm2_lookup_or_insert(bm, a1);
+   bm2 = (struct bitmap2*)bm2_lookup_or_insert(bm, a1);
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(bm2);
+   tl_assert(bm2);
 #endif
-  if (bm2->refcnt > 1)
-  {
-    struct bitmap2ref* bm2ref;
-    bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
-    bm2 = bm2_make_exclusive(bm, bm2ref);
-  }
-  return bm2;
+   if (bm2->refcnt > 1)
+   {
+      struct bitmap2ref* bm2ref;
+      bm2ref = VG_(OSetGen_Lookup)(bm->oset, &a1);
+      bm2 = bm2_make_exclusive(bm, bm2ref);
+   }
+   return bm2;
 }
 
 static __inline__
 void bm_access_aligned_load(struct bitmap* const bm,
                             const Addr a1, const SizeT size)
 {
-  struct bitmap2* bm2;
+   struct bitmap2* bm2;
 
-  bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
-  bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size);
+   bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
+   bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size);
 }
 
 static __inline__
 void bm_access_aligned_store(struct bitmap* const bm,
                              const Addr a1, const SizeT size)
 {
-  struct bitmap2* bm2;
+   struct bitmap2* bm2;
 
-  bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
-  bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size);
+   bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
+   bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size);
 }
 
 static __inline__
 Bool bm_aligned_load_has_conflict_with(struct bitmap* const bm,
                                        const Addr a1, const SizeT size)
 {
-  const struct bitmap2* bm2;
+   const struct bitmap2* bm2;
 
-  bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
+   bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
 
-  return (bm2 && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size));
+   return (bm2 && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size));
 }
 
 static __inline__
 Bool bm_aligned_store_has_conflict_with(struct bitmap* const bm,
                                         const Addr a1, const SizeT size)
 {
-  const struct bitmap2* bm2;
+   const struct bitmap2* bm2;
 
-  bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
+   bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
 
-  if (bm2)
-  {
-    if (bm0_is_any_set(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size)
-        | bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size))
-    {
-      return True;
-    }
-  }
-  return False;
+   if (bm2)
+   {
+      if (bm0_is_any_set(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size)
+          | bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size))
+      {
+         return True;
+      }
+   }
+   return False;
 }
 
 #endif /* __DRD_BITMAP_H */
diff --git a/drd/drd_clientobj.c b/drd/drd_clientobj.c
index bb54657..87565e4 100644
--- a/drd/drd_clientobj.c
+++ b/drd/drd_clientobj.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -50,16 +51,16 @@
 
 void DRD_(clientobj_set_trace)(const Bool trace)
 {
-  s_trace_clientobj = trace;
+   s_trace_clientobj = trace;
 }
 
 /** Initialize the client object set. */
 void DRD_(clientobj_init)(void)
 {
-  tl_assert(s_clientobj_set == 0);
-  s_clientobj_set = VG_(OSetGen_Create)(0, 0, VG_(malloc),
-                                        "drd.clientobj.ci.1", VG_(free));
-  tl_assert(s_clientobj_set);
+   tl_assert(s_clientobj_set == 0);
+   s_clientobj_set = VG_(OSetGen_Create)(0, 0, VG_(malloc),
+                                         "drd.clientobj.ci.1", VG_(free));
+   tl_assert(s_clientobj_set);
 }
 
 /**
@@ -69,10 +70,10 @@
  */
 void DRD_(clientobj_cleanup)(void)
 {
-  tl_assert(s_clientobj_set);
-  tl_assert(VG_(OSetGen_Size)(s_clientobj_set) == 0);
-  VG_(OSetGen_Destroy)(s_clientobj_set);
-  s_clientobj_set = 0;
+   tl_assert(s_clientobj_set);
+   tl_assert(VG_(OSetGen_Size)(s_clientobj_set) == 0);
+   VG_(OSetGen_Destroy)(s_clientobj_set);
+   s_clientobj_set = 0;
 }
 
 /**
@@ -82,7 +83,7 @@
  */
 DrdClientobj* DRD_(clientobj_get_any)(const Addr addr)
 {
-  return VG_(OSetGen_Lookup)(s_clientobj_set, &addr);
+   return VG_(OSetGen_Lookup)(s_clientobj_set, &addr);
 }
 
 /**
@@ -92,11 +93,11 @@
  */
 DrdClientobj* DRD_(clientobj_get)(const Addr addr, const ObjType t)
 {
-  DrdClientobj* p;
-  p = VG_(OSetGen_Lookup)(s_clientobj_set, &addr);
-  if (p && p->any.type == t)
-    return p;
-  return 0;
+   DrdClientobj* p;
+   p = VG_(OSetGen_Lookup)(s_clientobj_set, &addr);
+   if (p && p->any.type == t)
+      return p;
+   return 0;
 }
 
 /** Return true if and only if the address range of any client object overlaps
@@ -104,45 +105,47 @@
  */
 Bool DRD_(clientobj_present)(const Addr a1, const Addr a2)
 {
-  DrdClientobj *p;
+   DrdClientobj *p;
 
-  tl_assert(a1 < a2);
-  VG_(OSetGen_ResetIter)(s_clientobj_set);
-  for ( ; (p = VG_(OSetGen_Next)(s_clientobj_set)) != 0; )
-  {
-    if (a1 <= p->any.a1 && p->any.a1 < a2)
-    {
-      return True;  
-    }
-  }
-  return False;
+   tl_assert(a1 < a2);
+   VG_(OSetGen_ResetIter)(s_clientobj_set);
+   for ( ; (p = VG_(OSetGen_Next)(s_clientobj_set)) != 0; )
+   {
+      if (a1 <= p->any.a1 && p->any.a1 < a2)
+      {
+         return True;  
+      }
+   }
+   return False;
 }
 
-/** Add state information for the client object at client address addr and
- *  of type t. Suppress data race reports on the address range [addr,addr+size[.
- *  @pre No other client object is present in the address range [addr,addr+size[.
+/**
+ * Add state information for the client object at client address addr and
+ * of type t. Suppress data race reports on the address range [addr,addr+size[.
+ *
+ * @pre No other client object is present in the address range [addr,addr+size[.
  */
 DrdClientobj* DRD_(clientobj_add)(const Addr a1, const ObjType t)
 {
-  DrdClientobj* p;
+   DrdClientobj* p;
 
-  tl_assert(! DRD_(clientobj_present)(a1, a1 + 1));
-  tl_assert(VG_(OSetGen_Lookup)(s_clientobj_set, &a1) == 0);
+   tl_assert(! DRD_(clientobj_present)(a1, a1 + 1));
+   tl_assert(VG_(OSetGen_Lookup)(s_clientobj_set, &a1) == 0);
 
-  if (s_trace_clientobj)
-  {
-    VG_(message)(Vg_UserMsg, "Adding client object 0x%lx of type %d", a1, t);
-  }
+   if (s_trace_clientobj)
+   {
+      VG_(message)(Vg_UserMsg, "Adding client object 0x%lx of type %d", a1, t);
+   }
 
-  p = VG_(OSetGen_AllocNode)(s_clientobj_set, sizeof(*p));
-  VG_(memset)(p, 0, sizeof(*p));
-  p->any.a1   = a1;
-  p->any.type = t;
-  p->any.first_observed_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
-  VG_(OSetGen_Insert)(s_clientobj_set, p);
-  tl_assert(VG_(OSetGen_Lookup)(s_clientobj_set, &a1) == p);
-  DRD_(start_suppression)(a1, a1 + 1, "clientobj");
-  return p;
+   p = VG_(OSetGen_AllocNode)(s_clientobj_set, sizeof(*p));
+   VG_(memset)(p, 0, sizeof(*p));
+   p->any.a1   = a1;
+   p->any.type = t;
+   p->any.first_observed_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
+   VG_(OSetGen_Insert)(s_clientobj_set, p);
+   tl_assert(VG_(OSetGen_Lookup)(s_clientobj_set, &a1) == p);
+   DRD_(start_suppression)(a1, a1 + 1, "clientobj");
+   return p;
 }
 
 /**
@@ -153,12 +156,12 @@
  */
 Bool DRD_(clientobj_remove)(const Addr addr, const ObjType t)
 {
-  DrdClientobj* p;
+   DrdClientobj* p;
 
-  p = VG_(OSetGen_Lookup)(s_clientobj_set, &addr);
-  tl_assert(p);
-  tl_assert(p->any.type == t);
-  return clientobj_remove_obj(p);
+   p = VG_(OSetGen_Lookup)(s_clientobj_set, &addr);
+   tl_assert(p);
+   tl_assert(p->any.type == t);
+   return clientobj_remove_obj(p);
 }
 
 /**
@@ -172,19 +175,19 @@
  */
 static Bool clientobj_remove_obj(DrdClientobj* const p)
 {
-  tl_assert(p);
+   tl_assert(p);
 
-  if (s_trace_clientobj)
-  {
-    VG_(message)(Vg_UserMsg, "Removing client object 0x%lx of type %d",
-                 p->any.a1, p->any.type);
-  }
+   if (s_trace_clientobj)
+   {
+      VG_(message)(Vg_UserMsg, "Removing client object 0x%lx of type %d",
+                   p->any.a1, p->any.type);
+   }
 
-  tl_assert(p->any.cleanup);
-  (*p->any.cleanup)(p);
-  VG_(OSetGen_Remove)(s_clientobj_set, &p->any.a1);
-  VG_(OSetGen_FreeNode)(s_clientobj_set, p);
-  return True;
+   tl_assert(p->any.cleanup);
+   (*p->any.cleanup)(p);
+   VG_(OSetGen_Remove)(s_clientobj_set, &p->any.a1);
+   VG_(OSetGen_FreeNode)(s_clientobj_set, p);
+   return True;
 }
 
 /**
@@ -196,26 +199,26 @@
  */
 void DRD_(clientobj_stop_using_mem)(const Addr a1, const Addr a2)
 {
-  Addr removed_at;
-  DrdClientobj* p;
+   Addr removed_at;
+   DrdClientobj* p;
 
-  tl_assert(s_clientobj_set);
+   tl_assert(s_clientobj_set);
 
-  if (! DRD_(is_any_suppressed)(a1, a2))
-    return;
+   if (! DRD_(is_any_suppressed)(a1, a2))
+      return;
 
-  VG_(OSetGen_ResetIterAt)(s_clientobj_set, &a1);
-  for ( ; (p = VG_(OSetGen_Next)(s_clientobj_set)) != 0 && p->any.a1 < a2; )
-  {
-    tl_assert(a1 <= p->any.a1);
-    removed_at = p->any.a1;
-    clientobj_remove_obj(p);
-    /*
-     * The above call removes an element from the oset and hence
-     * invalidates the iterator. Restore the iterator.
-     */
-    VG_(OSetGen_ResetIterAt)(s_clientobj_set, &removed_at);
-  }
+   VG_(OSetGen_ResetIterAt)(s_clientobj_set, &a1);
+   for ( ; (p = VG_(OSetGen_Next)(s_clientobj_set)) != 0 && p->any.a1 < a2; )
+   {
+      tl_assert(a1 <= p->any.a1);
+      removed_at = p->any.a1;
+      clientobj_remove_obj(p);
+      /*
+       * The above call removes an element from the oset and hence
+       * invalidates the iterator. Restore the iterator.
+       */
+      VG_(OSetGen_ResetIterAt)(s_clientobj_set, &removed_at);
+   }
 }
 
 /**
@@ -224,27 +227,27 @@
  */
 void DRD_(clientobj_delete_thread)(const DrdThreadId tid)
 {
-  DrdClientobj *p;
+   DrdClientobj *p;
 
-  VG_(OSetGen_ResetIter)(s_clientobj_set);
-  for ( ; (p = VG_(OSetGen_Next)(s_clientobj_set)) != 0; )
-  {
-    if (p->any.delete_thread)
-    {
-      (*p->any.delete_thread)(p, tid);
-    }
-  }
+   VG_(OSetGen_ResetIter)(s_clientobj_set);
+   for ( ; (p = VG_(OSetGen_Next)(s_clientobj_set)) != 0; )
+   {
+      if (p->any.delete_thread)
+      {
+         (*p->any.delete_thread)(p, tid);
+      }
+   }
 }
 
 const char* DRD_(clientobj_type_name)(const ObjType t)
 {
-  switch (t)
-  {
-  case ClientMutex:     return "mutex";
-  case ClientCondvar:   return "cond";
-  case ClientSemaphore: return "semaphore";
-  case ClientBarrier:   return "barrier";
-  case ClientRwlock:    return "rwlock";
-  }
-  return "(unknown)";
+   switch (t)
+   {
+   case ClientMutex:     return "mutex";
+   case ClientCondvar:   return "cond";
+   case ClientSemaphore: return "semaphore";
+   case ClientBarrier:   return "barrier";
+   case ClientRwlock:    return "rwlock";
+   }
+   return "(unknown)";
 }
diff --git a/drd/drd_clientobj.h b/drd/drd_clientobj.h
index c30a17c..f24de5c 100644
--- a/drd/drd_clientobj.h
+++ b/drd/drd_clientobj.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -42,100 +43,100 @@
 /* Type definitions. */
 
 typedef enum {
-  ClientMutex     = 1,
-  ClientCondvar   = 2,
-  ClientSemaphore = 3,
-  ClientBarrier   = 4,
-  ClientRwlock    = 5,
+   ClientMutex     = 1,
+   ClientCondvar   = 2,
+   ClientSemaphore = 3,
+   ClientBarrier   = 4,
+   ClientRwlock    = 5,
 } ObjType;
 
 struct any
 {
-  Addr        a1;
-  ObjType     type;
-  void        (*cleanup)(union drd_clientobj*);
-  void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
-  ExeContext* first_observed_at;
+   Addr        a1;
+   ObjType     type;
+   void        (*cleanup)(union drd_clientobj*);
+   void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
+   ExeContext* first_observed_at;
 };
 
 struct mutex_info
 {
-  Addr            a1;
-  ObjType         type;
-  void            (*cleanup)(union drd_clientobj*);
-  void            (*delete_thread)(union drd_clientobj*, DrdThreadId);
-  ExeContext*     first_observed_at;
-  MutexT          mutex_type;      // pthread_mutex_t or pthread_spinlock_t.
-  int             recursion_count; // 0 if free, >= 1 if locked.
-  DrdThreadId     owner;           // owner if locked, last owner if free.
-  struct segment* last_locked_segment;
-  ULong           acquiry_time_ms;
-  ExeContext*     acquired_at;
+   Addr            a1;
+   ObjType         type;
+   void            (*cleanup)(union drd_clientobj*);
+   void            (*delete_thread)(union drd_clientobj*, DrdThreadId);
+   ExeContext*     first_observed_at;
+   MutexT          mutex_type;      // pthread_mutex_t or pthread_spinlock_t.
+   int             recursion_count; // 0 if free, >= 1 if locked.
+   DrdThreadId     owner;           // owner if locked, last owner if free.
+   struct segment* last_locked_segment;
+   ULong           acquiry_time_ms;
+   ExeContext*     acquired_at;
 };
 
 struct cond_info
 {
-  Addr        a1;
-  ObjType     type;
-  void        (*cleanup)(union drd_clientobj*);
-  void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
-  ExeContext* first_observed_at;
-  int         waiter_count;
-  Addr        mutex; // Client mutex specified in pthread_cond_wait() call, and
-           // null if no client threads are currently waiting on this cond.var.
+   Addr        a1;
+   ObjType     type;
+   void        (*cleanup)(union drd_clientobj*);
+   void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
+   ExeContext* first_observed_at;
+   int         waiter_count;
+   Addr        mutex; // Client mutex specified in pthread_cond_wait() call, and
+   // null if no client threads are currently waiting on this cond.var.
 };
 
 struct semaphore_info
 {
-  Addr        a1;
-  ObjType     type;
-  void        (*cleanup)(union drd_clientobj*);
-  void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
-  ExeContext* first_observed_at;
-  UInt        waits_to_skip;     // Number of sem_wait() calls to skip
-                                 // (due to the value assigned by sem_init()).
-  UInt        value;             // Semaphore value.
-  UWord       waiters;           // Number of threads inside sem_wait().
-  DrdThreadId last_sem_post_tid; // Thread ID associated with last sem_post().
-  XArray*     last_sem_post_seg; // array of Segment*, used as a stack.
+   Addr        a1;
+   ObjType     type;
+   void        (*cleanup)(union drd_clientobj*);
+   void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
+   ExeContext* first_observed_at;
+   UInt        waits_to_skip;     // Number of sem_wait() calls to skip
+   // (due to the value assigned by sem_init()).
+   UInt        value;             // Semaphore value.
+   UWord       waiters;           // Number of threads inside sem_wait().
+   DrdThreadId last_sem_post_tid; // Thread ID associated with last sem_post().
+   XArray*     last_sem_post_seg; // array of Segment*, used as a stack.
 };
 
 struct barrier_info
 {
-  Addr     a1;
-  ObjType  type;
-  void     (*cleanup)(union drd_clientobj*);
-  void     (*delete_thread)(union drd_clientobj*, DrdThreadId);
-  ExeContext* first_observed_at;
-  BarrierT barrier_type;      // pthread_barrier or gomp_barrier.
-  Word     count;             // Participant count in a barrier wait.
-  Word     pre_iteration;     // pre barrier completion count modulo two.
-  Word     post_iteration;    // post barrier completion count modulo two.
-  Word     pre_waiters_left;  // number of waiters left for a complete barrier.
-  Word     post_waiters_left; // number of waiters left for a complete barrier.
-  OSet*    oset;              // Per-thread barrier information.
+   Addr     a1;
+   ObjType  type;
+   void     (*cleanup)(union drd_clientobj*);
+   void     (*delete_thread)(union drd_clientobj*, DrdThreadId);
+   ExeContext* first_observed_at;
+   BarrierT barrier_type;      // pthread_barrier or gomp_barrier.
+   Word     count;             // Participant count in a barrier wait.
+   Word     pre_iteration;     // pre barrier completion count modulo two.
+   Word     post_iteration;    // post barrier completion count modulo two.
+   Word     pre_waiters_left;  // number of waiters left for a complete barrier.
+   Word     post_waiters_left; // number of waiters left for a complete barrier.
+   OSet*    oset;              // Per-thread barrier information.
 };
 
 struct rwlock_info
 {
-  Addr        a1;
-  ObjType     type;
-  void        (*cleanup)(union drd_clientobj*);
-  void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
-  ExeContext* first_observed_at;
-  OSet*       thread_info;
-  ULong       acquiry_time_ms;
-  ExeContext* acquired_at;
+   Addr        a1;
+   ObjType     type;
+   void        (*cleanup)(union drd_clientobj*);
+   void        (*delete_thread)(union drd_clientobj*, DrdThreadId);
+   ExeContext* first_observed_at;
+   OSet*       thread_info;
+   ULong       acquiry_time_ms;
+   ExeContext* acquired_at;
 };
 
 typedef union drd_clientobj
 {
-  struct any            any;
-  struct mutex_info     mutex;
-  struct cond_info      cond;
-  struct semaphore_info semaphore;
-  struct barrier_info   barrier;
-  struct rwlock_info    rwlock;
+   struct any            any;
+   struct mutex_info     mutex;
+   struct cond_info      cond;
+   struct semaphore_info semaphore;
+   struct barrier_info   barrier;
+   struct rwlock_info    rwlock;
 } DrdClientobj;
 
 
diff --git a/drd/drd_clientreq.c b/drd/drd_clientreq.c
index 57a5139..2aceffe 100644
--- a/drd/drd_clientreq.c
+++ b/drd/drd_clientreq.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -56,7 +57,7 @@
  */
 void DRD_(clientreq_init)(void)
 {
-  VG_(needs_client_requests)(DRD_(handle_client_request));
+   VG_(needs_client_requests)(DRD_(handle_client_request));
 }
 
 /**
@@ -66,322 +67,322 @@
 static
 Bool DRD_(handle_client_request)(ThreadId vg_tid, UWord* arg, UWord* ret)
 {
-  UWord result = 0;
-  const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
+   UWord result = 0;
+   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
 
-  tl_assert(vg_tid == VG_(get_running_tid()));
-  tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid);
+   tl_assert(vg_tid == VG_(get_running_tid()));
+   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid);
 
-  switch (arg[0])
-  {
-  case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
-    result = vg_tid;
-    break;
+   switch (arg[0])
+   {
+   case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
+      result = vg_tid;
+      break;
 
-  case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
-    result = drd_tid;
-    break;
+   case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
+      result = drd_tid;
+      break;
 
-  case VG_USERREQ__DRD_START_SUPPRESSION:
-    DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client");
-    break;
+   case VG_USERREQ__DRD_START_SUPPRESSION:
+      DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client");
+      break;
 
-  case VG_USERREQ__DRD_FINISH_SUPPRESSION:
-    DRD_(finish_suppression)(arg[1], arg[1] + arg[2]);
-    break;
+   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
+      DRD_(finish_suppression)(arg[1], arg[1] + arg[2]);
+      break;
 
-  case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
-  {
-    const Addr topmost_sp = DRD_(highest_used_stack_address)(vg_tid);
+   case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
+      {
+         const Addr topmost_sp = DRD_(highest_used_stack_address)(vg_tid);
 #if 0
-    UInt nframes;
-    const UInt n_ips = 20;
-    Addr ips[n_ips], sps[n_ips], fps[n_ips];
-    Char desc[128];
-    unsigned i;
+         UInt nframes;
+         const UInt n_ips = 20;
+         Addr ips[n_ips], sps[n_ips], fps[n_ips];
+         Char desc[128];
+         unsigned i;
 
-    nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, fps, 0);
+         nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, fps, 0);
 
-    VG_(message)(Vg_DebugMsg, "thread %d/%d", vg_tid, drd_tid);
-    for (i = 0; i < nframes; i++)
-    {
-      VG_(describe_IP)(ips[i], desc, sizeof(desc));
-      VG_(message)(Vg_DebugMsg, "[%2d] sp 0x%09lx fp 0x%09lx ip %s",
-                   i, sps[i], fps[i], desc);
-    }
+         VG_(message)(Vg_DebugMsg, "thread %d/%d", vg_tid, drd_tid);
+         for (i = 0; i < nframes; i++)
+         {
+            VG_(describe_IP)(ips[i], desc, sizeof(desc));
+            VG_(message)(Vg_DebugMsg, "[%2d] sp 0x%09lx fp 0x%09lx ip %s",
+                         i, sps[i], fps[i], desc);
+         }
 #endif
-    DRD_(thread_set_stack_startup)(drd_tid, VG_(get_SP)(vg_tid));
-    DRD_(start_suppression)(topmost_sp, VG_(thread_get_stack_max)(vg_tid),
-                            "stack top");
-    break;
-  }
+         DRD_(thread_set_stack_startup)(drd_tid, VG_(get_SP)(vg_tid));
+         DRD_(start_suppression)(topmost_sp, VG_(thread_get_stack_max)(vg_tid),
+                                 "stack top");
+         break;
+      }
 
-  case VG_USERREQ__DRD_START_NEW_SEGMENT:
-    DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1]));
-    break;
+   case VG_USERREQ__DRD_START_NEW_SEGMENT:
+      DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1]));
+      break;
 
-  case VG_USERREQ__DRD_START_TRACE_ADDR:
-    DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2]);
-    break;
+   case VG_USERREQ__DRD_START_TRACE_ADDR:
+      DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2]);
+      break;
 
-  case VG_USERREQ__DRD_STOP_TRACE_ADDR:
-    DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]);
-    break;
+   case VG_USERREQ__DRD_STOP_TRACE_ADDR:
+      DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]);
+      break;
 
-  case VG_USERREQ__DRD_STOP_RECORDING:
-    DRD_(thread_stop_recording)(drd_tid);
-    break;
+   case VG_USERREQ__DRD_STOP_RECORDING:
+      DRD_(thread_stop_recording)(drd_tid);
+      break;
 
-  case VG_USERREQ__DRD_START_RECORDING:
-    DRD_(thread_start_recording)(drd_tid);
-    break;
+   case VG_USERREQ__DRD_START_RECORDING:
+      DRD_(thread_start_recording)(drd_tid);
+      break;
 
-  case VG_USERREQ__SET_PTHREADID:
-    // pthread_self() returns 0 for programs not linked with libpthread.so.
-    if (arg[1] != INVALID_POSIX_THREADID)
-      DRD_(thread_set_pthreadid)(drd_tid, arg[1]);
-    break;
+   case VG_USERREQ__SET_PTHREADID:
+      // pthread_self() returns 0 for programs not linked with libpthread.so.
+      if (arg[1] != INVALID_POSIX_THREADID)
+         DRD_(thread_set_pthreadid)(drd_tid, arg[1]);
+      break;
 
-  case VG_USERREQ__SET_JOINABLE:
-    DRD_(thread_set_joinable)(DRD_(PtThreadIdToDrdThreadId)(arg[1]),
-                              (Bool)arg[2]);
-    break;
+   case VG_USERREQ__SET_JOINABLE:
+      DRD_(thread_set_joinable)(DRD_(PtThreadIdToDrdThreadId)(arg[1]),
+                                (Bool)arg[2]);
+      break;
 
-  case VG_USERREQ__POST_THREAD_JOIN:
-    tl_assert(arg[1]);
-    DRD_(thread_post_join)(drd_tid, DRD_(PtThreadIdToDrdThreadId)(arg[1]));
-    break;
+   case VG_USERREQ__POST_THREAD_JOIN:
+      tl_assert(arg[1]);
+      DRD_(thread_post_join)(drd_tid, DRD_(PtThreadIdToDrdThreadId)(arg[1]));
+      break;
 
-  case VG_USERREQ__PRE_THREAD_CANCEL:
-    tl_assert(arg[1]);
-    DRD_(thread_pre_cancel)(drd_tid);
-    break;
+   case VG_USERREQ__PRE_THREAD_CANCEL:
+      tl_assert(arg[1]);
+      DRD_(thread_pre_cancel)(drd_tid);
+      break;
 
-  case VG_USERREQ__POST_THREAD_CANCEL:
-    tl_assert(arg[1]);
-    break;
+   case VG_USERREQ__POST_THREAD_CANCEL:
+      tl_assert(arg[1]);
+      break;
 
-  case VG_USERREQ__PRE_MUTEX_INIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(mutex_init)(arg[1], arg[2]);
-    break;
+   case VG_USERREQ__PRE_MUTEX_INIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(mutex_init)(arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__POST_MUTEX_INIT:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_MUTEX_INIT:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_MUTEX_DESTROY:
-    DRD_(thread_enter_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__PRE_MUTEX_DESTROY:
+      DRD_(thread_enter_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__POST_MUTEX_DESTROY:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(mutex_post_destroy)(arg[1]);
-    break;
+   case VG_USERREQ__POST_MUTEX_DESTROY:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(mutex_post_destroy)(arg[1]);
+      break;
 
-  case VG_USERREQ__PRE_MUTEX_LOCK:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
-    break;
+   case VG_USERREQ__PRE_MUTEX_LOCK:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
+      break;
 
-  case VG_USERREQ__POST_MUTEX_LOCK:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
-    break;
+   case VG_USERREQ__POST_MUTEX_LOCK:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
+      break;
 
-  case VG_USERREQ__PRE_MUTEX_UNLOCK:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(mutex_unlock)(arg[1], arg[2]);
-    break;
+   case VG_USERREQ__PRE_MUTEX_UNLOCK:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(mutex_unlock)(arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__POST_MUTEX_UNLOCK:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_MUTEX_UNLOCK:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(spinlock_init_or_unlock)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(spinlock_init_or_unlock)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_COND_INIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(cond_pre_init)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_COND_INIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(cond_pre_init)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_COND_INIT:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_COND_INIT:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_COND_DESTROY:
-    DRD_(thread_enter_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__PRE_COND_DESTROY:
+      DRD_(thread_enter_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__POST_COND_DESTROY:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(cond_post_destroy)(arg[1]);
-    break;
+   case VG_USERREQ__POST_COND_DESTROY:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(cond_post_destroy)(arg[1]);
+      break;
 
-  case VG_USERREQ__PRE_COND_WAIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-    {
-      const Addr cond = arg[1];
-      const Addr mutex = arg[2];
-      const MutexT mutex_type = arg[3];
-      DRD_(mutex_unlock)(mutex, mutex_type);
-      DRD_(cond_pre_wait)(cond, mutex);
-    }
-    break;
+   case VG_USERREQ__PRE_COND_WAIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+      {
+         const Addr cond = arg[1];
+         const Addr mutex = arg[2];
+         const MutexT mutex_type = arg[3];
+         DRD_(mutex_unlock)(mutex, mutex_type);
+         DRD_(cond_pre_wait)(cond, mutex);
+      }
+      break;
 
-  case VG_USERREQ__POST_COND_WAIT:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-    {
-      const Addr cond = arg[1];
-      const Addr mutex = arg[2];
-      const Bool took_lock = arg[3];
-      DRD_(cond_post_wait)(cond);
-      DRD_(mutex_post_lock)(mutex, took_lock, True);
-    }
-    break;
+   case VG_USERREQ__POST_COND_WAIT:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+      {
+         const Addr cond = arg[1];
+         const Addr mutex = arg[2];
+         const Bool took_lock = arg[3];
+         DRD_(cond_post_wait)(cond);
+         DRD_(mutex_post_lock)(mutex, took_lock, True);
+      }
+      break;
 
-  case VG_USERREQ__PRE_COND_SIGNAL:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(cond_pre_signal)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_COND_SIGNAL:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(cond_pre_signal)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_COND_SIGNAL:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_COND_SIGNAL:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_COND_BROADCAST:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(cond_pre_broadcast)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_COND_BROADCAST:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(cond_pre_broadcast)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_COND_BROADCAST:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_COND_BROADCAST:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_SEM_INIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
-    break;
+   case VG_USERREQ__PRE_SEM_INIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
+      break;
 
-  case VG_USERREQ__POST_SEM_INIT:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_SEM_INIT:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_SEM_DESTROY:
-    DRD_(thread_enter_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__PRE_SEM_DESTROY:
+      DRD_(thread_enter_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__POST_SEM_DESTROY:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(semaphore_destroy)(arg[1]);
-    break;
+   case VG_USERREQ__POST_SEM_DESTROY:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(semaphore_destroy)(arg[1]);
+      break;
 
-  case VG_USERREQ__PRE_SEM_WAIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(semaphore_pre_wait)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_SEM_WAIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(semaphore_pre_wait)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_SEM_WAIT:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
-    break;
+   case VG_USERREQ__POST_SEM_WAIT:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__PRE_SEM_POST:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(semaphore_pre_post)(drd_tid, arg[1]);
-    break;
+   case VG_USERREQ__PRE_SEM_POST:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(semaphore_pre_post)(drd_tid, arg[1]);
+      break;
 
-  case VG_USERREQ__POST_SEM_POST:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
-    break;
+   case VG_USERREQ__POST_SEM_POST:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__PRE_BARRIER_INIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]);
-    break;
+   case VG_USERREQ__PRE_BARRIER_INIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]);
+      break;
 
-  case VG_USERREQ__POST_BARRIER_INIT:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_BARRIER_INIT:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__PRE_BARRIER_DESTROY:
-    DRD_(thread_enter_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__PRE_BARRIER_DESTROY:
+      DRD_(thread_enter_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__POST_BARRIER_DESTROY:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(barrier_destroy)(arg[1], arg[2]);
-    break;
+   case VG_USERREQ__POST_BARRIER_DESTROY:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(barrier_destroy)(arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__PRE_BARRIER_WAIT:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]);
-    break;
+   case VG_USERREQ__PRE_BARRIER_WAIT:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__POST_BARRIER_WAIT:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]);
-    break;
+   case VG_USERREQ__POST_BARRIER_WAIT:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]);
+      break;
 
-  case VG_USERREQ__PRE_RWLOCK_INIT:
-    DRD_(rwlock_pre_init)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_RWLOCK_INIT:
+      DRD_(rwlock_pre_init)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_RWLOCK_DESTROY:
-    DRD_(rwlock_post_destroy)(arg[1]);
-    break;
+   case VG_USERREQ__POST_RWLOCK_DESTROY:
+      DRD_(rwlock_post_destroy)(arg[1]);
+      break;
 
-  case VG_USERREQ__PRE_RWLOCK_RDLOCK:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(rwlock_pre_rdlock)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_RWLOCK_RDLOCK:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(rwlock_pre_rdlock)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_RWLOCK_RDLOCK:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(rwlock_post_rdlock)(arg[1], arg[2]);
-    break;
+   case VG_USERREQ__POST_RWLOCK_RDLOCK:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(rwlock_post_rdlock)(arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__PRE_RWLOCK_WRLOCK:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(rwlock_pre_wrlock)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_RWLOCK_WRLOCK:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(rwlock_pre_wrlock)(arg[1]);
+      break;
 
-  case VG_USERREQ__POST_RWLOCK_WRLOCK:
-    if (DRD_(thread_leave_synchr)(drd_tid) == 0)
-      DRD_(rwlock_post_wrlock)(arg[1], arg[2]);
-    break;
+   case VG_USERREQ__POST_RWLOCK_WRLOCK:
+      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
+         DRD_(rwlock_post_wrlock)(arg[1], arg[2]);
+      break;
 
-  case VG_USERREQ__PRE_RWLOCK_UNLOCK:
-    if (DRD_(thread_enter_synchr)(drd_tid) == 0)
-      DRD_(rwlock_pre_unlock)(arg[1]);
-    break;
+   case VG_USERREQ__PRE_RWLOCK_UNLOCK:
+      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
+         DRD_(rwlock_pre_unlock)(arg[1]);
+      break;
       
-  case VG_USERREQ__POST_RWLOCK_UNLOCK:
-    DRD_(thread_leave_synchr)(drd_tid);
-    break;
+   case VG_USERREQ__POST_RWLOCK_UNLOCK:
+      DRD_(thread_leave_synchr)(drd_tid);
+      break;
 
-  case VG_USERREQ__DRD_CLEAN_MEMORY:
-    if (arg[2] > 0)
-      DRD_(clean_memory)(arg[1], arg[2]);
-    break;
+   case VG_USERREQ__DRD_CLEAN_MEMORY:
+      if (arg[2] > 0)
+         DRD_(clean_memory)(arg[1], arg[2]);
+      break;
 
-  default:
-    VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
-                 arg[0], arg[1]);
-    tl_assert(0);
-    return False;
-  }
+   default:
+      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
+                   arg[0], arg[1]);
+      tl_assert(0);
+      return False;
+   }
 
-  *ret = result;
-  return True;
+   *ret = result;
+   return True;
 }
 
 /**
@@ -394,36 +395,36 @@
  */
 static Addr DRD_(highest_used_stack_address)(const ThreadId vg_tid)
 {
-    UInt nframes;
-    const UInt n_ips = 10;
-    UInt i;
-    Addr ips[n_ips], sps[n_ips];
-    Addr husa;
+   UInt nframes;
+   const UInt n_ips = 10;
+   UInt i;
+   Addr ips[n_ips], sps[n_ips];
+   Addr husa;
 
-    nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, 0, 0);
-    tl_assert(1 <= nframes && nframes <= n_ips);
+   nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, 0, 0);
+   tl_assert(1 <= nframes && nframes <= n_ips);
 
-    /* A hack to work around VG_(get_StackTrace)()'s behavior that sometimes */
-    /* the topmost stackframes it returns are bogus (this occurs sometimes   */
-    /* at least on amd64, ppc32 and ppc64).                                  */
+   /* A hack to work around VG_(get_StackTrace)()'s behavior that sometimes */
+   /* the topmost stackframes it returns are bogus (this occurs sometimes   */
+   /* at least on amd64, ppc32 and ppc64).                                  */
 
-    husa = sps[0];
+   husa = sps[0];
 
-    tl_assert(VG_(thread_get_stack_max)(vg_tid)
-              - VG_(thread_get_stack_size)(vg_tid) <= husa
-              && husa < VG_(thread_get_stack_max)(vg_tid));
+   tl_assert(VG_(thread_get_stack_max)(vg_tid)
+             - VG_(thread_get_stack_size)(vg_tid) <= husa
+             && husa < VG_(thread_get_stack_max)(vg_tid));
 
-    for (i = 1; i < nframes; i++)
-    {
+   for (i = 1; i < nframes; i++)
+   {
       if (sps[i] == 0)
-        break;
+         break;
       if (husa < sps[i] && sps[i] < VG_(thread_get_stack_max)(vg_tid))
-        husa = sps[i];
-    }
+         husa = sps[i];
+   }
 
-    tl_assert(VG_(thread_get_stack_max)(vg_tid)
-              - VG_(thread_get_stack_size)(vg_tid) <= husa
-              && husa < VG_(thread_get_stack_max)(vg_tid));
+   tl_assert(VG_(thread_get_stack_max)(vg_tid)
+             - VG_(thread_get_stack_size)(vg_tid) <= husa
+             && husa < VG_(thread_get_stack_max)(vg_tid));
 
-    return husa;
+   return husa;
 }
diff --git a/drd/drd_clientreq.h b/drd/drd_clientreq.h
index a011cce..40cf225 100644
--- a/drd/drd_clientreq.h
+++ b/drd/drd_clientreq.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -44,166 +45,166 @@
  * source files.
  */
 enum {
-  /* Ask drd to suppress data race reports on all currently allocated stack */
-  /* data of the current thread.                                            */
-  VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK = VG_USERREQ_TOOL_BASE('D', 'r'),
-  /* args: none */
-  /* To ask the drd tool to start a new segment in the specified thread. */
-  VG_USERREQ__DRD_START_NEW_SEGMENT,
-  /* args: POSIX thread ID. */
-  /* Let the drd tool stop recording memory accesses in the calling thread. */
-  VG_USERREQ__DRD_STOP_RECORDING,
-  /* args: none. */
-  /* Let the drd tool start recording memory accesses in the calling thread. */
-  VG_USERREQ__DRD_START_RECORDING,
-  /* args: none. */
+   /* Ask drd to suppress data race reports on all currently allocated stack */
+   /* data of the current thread.                                            */
+   VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK = VG_USERREQ_TOOL_BASE('D', 'r'),
+   /* args: none */
+   /* To ask the drd tool to start a new segment in the specified thread. */
+   VG_USERREQ__DRD_START_NEW_SEGMENT,
+   /* args: POSIX thread ID. */
+   /* Let the drd tool stop recording memory accesses in the calling thread. */
+   VG_USERREQ__DRD_STOP_RECORDING,
+   /* args: none. */
+   /* Let the drd tool start recording memory accesses in the calling thread. */
+   VG_USERREQ__DRD_START_RECORDING,
+   /* args: none. */
 
-  /* Tell drd the pthread_t of the running thread. */
-  VG_USERREQ__SET_PTHREADID,
-  /* args: pthread_t. */
-  /* Ask drd that a the thread's state transition from */
-  /* VgTs_Zombie to VgTs_Empty is delayed until */
-  /* VG_USERREQ__POST_THREAD_JOIN is performed. */
-  VG_USERREQ__SET_JOINABLE,
-  /* args: pthread_t, Bool */
+   /* Tell drd the pthread_t of the running thread. */
+   VG_USERREQ__SET_PTHREADID,
+   /* args: pthread_t. */
+   /* Ask drd that the thread's state transition from */
+   /* VgTs_Zombie to VgTs_Empty is delayed until */
+   /* VG_USERREQ__POST_THREAD_JOIN is performed. */
+   VG_USERREQ__SET_JOINABLE,
+   /* args: pthread_t, Bool */
 
-  /* To notify drd that a thread finished because */
-  /* pthread_thread_join() was called on it. */
-  VG_USERREQ__POST_THREAD_JOIN,
-  /* args: pthread_t (joinee) */
+   /* To notify drd that a thread finished because */
+   /* pthread_join() was called on it. */
+   VG_USERREQ__POST_THREAD_JOIN,
+   /* args: pthread_t (joinee) */
 
-  /* To notify drd before a pthread_cancel call. */
-  VG_USERREQ__PRE_THREAD_CANCEL,
-  /* args: pthread_t */
-  /* To notify drd after a pthread_cancel call. */
-  VG_USERREQ__POST_THREAD_CANCEL,
-  /* args: pthread_t, Bool */
+   /* To notify drd before a pthread_cancel call. */
+   VG_USERREQ__PRE_THREAD_CANCEL,
+   /* args: pthread_t */
+   /* To notify drd after a pthread_cancel call. */
+   VG_USERREQ__POST_THREAD_CANCEL,
+   /* args: pthread_t, Bool */
 
-  /* to notify the drd tool of a pthread_mutex_init call. */
-  VG_USERREQ__PRE_MUTEX_INIT,
-  /* args: Addr, MutexT */
-  /* to notify the drd tool of a pthread_mutex_init call. */
-  VG_USERREQ__POST_MUTEX_INIT,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_mutex_destroy call. */
-  VG_USERREQ__PRE_MUTEX_DESTROY,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_mutex_destroy call. */
-  VG_USERREQ__POST_MUTEX_DESTROY,
-  /* args: Addr, MutexT */
-  /* to notify the drd tool of pthread_mutex_lock calls */
-  VG_USERREQ__PRE_MUTEX_LOCK,
-  /* args: Addr, MutexT, Bool */
-  /* to notify the drd tool of pthread_mutex_lock calls */
-  VG_USERREQ__POST_MUTEX_LOCK,
-  /* args: Addr, Bool */
-  /* to notify the drd tool of pthread_mutex_unlock calls */
-  VG_USERREQ__PRE_MUTEX_UNLOCK,
-  /* args: Addr */
-  /* to notify the drd tool of pthread_mutex_unlock calls */
-  VG_USERREQ__POST_MUTEX_UNLOCK,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_spin_init/pthread_spin_unlock call */
-  VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_spin_init/pthread_spin_unlock call */
-  VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
-  /* args: Addr */
+   /* to notify the drd tool of a pthread_mutex_init call. */
+   VG_USERREQ__PRE_MUTEX_INIT,
+   /* args: Addr, MutexT */
+   /* to notify the drd tool of a pthread_mutex_init call. */
+   VG_USERREQ__POST_MUTEX_INIT,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_mutex_destroy call. */
+   VG_USERREQ__PRE_MUTEX_DESTROY,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_mutex_destroy call. */
+   VG_USERREQ__POST_MUTEX_DESTROY,
+   /* args: Addr, MutexT */
+   /* to notify the drd tool of pthread_mutex_lock calls */
+   VG_USERREQ__PRE_MUTEX_LOCK,
+   /* args: Addr, MutexT, Bool */
+   /* to notify the drd tool of pthread_mutex_lock calls */
+   VG_USERREQ__POST_MUTEX_LOCK,
+   /* args: Addr, Bool */
+   /* to notify the drd tool of pthread_mutex_unlock calls */
+   VG_USERREQ__PRE_MUTEX_UNLOCK,
+   /* args: Addr */
+   /* to notify the drd tool of pthread_mutex_unlock calls */
+   VG_USERREQ__POST_MUTEX_UNLOCK,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_spin_init/pthread_spin_unlock call */
+   VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_spin_init/pthread_spin_unlock call */
+   VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
+   /* args: Addr */
 
 
-  /* to notify the drd tool of a pthread_cond_init call. */
-  VG_USERREQ__PRE_COND_INIT,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_cond_init call. */
-  VG_USERREQ__POST_COND_INIT,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_cond_destroy call. */
-  VG_USERREQ__PRE_COND_DESTROY,
-  /* args: Addr */
-  /* to notify the drd tool of a pthread_cond_destroy call. */
-  VG_USERREQ__POST_COND_DESTROY,
-  /* args: Addr */
-  VG_USERREQ__PRE_COND_WAIT,
-  /* args: Addr cond, Addr mutex, MutexT mt */
-  VG_USERREQ__POST_COND_WAIT,
-  /* args: Addr cond, Addr mutex, Bool took_lock*/
-  VG_USERREQ__PRE_COND_SIGNAL,
-  /* args: Addr cond */
-  VG_USERREQ__POST_COND_SIGNAL,
-  /* args: Addr cond */
-  VG_USERREQ__PRE_COND_BROADCAST,
-  /* args: Addr cond */
-  VG_USERREQ__POST_COND_BROADCAST,
-  /* args: Addr cond */
+   /* to notify the drd tool of a pthread_cond_init call. */
+   VG_USERREQ__PRE_COND_INIT,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_cond_init call. */
+   VG_USERREQ__POST_COND_INIT,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_cond_destroy call. */
+   VG_USERREQ__PRE_COND_DESTROY,
+   /* args: Addr */
+   /* to notify the drd tool of a pthread_cond_destroy call. */
+   VG_USERREQ__POST_COND_DESTROY,
+   /* args: Addr */
+   VG_USERREQ__PRE_COND_WAIT,
+   /* args: Addr cond, Addr mutex, MutexT mt */
+   VG_USERREQ__POST_COND_WAIT,
+   /* args: Addr cond, Addr mutex, Bool took_lock */
+   VG_USERREQ__PRE_COND_SIGNAL,
+   /* args: Addr cond */
+   VG_USERREQ__POST_COND_SIGNAL,
+   /* args: Addr cond */
+   VG_USERREQ__PRE_COND_BROADCAST,
+   /* args: Addr cond */
+   VG_USERREQ__POST_COND_BROADCAST,
+   /* args: Addr cond */
 
-  /* To notify the drd tool of a sem_init call. */
-  VG_USERREQ__PRE_SEM_INIT,
-  /* args: Addr sem, Word pshared, Word value */
-  /* To notify the drd tool of a sem_init call. */
-  VG_USERREQ__POST_SEM_INIT,
-  /* args: Addr sem */
-  /* To notify the drd tool of a sem_destroy call. */
-  VG_USERREQ__PRE_SEM_DESTROY,
-  /* args: Addr sem */
-  /* To notify the drd tool of a sem_destroy call. */
-  VG_USERREQ__POST_SEM_DESTROY,
-  /* args: Addr sem */
-  /* To notify the drd tool of a sem_wait call. */
-  VG_USERREQ__PRE_SEM_WAIT,
-  /* args: Addr sem */
-  /* To notify the drd tool of a sem_wait call. */
-  VG_USERREQ__POST_SEM_WAIT,
-  /* args: Addr sem, Bool waited */
-  /* To notify the drd tool before a sem_post call. */
-  VG_USERREQ__PRE_SEM_POST,
-  /* args: Addr sem */
-  /* To notify the drd tool after a sem_post call. */
-  VG_USERREQ__POST_SEM_POST,
-  /* args: Addr sem, Bool waited */
+   /* To notify the drd tool of a sem_init call. */
+   VG_USERREQ__PRE_SEM_INIT,
+   /* args: Addr sem, Word pshared, Word value */
+   /* To notify the drd tool of a sem_init call. */
+   VG_USERREQ__POST_SEM_INIT,
+   /* args: Addr sem */
+   /* To notify the drd tool of a sem_destroy call. */
+   VG_USERREQ__PRE_SEM_DESTROY,
+   /* args: Addr sem */
+   /* To notify the drd tool of a sem_destroy call. */
+   VG_USERREQ__POST_SEM_DESTROY,
+   /* args: Addr sem */
+   /* To notify the drd tool of a sem_wait call. */
+   VG_USERREQ__PRE_SEM_WAIT,
+   /* args: Addr sem */
+   /* To notify the drd tool of a sem_wait call. */
+   VG_USERREQ__POST_SEM_WAIT,
+   /* args: Addr sem, Bool waited */
+   /* To notify the drd tool before a sem_post call. */
+   VG_USERREQ__PRE_SEM_POST,
+   /* args: Addr sem */
+   /* To notify the drd tool after a sem_post call. */
+   VG_USERREQ__POST_SEM_POST,
+   /* args: Addr sem, Bool waited */
 
-  /* To notify the drd tool of a pthread_barrier_init call. */
-  VG_USERREQ__PRE_BARRIER_INIT,
-  /* args: Addr barrier, BarrierT type, Word count, Bool reinit */
-  /* To notify the drd tool of a pthread_barrier_init call. */
-  VG_USERREQ__POST_BARRIER_INIT,
-  /* args: Addr barrier, BarrierT type */
-  /* To notify the drd tool of a pthread_barrier_destroy call. */
-  VG_USERREQ__PRE_BARRIER_DESTROY,
-  /* args: Addr barrier, BarrierT type. */
-  /* To notify the drd tool of a pthread_barrier_destroy call. */
-  VG_USERREQ__POST_BARRIER_DESTROY,
-  /* args: Addr barrier, BarrierT type. */
-  /* To notify the drd tool of a pthread_barrier_wait call. */
-  VG_USERREQ__PRE_BARRIER_WAIT,
-  /* args: Addr barrier, BarrierT type. */
-  /* To notify the drd tool of a pthread_barrier_wait call. */
-  VG_USERREQ__POST_BARRIER_WAIT,
-  /* args: Addr barrier, BarrierT type, Word has_waited, Word serializing */
+   /* To notify the drd tool of a pthread_barrier_init call. */
+   VG_USERREQ__PRE_BARRIER_INIT,
+   /* args: Addr barrier, BarrierT type, Word count, Bool reinit */
+   /* To notify the drd tool of a pthread_barrier_init call. */
+   VG_USERREQ__POST_BARRIER_INIT,
+   /* args: Addr barrier, BarrierT type */
+   /* To notify the drd tool of a pthread_barrier_destroy call. */
+   VG_USERREQ__PRE_BARRIER_DESTROY,
+   /* args: Addr barrier, BarrierT type. */
+   /* To notify the drd tool of a pthread_barrier_destroy call. */
+   VG_USERREQ__POST_BARRIER_DESTROY,
+   /* args: Addr barrier, BarrierT type. */
+   /* To notify the drd tool of a pthread_barrier_wait call. */
+   VG_USERREQ__PRE_BARRIER_WAIT,
+   /* args: Addr barrier, BarrierT type. */
+   /* To notify the drd tool of a pthread_barrier_wait call. */
+   VG_USERREQ__POST_BARRIER_WAIT,
+   /* args: Addr barrier, BarrierT type, Word has_waited, Word serializing */
 
-  /* To notify the drd tool of a pthread_rwlock_init call. */
-  VG_USERREQ__PRE_RWLOCK_INIT,
-  /* args: Addr rwlock */
-  /* To notify the drd tool of a pthread_rwlock_destroy call. */
-  VG_USERREQ__POST_RWLOCK_DESTROY,
-  /* args: Addr rwlock */
-  /* To notify the drd tool of a pthread_rwlock_rdlock call. */
-  VG_USERREQ__PRE_RWLOCK_RDLOCK,
-  /* args: Addr rwlock */
-  /* To notify the drd tool of a pthread_rwlock_rdlock call. */
-  VG_USERREQ__POST_RWLOCK_RDLOCK,
-  /* args: Addr rwlock, Bool took_lock */
-  /* To notify the drd tool of a pthread_rwlock_wrlock call. */
-  VG_USERREQ__PRE_RWLOCK_WRLOCK,
-  /* args: Addr rwlock */
-  /* To notify the drd tool of a pthread_rwlock_wrlock call. */
-  VG_USERREQ__POST_RWLOCK_WRLOCK,
-  /* args: Addr rwlock, Bool took_lock */
-  /* To notify the drd tool of a pthread_rwlock_unlock call. */
-  VG_USERREQ__PRE_RWLOCK_UNLOCK,
-  /* args: Addr rwlock */
-  /* To notify the drd tool of a pthread_rwlock_unlock call. */
-  VG_USERREQ__POST_RWLOCK_UNLOCK
-  /* args: Addr rwlock, Bool unlocked */
+   /* To notify the drd tool of a pthread_rwlock_init call. */
+   VG_USERREQ__PRE_RWLOCK_INIT,
+   /* args: Addr rwlock */
+   /* To notify the drd tool of a pthread_rwlock_destroy call. */
+   VG_USERREQ__POST_RWLOCK_DESTROY,
+   /* args: Addr rwlock */
+   /* To notify the drd tool of a pthread_rwlock_rdlock call. */
+   VG_USERREQ__PRE_RWLOCK_RDLOCK,
+   /* args: Addr rwlock */
+   /* To notify the drd tool of a pthread_rwlock_rdlock call. */
+   VG_USERREQ__POST_RWLOCK_RDLOCK,
+   /* args: Addr rwlock, Bool took_lock */
+   /* To notify the drd tool of a pthread_rwlock_wrlock call. */
+   VG_USERREQ__PRE_RWLOCK_WRLOCK,
+   /* args: Addr rwlock */
+   /* To notify the drd tool of a pthread_rwlock_wrlock call. */
+   VG_USERREQ__POST_RWLOCK_WRLOCK,
+   /* args: Addr rwlock, Bool took_lock */
+   /* To notify the drd tool of a pthread_rwlock_unlock call. */
+   VG_USERREQ__PRE_RWLOCK_UNLOCK,
+   /* args: Addr rwlock */
+   /* To notify the drd tool of a pthread_rwlock_unlock call. */
+   VG_USERREQ__POST_RWLOCK_UNLOCK
+   /* args: Addr rwlock, Bool unlocked */
 
 };
 
@@ -213,14 +214,14 @@
  * values defined below specify which of these types a mutex really is.
  */
 typedef enum
-{
-  mutex_type_unknown          = -1,
-  mutex_type_invalid_mutex    = 0,
-  mutex_type_recursive_mutex  = 1,
-  mutex_type_errorcheck_mutex = 2,
-  mutex_type_default_mutex    = 3,
-  mutex_type_spinlock         = 4
-} MutexT;
+   {
+      mutex_type_unknown          = -1,
+      mutex_type_invalid_mutex    = 0,
+      mutex_type_recursive_mutex  = 1,
+      mutex_type_errorcheck_mutex = 2,
+      mutex_type_default_mutex    = 3,
+      mutex_type_spinlock         = 4
+   } MutexT;
 
 /*
  * Error checking on POSIX barriers and GOMP barriers happens by the same
@@ -228,10 +229,10 @@
  * a given client address.
  */
 typedef enum
-  {
-    pthread_barrier = 1,
-    gomp_barrier = 2
-  } BarrierT;
+   {
+      pthread_barrier = 1,
+      gomp_barrier = 2
+   } BarrierT;
 
 
 void DRD_(clientreq_init)(void);
diff --git a/drd/drd_cond.c b/drd/drd_cond.c
index afee311..a698e1c 100644
--- a/drd/drd_cond.c
+++ b/drd/drd_cond.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -50,25 +51,25 @@
 
 void DRD_(cond_set_report_signal_unlocked)(const Bool r)
 {
-  DRD_(s_report_signal_unlocked) = r;
+   DRD_(s_report_signal_unlocked) = r;
 }
 
 void DRD_(cond_set_trace)(const Bool trace_cond)
 {
-  DRD_(s_trace_cond) = trace_cond;
+   DRD_(s_trace_cond) = trace_cond;
 }
 
 static
 void DRD_(cond_initialize)(struct cond_info* const p, const Addr cond)
 {
-  tl_assert(cond != 0);
-  tl_assert(p->a1         == cond);
-  tl_assert(p->type       == ClientCondvar);
+   tl_assert(cond != 0);
+   tl_assert(p->a1         == cond);
+   tl_assert(p->type       == ClientCondvar);
 
-  p->cleanup       = (void(*)(DrdClientobj*))(DRD_(cond_cleanup));
-  p->delete_thread = 0;
-  p->waiter_count  = 0;
-  p->mutex         = 0;
+   p->cleanup       = (void(*)(DrdClientobj*))(DRD_(cond_cleanup));
+   p->delete_thread = 0;
+   p->waiter_count  = 0;
+   p->mutex         = 0;
 }
 
 /**
@@ -77,111 +78,111 @@
  */
 static void DRD_(cond_cleanup)(struct cond_info* p)
 {
-  tl_assert(p);
-  if (p->mutex)
-  {
-    struct mutex_info* q;
-    q = &(DRD_(clientobj_get)(p->mutex, ClientMutex)->mutex);
-    tl_assert(q);
-    {
-      CondDestrErrInfo cde = { p->a1, q->a1, q->owner };
-      VG_(maybe_record_error)(VG_(get_running_tid)(),
-                              CondDestrErr,
-                              VG_(get_IP)(VG_(get_running_tid)()),
-                              "Destroying condition variable that is being"
-                              " waited upon",
-                              &cde);
-    }
-  }
+   tl_assert(p);
+   if (p->mutex)
+   {
+      struct mutex_info* q;
+      q = &(DRD_(clientobj_get)(p->mutex, ClientMutex)->mutex);
+      tl_assert(q);
+      {
+         CondDestrErrInfo cde = { p->a1, q->a1, q->owner };
+         VG_(maybe_record_error)(VG_(get_running_tid)(),
+                                 CondDestrErr,
+                                 VG_(get_IP)(VG_(get_running_tid)()),
+                                 "Destroying condition variable that is being"
+                                 " waited upon",
+                                 &cde);
+      }
+   }
 }
 
 static struct cond_info* DRD_(cond_get_or_allocate)(const Addr cond)
 {
-  struct cond_info *p;
+   struct cond_info *p;
 
-  tl_assert(offsetof(DrdClientobj, cond) == 0);
-  p = &(DRD_(clientobj_get)(cond, ClientCondvar)->cond);
-  if (p == 0)
-  {
-    p = &(DRD_(clientobj_add)(cond, ClientCondvar)->cond);
-    DRD_(cond_initialize)(p, cond);
-  }
-  return p;
+   tl_assert(offsetof(DrdClientobj, cond) == 0);
+   p = &(DRD_(clientobj_get)(cond, ClientCondvar)->cond);
+   if (p == 0)
+   {
+      p = &(DRD_(clientobj_add)(cond, ClientCondvar)->cond);
+      DRD_(cond_initialize)(p, cond);
+   }
+   return p;
 }
 
 static struct cond_info* DRD_(cond_get)(const Addr cond)
 {
-  tl_assert(offsetof(DrdClientobj, cond) == 0);
-  return &(DRD_(clientobj_get)(cond, ClientCondvar)->cond);
+   tl_assert(offsetof(DrdClientobj, cond) == 0);
+   return &(DRD_(clientobj_get)(cond, ClientCondvar)->cond);
 }
 
 /** Called before pthread_cond_init(). */
 void DRD_(cond_pre_init)(const Addr cond)
 {
-  struct cond_info* p;
+   struct cond_info* p;
 
-  if (DRD_(s_trace_cond))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] cond_init       cond 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 cond);
-  }
+   if (DRD_(s_trace_cond))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] cond_init       cond 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   cond);
+   }
 
-  p = DRD_(cond_get)(cond);
+   p = DRD_(cond_get)(cond);
 
-  if (p)
-  {
-    CondErrInfo cei = { .cond = cond };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            CondErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "initialized twice",
-                            &cei);
-  }
+   if (p)
+   {
+      CondErrInfo cei = { .cond = cond };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              CondErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "initialized twice",
+                              &cei);
+   }
 
-  p = DRD_(cond_get_or_allocate)(cond);
+   p = DRD_(cond_get_or_allocate)(cond);
 }
 
 /** Called after pthread_cond_destroy(). */
 void DRD_(cond_post_destroy)(const Addr cond)
 {
-  struct cond_info* p;
+   struct cond_info* p;
 
-  if (DRD_(s_trace_cond))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] cond_destroy    cond 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 cond);
-  }
+   if (DRD_(s_trace_cond))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] cond_destroy    cond 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   cond);
+   }
 
-  p = DRD_(cond_get)(cond);
-  if (p == 0)
-  {
-    CondErrInfo cei = { .cond = cond };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            CondErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "not a condition variable",
-                            &cei);
-    return;
-  }
+   p = DRD_(cond_get)(cond);
+   if (p == 0)
+   {
+      CondErrInfo cei = { .cond = cond };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              CondErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "not a condition variable",
+                              &cei);
+      return;
+   }
 
-  if (p->waiter_count != 0)
-  {
-    CondErrInfo cei = { .cond = cond };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            CondErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "destruction of condition variable being waited"
-                            " upon",
-                            &cei);
-  }
+   if (p->waiter_count != 0)
+   {
+      CondErrInfo cei = { .cond = cond };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              CondErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "destruction of condition variable being waited"
+                              " upon",
+                              &cei);
+   }
 
-  DRD_(clientobj_remove)(p->a1, ClientCondvar);
+   DRD_(clientobj_remove)(p->a1, ClientCondvar);
 }
 
 /** Called before pthread_cond_wait(). Note: before this function is called,
@@ -189,143 +190,143 @@
  */
 int DRD_(cond_pre_wait)(const Addr cond, const Addr mutex)
 {
-  struct cond_info* p;
-  struct mutex_info* q;
+   struct cond_info* p;
+   struct mutex_info* q;
 
-  if (DRD_(s_trace_cond))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] cond_pre_wait   cond 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 cond);
-  }
+   if (DRD_(s_trace_cond))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] cond_pre_wait   cond 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   cond);
+   }
 
-  p = DRD_(cond_get_or_allocate)(cond);
-  tl_assert(p);
+   p = DRD_(cond_get_or_allocate)(cond);
+   tl_assert(p);
 
-  if (p->waiter_count == 0)
-  {
-    p->mutex = mutex;
-  }
-  else if (p->mutex != mutex)
-  {
-    CondWaitErrInfo cwei
-      = { .cond = cond, .mutex1 = p->mutex, .mutex2 = mutex };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            CondWaitErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Inconsistent association of condition variable"
-                            " and mutex",
-                            &cwei);
-  }
-  tl_assert(p->mutex);
-  q = DRD_(mutex_get)(p->mutex);
-  if (q
-      && q->owner == DRD_(thread_get_running_tid)() && q->recursion_count > 0)
-  {
-    const ThreadId vg_tid = VG_(get_running_tid)();
-    MutexErrInfo MEI = { q->a1, q->recursion_count, q->owner };
-    VG_(maybe_record_error)(vg_tid,
-                            MutexErr,
-                            VG_(get_IP)(vg_tid),
-                            "Mutex locked recursively",
-                            &MEI);
-  }
-  else if (q == 0)
-  {
-    DRD_(not_a_mutex)(p->mutex);
-  }
+   if (p->waiter_count == 0)
+   {
+      p->mutex = mutex;
+   }
+   else if (p->mutex != mutex)
+   {
+      CondWaitErrInfo cwei
+         = { .cond = cond, .mutex1 = p->mutex, .mutex2 = mutex };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              CondWaitErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Inconsistent association of condition variable"
+                              " and mutex",
+                              &cwei);
+   }
+   tl_assert(p->mutex);
+   q = DRD_(mutex_get)(p->mutex);
+   if (q
+       && q->owner == DRD_(thread_get_running_tid)() && q->recursion_count > 0)
+   {
+      const ThreadId vg_tid = VG_(get_running_tid)();
+      MutexErrInfo MEI = { q->a1, q->recursion_count, q->owner };
+      VG_(maybe_record_error)(vg_tid,
+                              MutexErr,
+                              VG_(get_IP)(vg_tid),
+                              "Mutex locked recursively",
+                              &MEI);
+   }
+   else if (q == 0)
+   {
+      DRD_(not_a_mutex)(p->mutex);
+   }
 
-  return ++p->waiter_count;
+   return ++p->waiter_count;
 }
 
 /** Called after pthread_cond_wait(). */
 int DRD_(cond_post_wait)(const Addr cond)
 {
-  struct cond_info* p;
+   struct cond_info* p;
 
-  if (DRD_(s_trace_cond))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] cond_post_wait  cond 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 cond);
-  }
+   if (DRD_(s_trace_cond))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] cond_post_wait  cond 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   cond);
+   }
 
-  p = DRD_(cond_get)(cond);
-  if (p)
-  {
-    if (p->waiter_count > 0)
-    {
-      --p->waiter_count;
-      if (p->waiter_count == 0)
+   p = DRD_(cond_get)(cond);
+   if (p)
+   {
+      if (p->waiter_count > 0)
       {
-        p->mutex = 0;
+         --p->waiter_count;
+         if (p->waiter_count == 0)
+         {
+            p->mutex = 0;
+         }
       }
-    }
-    return p->waiter_count;
-  }
-  return 0;
+      return p->waiter_count;
+   }
+   return 0;
 }
 
 static void DRD_(cond_signal)(Addr const cond)
 {
-  const ThreadId vg_tid = VG_(get_running_tid)();
-  const DrdThreadId drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
-  struct cond_info* const cond_p = DRD_(cond_get)(cond);
+   const ThreadId vg_tid = VG_(get_running_tid)();
+   const DrdThreadId drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
+   struct cond_info* const cond_p = DRD_(cond_get)(cond);
 
-  if (cond_p && cond_p->waiter_count > 0)
-  {
-    if (DRD_(s_report_signal_unlocked)
-        && ! DRD_(mutex_is_locked_by)(cond_p->mutex, drd_tid))
-    {
-      /* A signal is sent while the associated mutex has not been locked. */
-      /* This can indicate but is not necessarily a race condition.       */
-      CondRaceErrInfo cei;
-      cei.cond  = cond;
-      cei.mutex = cond_p->mutex;
-      VG_(maybe_record_error)(vg_tid,
-                              CondRaceErr,
-                              VG_(get_IP)(vg_tid),
-                              "CondErr",
-                              &cei);
-    }
-  }
-  else
-  {
-    /* No other thread is waiting for the signal, hence the signal will be */
-    /* lost. This is normal in a POSIX threads application.                */
-  }
+   if (cond_p && cond_p->waiter_count > 0)
+   {
+      if (DRD_(s_report_signal_unlocked)
+          && ! DRD_(mutex_is_locked_by)(cond_p->mutex, drd_tid))
+      {
+         /* A signal is sent while the associated mutex has not been locked. */
+         /* This can indicate but is not necessarily a race condition.       */
+         CondRaceErrInfo cei;
+         cei.cond  = cond;
+         cei.mutex = cond_p->mutex;
+         VG_(maybe_record_error)(vg_tid,
+                                 CondRaceErr,
+                                 VG_(get_IP)(vg_tid),
+                                 "CondErr",
+                                 &cei);
+      }
+   }
+   else
+   {
+      /* No other thread is waiting for the signal, hence the signal will be */
+      /* lost. This is normal in a POSIX threads application.                */
+   }
 }
 
 /** Called before pthread_cond_signal(). */
 void DRD_(cond_pre_signal)(Addr const cond)
 {
-  if (DRD_(s_trace_cond))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] cond_signal     cond 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 cond);
-  }
+   if (DRD_(s_trace_cond))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] cond_signal     cond 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   cond);
+   }
 
-  DRD_(cond_signal)(cond);
+   DRD_(cond_signal)(cond);
 }
 
 /** Called before pthread_cond_broadcast(). */
 void DRD_(cond_pre_broadcast)(Addr const cond)
 {
-  if (DRD_(s_trace_cond))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] cond_broadcast  cond 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 cond);
-  }
+   if (DRD_(s_trace_cond))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] cond_broadcast  cond 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   cond);
+   }
 
-  DRD_(cond_signal)(cond);
+   DRD_(cond_signal)(cond);
 }
diff --git a/drd/drd_cond.h b/drd/drd_cond.h
index e75caef..75a0743 100644
--- a/drd/drd_cond.h
+++ b/drd/drd_cond.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_error.c b/drd/drd_error.c
index 4259751..fe37aab 100644
--- a/drd/drd_error.c
+++ b/drd/drd_error.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -47,7 +48,7 @@
 
 void DRD_(set_show_conflicting_segments)(const Bool scs)
 {
-  s_show_conflicting_segments = scs;
+   s_show_conflicting_segments = scs;
 }
 
 /**
@@ -57,17 +58,17 @@
 static
 void describe_malloced_addr(Addr const a, SizeT const len, AddrInfo* const ai)
 {
-  Addr data;
+   Addr data;
 
-  if (DRD_(heap_addrinfo)(a, &data, &ai->size, &ai->lastchange))
-  {
-    ai->akind = eMallocd;
-    ai->rwoffset = a - data;
-  }
-  else
-  {
-    ai->akind = eUnknown;
-  }
+   if (DRD_(heap_addrinfo)(a, &data, &ai->size, &ai->lastchange))
+   {
+      ai->akind = eMallocd;
+      ai->rwoffset = a - data;
+   }
+   else
+   {
+      ai->akind = eUnknown;
+   }
 }
 
 /**
@@ -77,343 +78,344 @@
  */
 static void first_observed(const Addr obj)
 {
-  DrdClientobj* cl;
+   DrdClientobj* cl;
 
-  cl = DRD_(clientobj_get_any)(obj);
-  if (cl)
-  {
-    tl_assert(cl->any.first_observed_at);
-    VG_(message)(Vg_UserMsg,
-                 "%s 0x%lx was first observed at:",
-                 DRD_(clientobj_type_name)(cl->any.type),
-                 obj);
-    VG_(pp_ExeContext)(cl->any.first_observed_at);
-  }
+   cl = DRD_(clientobj_get_any)(obj);
+   if (cl)
+   {
+      tl_assert(cl->any.first_observed_at);
+      VG_(message)(Vg_UserMsg,
+                   "%s 0x%lx was first observed at:",
+                   DRD_(clientobj_type_name)(cl->any.type),
+                   obj);
+      VG_(pp_ExeContext)(cl->any.first_observed_at);
+   }
 }
 
 static
 void drd_report_data_race(Error* const err, const DataRaceErrInfo* const dri)
 {
-  AddrInfo ai;
-  const unsigned descr_size = 256;
-  Char* descr1 = VG_(malloc)("drd.error.drdr2.1", descr_size);
-  Char* descr2 = VG_(malloc)("drd.error.drdr2.2", descr_size);
+   AddrInfo ai;
+   const unsigned descr_size = 256;
+   Char* descr1 = VG_(malloc)("drd.error.drdr2.1", descr_size);
+   Char* descr2 = VG_(malloc)("drd.error.drdr2.2", descr_size);
 
-  tl_assert(dri);
-  tl_assert(dri->addr);
-  tl_assert(dri->size > 0);
-  tl_assert(descr1);
-  tl_assert(descr2);
+   tl_assert(dri);
+   tl_assert(dri->addr);
+   tl_assert(dri->size > 0);
+   tl_assert(descr1);
+   tl_assert(descr2);
 
-  descr1[0] = 0;
-  descr2[0] = 0;
-  VG_(get_data_description)(descr1, descr2, descr_size, dri->addr);
-  if (descr1[0] == 0)
-  {
-    describe_malloced_addr(dri->addr, dri->size, &ai);
-  }
-  VG_(message)(Vg_UserMsg,
-               "Conflicting %s by thread %d/%d at 0x%08lx size %ld",
-               dri->access_type == eStore ? "store" : "load",
-               DRD_(DrdThreadIdToVgThreadId)(dri->tid),
-               dri->tid,
-               dri->addr,
-               dri->size);
-  VG_(pp_ExeContext)(VG_(get_error_where)(err));
-  if (descr1[0])
-  {
-    VG_(message)(Vg_UserMsg, "%s", descr1);
-    VG_(message)(Vg_UserMsg, "%s", descr2);
-  }
-  else if (ai.akind == eMallocd && ai.lastchange)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "Address 0x%lx is at offset %ld from 0x%lx."
-                 " Allocation context:",
-                 dri->addr, ai.rwoffset, dri->addr - ai.rwoffset);
-    VG_(pp_ExeContext)(ai.lastchange);
-  }
-  else
-  {
-    char sect_name[64];
-    VgSectKind sect_kind;
-
-    sect_kind = VG_(seginfo_sect_kind)(sect_name, sizeof(sect_name), dri->addr);
-    if (sect_kind != Vg_SectUnknown)
-    {
+   descr1[0] = 0;
+   descr2[0] = 0;
+   VG_(get_data_description)(descr1, descr2, descr_size, dri->addr);
+   if (descr1[0] == 0)
+   {
+      describe_malloced_addr(dri->addr, dri->size, &ai);
+   }
+   VG_(message)(Vg_UserMsg,
+                "Conflicting %s by thread %d/%d at 0x%08lx size %ld",
+                dri->access_type == eStore ? "store" : "load",
+                DRD_(DrdThreadIdToVgThreadId)(dri->tid),
+                dri->tid,
+                dri->addr,
+                dri->size);
+   VG_(pp_ExeContext)(VG_(get_error_where)(err));
+   if (descr1[0])
+   {
+      VG_(message)(Vg_UserMsg, "%s", descr1);
+      VG_(message)(Vg_UserMsg, "%s", descr2);
+   }
+   else if (ai.akind == eMallocd && ai.lastchange)
+   {
       VG_(message)(Vg_UserMsg,
-                   "Allocation context: %s section of %s",
-                   VG_(pp_SectKind)(sect_kind),
-                   sect_name);
-    }
-    else
-    {
-      VG_(message)(Vg_UserMsg, "Allocation context: unknown.");
-    }
-  }
-  if (s_show_conflicting_segments)
-  {
-    DRD_(thread_report_conflicting_segments)(dri->tid,
-                                             dri->addr, dri->size,
-                                             dri->access_type);
-  }
+                   "Address 0x%lx is at offset %ld from 0x%lx."
+                   " Allocation context:",
+                   dri->addr, ai.rwoffset, dri->addr - ai.rwoffset);
+      VG_(pp_ExeContext)(ai.lastchange);
+   }
+   else
+   {
+      char sect_name[64];
+      VgSectKind sect_kind;
 
-  VG_(free)(descr2);
-  VG_(free)(descr1);
+      sect_kind = VG_(seginfo_sect_kind)(sect_name, sizeof(sect_name),
+                                         dri->addr);
+      if (sect_kind != Vg_SectUnknown)
+      {
+         VG_(message)(Vg_UserMsg,
+                      "Allocation context: %s section of %s",
+                      VG_(pp_SectKind)(sect_kind),
+                      sect_name);
+      }
+      else
+      {
+         VG_(message)(Vg_UserMsg, "Allocation context: unknown.");
+      }
+   }
+   if (s_show_conflicting_segments)
+   {
+      DRD_(thread_report_conflicting_segments)(dri->tid,
+                                               dri->addr, dri->size,
+                                               dri->access_type);
+   }
+
+   VG_(free)(descr2);
+   VG_(free)(descr1);
 }
 
 static Bool drd_tool_error_eq(VgRes res, Error* e1, Error* e2)
 {
-  return False;
+   return False;
 }
 
 static void drd_tool_error_pp(Error* const e)
 {
-  switch (VG_(get_error_kind)(e))
-  {
-  case DataRaceErr: {
-    drd_report_data_race(e, VG_(get_error_extra)(e));
-    break;
-  }
-  case MutexErr: {
-    MutexErrInfo* p = (MutexErrInfo*)(VG_(get_error_extra)(e));
-    tl_assert(p);
-    if (p->recursion_count >= 0)
-    {
+   switch (VG_(get_error_kind)(e))
+   {
+   case DataRaceErr: {
+      drd_report_data_race(e, VG_(get_error_extra)(e));
+      break;
+   }
+   case MutexErr: {
+      MutexErrInfo* p = (MutexErrInfo*)(VG_(get_error_extra)(e));
+      tl_assert(p);
+      if (p->recursion_count >= 0)
+      {
+         VG_(message)(Vg_UserMsg,
+                      "%s: mutex 0x%lx, recursion count %d, owner %d.",
+                      VG_(get_error_string)(e),
+                      p->mutex,
+                      p->recursion_count,
+                      p->owner);
+      }
+      else
+      {
+         VG_(message)(Vg_UserMsg,
+                      "The object at address 0x%lx is not a mutex.",
+                      p->mutex);
+      }
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(p->mutex);
+      break;
+   }
+   case CondErr: {
+      CondErrInfo* cdei =(CondErrInfo*)(VG_(get_error_extra)(e));
       VG_(message)(Vg_UserMsg,
-                   "%s: mutex 0x%lx, recursion count %d, owner %d.",
+                   "%s: cond 0x%lx",
                    VG_(get_error_string)(e),
-                   p->mutex,
-                   p->recursion_count,
-                   p->owner);
-    }
-    else
-    {
+                   cdei->cond);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(cdei->cond);
+      break;
+   }
+   case CondDestrErr: {
+      CondDestrErrInfo* cdi = (CondDestrErrInfo*)(VG_(get_error_extra)(e));
       VG_(message)(Vg_UserMsg,
-                   "The object at address 0x%lx is not a mutex.",
-                   p->mutex);
-    }
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(p->mutex);
-    break;
-  }
-  case CondErr: {
-    CondErrInfo* cdei =(CondErrInfo*)(VG_(get_error_extra)(e));
-    VG_(message)(Vg_UserMsg,
-                 "%s: cond 0x%lx",
-                 VG_(get_error_string)(e),
-                 cdei->cond);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(cdei->cond);
-    break;
-  }
-  case CondDestrErr: {
-    CondDestrErrInfo* cdi = (CondDestrErrInfo*)(VG_(get_error_extra)(e));
-    VG_(message)(Vg_UserMsg,
-                 "%s: cond 0x%lx, mutex 0x%lx locked by thread %d/%d",
-                 VG_(get_error_string)(e),
-                 cdi->cond, cdi->mutex,
-                 DRD_(DrdThreadIdToVgThreadId)(cdi->tid), cdi->tid);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(cdi->mutex);
-    break;
-  }
-  case CondRaceErr: {
-    CondRaceErrInfo* cei = (CondRaceErrInfo*)(VG_(get_error_extra)(e));
-    VG_(message)(Vg_UserMsg,
-                 "Probably a race condition: condition variable 0x%lx has been"
-                 " signaled but the associated mutex 0x%lx is not locked"
-                 " by the signalling thread.",
-                 cei->cond, cei->mutex);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(cei->cond);
-    first_observed(cei->mutex);
-    break;
-  }
-  case CondWaitErr: {
-    CondWaitErrInfo* cwei = (CondWaitErrInfo*)(VG_(get_error_extra)(e));
-    VG_(message)(Vg_UserMsg,
-                 "%s: condition variable 0x%lx, mutexes 0x%lx and 0x%lx",
-                 VG_(get_error_string)(e),
-                 cwei->cond,
-                 cwei->mutex1,
-                 cwei->mutex2);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(cwei->cond);
-    first_observed(cwei->mutex1);
-    first_observed(cwei->mutex2);
-    break;
-  }
-  case SemaphoreErr: {
-    SemaphoreErrInfo* sei = (SemaphoreErrInfo*)(VG_(get_error_extra)(e));
-    tl_assert(sei);
-    VG_(message)(Vg_UserMsg,
-                 "%s: semaphore 0x%lx",
-                 VG_(get_error_string)(e),
-                 sei->semaphore);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(sei->semaphore);
-    break;
-  }
-  case BarrierErr: {
-    BarrierErrInfo* bei = (BarrierErrInfo*)(VG_(get_error_extra)(e));
-    tl_assert(bei);
-    VG_(message)(Vg_UserMsg,
-                 "%s: barrier 0x%lx",
-                 VG_(get_error_string)(e),
-                 bei->barrier);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    if (bei->other_context)
-    {
+                   "%s: cond 0x%lx, mutex 0x%lx locked by thread %d/%d",
+                   VG_(get_error_string)(e),
+                   cdi->cond, cdi->mutex,
+                   DRD_(DrdThreadIdToVgThreadId)(cdi->tid), cdi->tid);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(cdi->mutex);
+      break;
+   }
+   case CondRaceErr: {
+      CondRaceErrInfo* cei = (CondRaceErrInfo*)(VG_(get_error_extra)(e));
       VG_(message)(Vg_UserMsg,
-                   "Conflicting wait call by thread %d/%d:",
-                   DRD_(DrdThreadIdToVgThreadId)(bei->other_tid),
-                   bei->other_tid);
-      VG_(pp_ExeContext)(bei->other_context);
-    }
-    first_observed(bei->barrier);
-    break;
-  }
-  case RwlockErr: {
-    RwlockErrInfo* p = (RwlockErrInfo*)(VG_(get_error_extra)(e));
-    tl_assert(p);
-    VG_(message)(Vg_UserMsg,
-                 "%s: rwlock 0x%lx.",
-                 VG_(get_error_string)(e),
-                 p->rwlock);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(p->rwlock);
-    break;
-  }
-  case HoldtimeErr: {
-    HoldtimeErrInfo* p =(HoldtimeErrInfo*)(VG_(get_error_extra)(e));
-    tl_assert(p);
-    tl_assert(p->acquired_at);
-    VG_(message)(Vg_UserMsg, "Acquired at:");
-    VG_(pp_ExeContext)(p->acquired_at);
-    VG_(message)(Vg_UserMsg,
-                 "Lock on %s 0x%lx was held during %d ms (threshold: %d ms).",
-                 VG_(get_error_string)(e),
-                 p->synchronization_object,
-                 p->hold_time_ms,
-                 p->threshold_ms);
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    first_observed(p->synchronization_object);
-    break;
-  }
-  case GenericErr: {
-    //GenericErrInfo* gei =(GenericErrInfo*)(VG_(get_error_extra)(e));
-    VG_(message)(Vg_UserMsg, "%s", VG_(get_error_string)(e));
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    break;
-  }
-  default:
-    VG_(message)(Vg_UserMsg,
-                 "%s",
-                 VG_(get_error_string)(e));
-    VG_(pp_ExeContext)(VG_(get_error_where)(e));
-    break;
-  }
+                   "Probably a race condition: condition variable 0x%lx has"
+                   " been signaled but the associated mutex 0x%lx is not"
+                   " locked by the signalling thread.",
+                   cei->cond, cei->mutex);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(cei->cond);
+      first_observed(cei->mutex);
+      break;
+   }
+   case CondWaitErr: {
+      CondWaitErrInfo* cwei = (CondWaitErrInfo*)(VG_(get_error_extra)(e));
+      VG_(message)(Vg_UserMsg,
+                   "%s: condition variable 0x%lx, mutexes 0x%lx and 0x%lx",
+                   VG_(get_error_string)(e),
+                   cwei->cond,
+                   cwei->mutex1,
+                   cwei->mutex2);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(cwei->cond);
+      first_observed(cwei->mutex1);
+      first_observed(cwei->mutex2);
+      break;
+   }
+   case SemaphoreErr: {
+      SemaphoreErrInfo* sei = (SemaphoreErrInfo*)(VG_(get_error_extra)(e));
+      tl_assert(sei);
+      VG_(message)(Vg_UserMsg,
+                   "%s: semaphore 0x%lx",
+                   VG_(get_error_string)(e),
+                   sei->semaphore);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(sei->semaphore);
+      break;
+   }
+   case BarrierErr: {
+      BarrierErrInfo* bei = (BarrierErrInfo*)(VG_(get_error_extra)(e));
+      tl_assert(bei);
+      VG_(message)(Vg_UserMsg,
+                   "%s: barrier 0x%lx",
+                   VG_(get_error_string)(e),
+                   bei->barrier);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      if (bei->other_context)
+      {
+         VG_(message)(Vg_UserMsg,
+                      "Conflicting wait call by thread %d/%d:",
+                      DRD_(DrdThreadIdToVgThreadId)(bei->other_tid),
+                      bei->other_tid);
+         VG_(pp_ExeContext)(bei->other_context);
+      }
+      first_observed(bei->barrier);
+      break;
+   }
+   case RwlockErr: {
+      RwlockErrInfo* p = (RwlockErrInfo*)(VG_(get_error_extra)(e));
+      tl_assert(p);
+      VG_(message)(Vg_UserMsg,
+                   "%s: rwlock 0x%lx.",
+                   VG_(get_error_string)(e),
+                   p->rwlock);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(p->rwlock);
+      break;
+   }
+   case HoldtimeErr: {
+      HoldtimeErrInfo* p =(HoldtimeErrInfo*)(VG_(get_error_extra)(e));
+      tl_assert(p);
+      tl_assert(p->acquired_at);
+      VG_(message)(Vg_UserMsg, "Acquired at:");
+      VG_(pp_ExeContext)(p->acquired_at);
+      VG_(message)(Vg_UserMsg,
+                   "Lock on %s 0x%lx was held during %d ms (threshold: %d ms).",
+                   VG_(get_error_string)(e),
+                   p->synchronization_object,
+                   p->hold_time_ms,
+                   p->threshold_ms);
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      first_observed(p->synchronization_object);
+      break;
+   }
+   case GenericErr: {
+      //GenericErrInfo* gei =(GenericErrInfo*)(VG_(get_error_extra)(e));
+      VG_(message)(Vg_UserMsg, "%s", VG_(get_error_string)(e));
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      break;
+   }
+   default:
+      VG_(message)(Vg_UserMsg,
+                   "%s",
+                   VG_(get_error_string)(e));
+      VG_(pp_ExeContext)(VG_(get_error_where)(e));
+      break;
+   }
 }
 
 static UInt drd_tool_error_update_extra(Error* e)
 {
-  switch (VG_(get_error_kind)(e))
-  {
-  case DataRaceErr:
-    return sizeof(DataRaceErrInfo);
-  case MutexErr:
-    return sizeof(MutexErrInfo);
-  case CondErr:
-    return sizeof(CondErrInfo);
-  case CondDestrErr:
-    return sizeof(CondDestrErrInfo);
-  case CondRaceErr:
-    return sizeof(CondRaceErrInfo);
-  case CondWaitErr:
-    return sizeof(CondWaitErrInfo);
-  case SemaphoreErr:
-    return sizeof(SemaphoreErrInfo);
-  case BarrierErr:
-    return sizeof(BarrierErrInfo);
-  case RwlockErr:
-    return sizeof(RwlockErrInfo);
-  case HoldtimeErr:
-    return sizeof(HoldtimeErrInfo);
-  case GenericErr:
-    return sizeof(GenericErrInfo);
-  default:
-    tl_assert(False);
-    break;
-  }
+   switch (VG_(get_error_kind)(e))
+   {
+   case DataRaceErr:
+      return sizeof(DataRaceErrInfo);
+   case MutexErr:
+      return sizeof(MutexErrInfo);
+   case CondErr:
+      return sizeof(CondErrInfo);
+   case CondDestrErr:
+      return sizeof(CondDestrErrInfo);
+   case CondRaceErr:
+      return sizeof(CondRaceErrInfo);
+   case CondWaitErr:
+      return sizeof(CondWaitErrInfo);
+   case SemaphoreErr:
+      return sizeof(SemaphoreErrInfo);
+   case BarrierErr:
+      return sizeof(BarrierErrInfo);
+   case RwlockErr:
+      return sizeof(RwlockErrInfo);
+   case HoldtimeErr:
+      return sizeof(HoldtimeErrInfo);
+   case GenericErr:
+      return sizeof(GenericErrInfo);
+   default:
+      tl_assert(False);
+      break;
+   }
 }
 
 static Bool drd_tool_error_recog(Char* const name, Supp* const supp)
 {
-  SuppKind skind = 0;
+   SuppKind skind = 0;
 
-  if (VG_(strcmp)(name, STR_DataRaceErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_MutexErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_CondErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_CondDestrErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_CondRaceErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_CondWaitErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_SemaphoreErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_BarrierErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_RwlockErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_HoldtimeErr) == 0)
-    ;
-  else if (VG_(strcmp)(name, STR_GenericErr) == 0)
-    ;
-  else
-    return False;
+   if (VG_(strcmp)(name, STR_DataRaceErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_MutexErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_CondErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_CondDestrErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_CondRaceErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_CondWaitErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_SemaphoreErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_BarrierErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_RwlockErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_HoldtimeErr) == 0)
+      ;
+   else if (VG_(strcmp)(name, STR_GenericErr) == 0)
+      ;
+   else
+      return False;
 
-  VG_(set_supp_kind)(supp, skind);
-  return True;
+   VG_(set_supp_kind)(supp, skind);
+   return True;
 }
 
 static
 Bool drd_tool_error_read_extra(Int fd, Char* buf, Int nBuf, Supp* supp)
 {
-  return True;
+   return True;
 }
 
 static Bool drd_tool_error_matches(Error* const e, Supp* const supp)
 {
-  switch (VG_(get_supp_kind)(supp))
-  {
-  }
-  return True;
+   switch (VG_(get_supp_kind)(supp))
+   {
+   }
+   return True;
 }
 
 static Char* drd_tool_error_name(Error* e)
 {
-  switch (VG_(get_error_kind)(e))
-  {
-  case DataRaceErr:  return VGAPPEND(STR_, DataRaceErr);
-  case MutexErr:     return VGAPPEND(STR_, MutexErr);
-  case CondErr:      return VGAPPEND(STR_, CondErr);
-  case CondDestrErr: return VGAPPEND(STR_, CondDestrErr);
-  case CondRaceErr:  return VGAPPEND(STR_, CondRaceErr);
-  case CondWaitErr:  return VGAPPEND(STR_, CondWaitErr);
-  case SemaphoreErr: return VGAPPEND(STR_, SemaphoreErr);
-  case BarrierErr:   return VGAPPEND(STR_, BarrierErr);
-  case RwlockErr:    return VGAPPEND(STR_, RwlockErr);
-  case HoldtimeErr:  return VGAPPEND(STR_, HoldtimeErr);
-  case GenericErr:   return VGAPPEND(STR_, GenericErr);
-  default:
-    tl_assert(0);
-  }
-  return 0;
+   switch (VG_(get_error_kind)(e))
+   {
+   case DataRaceErr:  return VGAPPEND(STR_, DataRaceErr);
+   case MutexErr:     return VGAPPEND(STR_, MutexErr);
+   case CondErr:      return VGAPPEND(STR_, CondErr);
+   case CondDestrErr: return VGAPPEND(STR_, CondDestrErr);
+   case CondRaceErr:  return VGAPPEND(STR_, CondRaceErr);
+   case CondWaitErr:  return VGAPPEND(STR_, CondWaitErr);
+   case SemaphoreErr: return VGAPPEND(STR_, SemaphoreErr);
+   case BarrierErr:   return VGAPPEND(STR_, BarrierErr);
+   case RwlockErr:    return VGAPPEND(STR_, RwlockErr);
+   case HoldtimeErr:  return VGAPPEND(STR_, HoldtimeErr);
+   case GenericErr:   return VGAPPEND(STR_, GenericErr);
+   default:
+      tl_assert(0);
+   }
+   return 0;
 }
 
 static void drd_tool_error_print_extra(Error* e)
@@ -421,14 +423,14 @@
 
 void DRD_(register_error_handlers)(void)
 {
-  // Tool error reporting.
-  VG_(needs_tool_errors)(drd_tool_error_eq,
-                         drd_tool_error_pp,
-                         True,
-                         drd_tool_error_update_extra,
-                         drd_tool_error_recog,
-                         drd_tool_error_read_extra,
-                         drd_tool_error_matches,
-                         drd_tool_error_name,
-                         drd_tool_error_print_extra);
+   // Tool error reporting.
+   VG_(needs_tool_errors)(drd_tool_error_eq,
+                          drd_tool_error_pp,
+                          True,
+                          drd_tool_error_update_extra,
+                          drd_tool_error_recog,
+                          drd_tool_error_read_extra,
+                          drd_tool_error_matches,
+                          drd_tool_error_name,
+                          drd_tool_error_print_extra);
 }
diff --git a/drd/drd_error.h b/drd/drd_error.h
index 34fd62b..2d5e643 100644
--- a/drd/drd_error.h
+++ b/drd/drd_error.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -138,10 +139,10 @@
 } RwlockErrInfo;
 
 typedef struct {
-  Addr        synchronization_object;
-  ExeContext* acquired_at;
-  UInt        hold_time_ms;
-  UInt        threshold_ms;
+   Addr        synchronization_object;
+   ExeContext* acquired_at;
+   UInt        hold_time_ms;
+   UInt        threshold_ms;
 } HoldtimeErrInfo;
 
 typedef struct {
diff --git a/drd/drd_gomp_intercepts.c b/drd/drd_gomp_intercepts.c
index d791c83..6cf2890 100644
--- a/drd/drd_gomp_intercepts.c
+++ b/drd/drd_gomp_intercepts.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 
 /*--------------------------------------------------------------------*/
 /*--- Client-space code for drd.             drd_gomp_intercepts.c ---*/
@@ -46,9 +47,9 @@
 
 // Defines.
 
-#define GOMP_FUNC(ret_ty, f, args...)                   \
-  ret_ty VG_WRAP_FUNCTION_ZZ(libgompZdsoZd1Za,f)(args); \
-  ret_ty VG_WRAP_FUNCTION_ZZ(libgompZdsoZd1Za,f)(args)
+#define GOMP_FUNC(ret_ty, f, args...)                           \
+   ret_ty VG_WRAP_FUNCTION_ZZ(libgompZdsoZd1Za,f)(args);        \
+   ret_ty VG_WRAP_FUNCTION_ZZ(libgompZdsoZd1Za,f)(args)
 
 
 // Type definitions
@@ -61,56 +62,56 @@
 GOMP_FUNC(void, gompZubarrierZuinit, // gomp_barrier_init
           gomp_barrier_t* barrier, unsigned count)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
+   int    ret;
+   int    res;
+   OrigFn fn;
 
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
-                             barrier, gomp_barrier, count, 0, 0);
-  VALGRIND_GET_ORIG_FN(fn);
-  CALL_FN_W_WW(ret, fn, barrier, count);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
-                             barrier, gomp_barrier, 0, 0, 0);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
+                              barrier, gomp_barrier, count, 0, 0);
+   VALGRIND_GET_ORIG_FN(fn);
+   CALL_FN_W_WW(ret, fn, barrier, count);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
+                              barrier, gomp_barrier, 0, 0, 0);
 }
 
 GOMP_FUNC(void, gompZubarrierZureinit, // gomp_barrier_reinit
           gomp_barrier_t* barrier, unsigned count)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
-                             barrier, gomp_barrier, count, 1, 0);
-  VALGRIND_GET_ORIG_FN(fn);
-  CALL_FN_W_WW(ret, fn, barrier, count);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
-                             barrier, gomp_barrier, 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
+                              barrier, gomp_barrier, count, 1, 0);
+   VALGRIND_GET_ORIG_FN(fn);
+   CALL_FN_W_WW(ret, fn, barrier, count);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
+                              barrier, gomp_barrier, 0, 0, 0);
 }
 
 GOMP_FUNC(void, gompZubarrierZudestroy, // gomp_barrier_destroy
           gomp_barrier_t* barrier)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_DESTROY,
-                             barrier, gomp_barrier, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, barrier);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_DESTROY,
-                             barrier, gomp_barrier, 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_DESTROY,
+                              barrier, gomp_barrier, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, barrier);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_DESTROY,
+                              barrier, gomp_barrier, 0, 0, 0);
 }
 
 GOMP_FUNC(void, gompZubarrierZuwait, // gomp_barrier_wait
           gomp_barrier_t* barrier)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_WAIT,
-                             barrier, gomp_barrier, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, barrier);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_WAIT,
-                             barrier, gomp_barrier, 1, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_WAIT,
+                              barrier, gomp_barrier, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, barrier);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_WAIT,
+                              barrier, gomp_barrier, 1, 0, 0);
 }
diff --git a/drd/drd_load_store.c b/drd/drd_load_store.c
index 6370698..6521073 100644
--- a/drd/drd_load_store.c
+++ b/drd/drd_load_store.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -59,202 +60,202 @@
 
 Bool DRD_(get_check_stack_accesses)()
 {
-  return DRD_(s_check_stack_accesses);
+   return DRD_(s_check_stack_accesses);
 }
 
 void DRD_(set_check_stack_accesses)(const Bool c)
 {
-  tl_assert(c == False || c == True);
-  DRD_(s_check_stack_accesses) = c;
+   tl_assert(c == False || c == True);
+   DRD_(s_check_stack_accesses) = c;
 }
 
 void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
-                          const BmAccessTypeT access_type)
+                            const BmAccessTypeT access_type)
 {
-  if (DRD_(is_any_traced)(addr, addr + size))
-  {
-    char vc[80];
-    DRD_(vc_snprint)(vc, sizeof(vc),
-                     DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
-    VG_(message)(Vg_UserMsg,
-                 "%s 0x%lx size %ld (vg %d / drd %d / vc %s)",
-                 access_type == eLoad
-                 ? "load "
-                 : access_type == eStore
-                 ? "store"
-                 : access_type == eStart
-                 ? "start"
-                 : access_type == eEnd
-                 ? "end  "
-                 : "????",
-                 addr,
-                 size,
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 vc);
-    VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
-                               VG_(clo_backtrace_size));
-    tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
-              == VG_(get_running_tid)());
-  }
+   if (DRD_(is_any_traced)(addr, addr + size))
+   {
+      char vc[80];
+      DRD_(vc_snprint)(vc, sizeof(vc),
+                       DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
+      VG_(message)(Vg_UserMsg,
+                   "%s 0x%lx size %ld (vg %d / drd %d / vc %s)",
+                   access_type == eLoad
+                   ? "load "
+                   : access_type == eStore
+                   ? "store"
+                   : access_type == eStart
+                   ? "start"
+                   : access_type == eEnd
+                   ? "end  "
+                   : "????",
+                   addr,
+                   size,
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   vc);
+      VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
+                                 VG_(clo_backtrace_size));
+      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
+                == VG_(get_running_tid)());
+   }
 }
 
 static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
 {
-  return DRD_(trace_mem_access)(addr, size, eLoad);
+   return DRD_(trace_mem_access)(addr, size, eLoad);
 }
 
 static VG_REGPARM(2) void drd_trace_mem_store(const Addr addr,const SizeT size)
 {
-  return DRD_(trace_mem_access)(addr, size, eStore);
+   return DRD_(trace_mem_access)(addr, size, eStore);
 }
 
 static void drd_report_race(const Addr addr, const SizeT size,
                             const BmAccessTypeT access_type)
 {
-  DataRaceErrInfo drei;
+   DataRaceErrInfo drei;
 
-  drei.tid  = DRD_(thread_get_running_tid)();
-  drei.addr = addr;
-  drei.size = size;
-  drei.access_type = access_type;
-  VG_(maybe_record_error)(VG_(get_running_tid)(),
-                          DataRaceErr,
-                          VG_(get_IP)(VG_(get_running_tid)()),
-                          "Conflicting accesses",
-                          &drei);
+   drei.tid  = DRD_(thread_get_running_tid)();
+   drei.addr = addr;
+   drei.size = size;
+   drei.access_type = access_type;
+   VG_(maybe_record_error)(VG_(get_running_tid)(),
+                           DataRaceErr,
+                           VG_(get_IP)(VG_(get_running_tid)()),
+                           "Conflicting accesses",
+                           &drei);
 }
 
 VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  /* The assert below has been commented out because of performance reasons.*/
-  tl_assert(thread_get_running_tid()
-            == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
+   /* The assert below has been commented out because of performance reasons.*/
+   tl_assert(thread_get_running_tid()
+             == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
 #endif
 
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_load_triggers_conflict(addr, addr + size)
-      && ! DRD_(is_suppressed)(addr, addr + size))
-  {
-    drd_report_race(addr, size, eLoad);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_load_triggers_conflict(addr, addr + size)
+       && ! DRD_(is_suppressed)(addr, addr + size))
+   {
+      drd_report_race(addr, size, eLoad);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_load_1_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 1))
-  {
-    drd_report_race(addr, 1, eLoad);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_load_1_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 1))
+   {
+      drd_report_race(addr, 1, eLoad);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_load_2_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 2))
-  {
-    drd_report_race(addr, 2, eLoad);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_load_2_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 2))
+   {
+      drd_report_race(addr, 2, eLoad);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_load_4_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 4))
-  {
-    drd_report_race(addr, 4, eLoad);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_load_4_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 4))
+   {
+      drd_report_race(addr, 4, eLoad);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_load_8_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 8))
-  {
-    drd_report_race(addr, 8, eLoad);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_load_8_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 8))
+   {
+      drd_report_race(addr, 8, eLoad);
+   }
 }
 
 VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  /* The assert below has been commented out because of performance reasons.*/
-  tl_assert(thread_get_running_tid()
-            == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
+   /* The assert below has been commented out because of performance reasons.*/
+   tl_assert(thread_get_running_tid()
+             == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
 #endif
 
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_store_triggers_conflict(addr, addr + size)
-      && ! DRD_(is_suppressed)(addr, addr + size))
-  {
-    drd_report_race(addr, size, eStore);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_store_triggers_conflict(addr, addr + size)
+       && ! DRD_(is_suppressed)(addr, addr + size))
+   {
+      drd_report_race(addr, size, eStore);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_store_1_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 1))
-  {
-    drd_report_race(addr, 1, eStore);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_store_1_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 1))
+   {
+      drd_report_race(addr, 1, eStore);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_store_2_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 2))
-  {
-    drd_report_race(addr, 2, eStore);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_store_2_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 2))
+   {
+      drd_report_race(addr, 2, eStore);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_store_4_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 4))
-  {
-    drd_report_race(addr, 4, eStore);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_store_4_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 4))
+   {
+      drd_report_race(addr, 4, eStore);
+   }
 }
 
 static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
 {
-  if (DRD_(running_thread_is_recording)()
-      && (DRD_(s_check_stack_accesses)
-          || ! DRD_(thread_address_on_stack)(addr))
-      && bm_access_store_8_triggers_conflict(addr)
-      && ! DRD_(is_suppressed)(addr, addr + 8))
-  {
-    drd_report_race(addr, 8, eStore);
-  }
+   if (DRD_(running_thread_is_recording)()
+       && (DRD_(s_check_stack_accesses)
+           || ! DRD_(thread_address_on_stack)(addr))
+       && bm_access_store_8_triggers_conflict(addr)
+       && ! DRD_(is_suppressed)(addr, addr + 8))
+   {
+      drd_report_race(addr, 8, eStore);
+   }
 }
 
 /**
@@ -263,297 +264,297 @@
  */
 static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
 {
-  Bool result = False;
+   Bool result = False;
 
-  if (addr_expr->tag == Iex_RdTmp)
-  {
-    int i;
-    for (i = 0; i < bb->stmts_size; i++)
-    {
-      if (bb->stmts[i]
-          && bb->stmts[i]->tag == Ist_WrTmp
-          && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
+   if (addr_expr->tag == Iex_RdTmp)
+   {
+      int i;
+      for (i = 0; i < bb->stmts_size; i++)
       {
-        IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
-        if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
-        {
-          result = True;
-        }
+         if (bb->stmts[i]
+             && bb->stmts[i]->tag == Ist_WrTmp
+             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
+         {
+            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
+            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
+            {
+               result = True;
+            }
 
-        //ppIRExpr(e);
-        //VG_(printf)(" (%s)\n", result ? "True" : "False");
-        break;
+            //ppIRExpr(e);
+            //VG_(printf)(" (%s)\n", result ? "True" : "False");
+            break;
+         }
       }
-    }
-  }
-  return result;
+   }
+   return result;
 }
 
 static void instrument_load(IRSB* const bb,
                             IRExpr* const addr_expr,
                             const HWord size)
 {
-  IRExpr* size_expr;
-  IRExpr** argv;
-  IRDirty* di;
+   IRExpr* size_expr;
+   IRExpr** argv;
+   IRDirty* di;
 
-  if (UNLIKELY(DRD_(any_address_is_traced)()))
-  {
-    addStmtToIRSB(bb,
-		  IRStmt_Dirty(
-		    unsafeIRDirty_0_N(/*regparms*/2,
-				      "drd_trace_load",
-				      VG_(fnptr_to_fnentry)
-				      (drd_trace_mem_load),
-				      mkIRExprVec_2(addr_expr,
-						    mkIRExpr_HWord(size)))));
-  }
+   if (UNLIKELY(DRD_(any_address_is_traced)()))
+   {
+      addStmtToIRSB(bb,
+         IRStmt_Dirty(
+            unsafeIRDirty_0_N(/*regparms*/2,
+                              "drd_trace_load",
+                              VG_(fnptr_to_fnentry)
+                              (drd_trace_mem_load),
+                              mkIRExprVec_2(addr_expr,
+                                            mkIRExpr_HWord(size)))));
+   }
 
-  if (! DRD_(s_check_stack_accesses) && is_stack_access(bb, addr_expr))
-    return;
+   if (! DRD_(s_check_stack_accesses) && is_stack_access(bb, addr_expr))
+      return;
 
-  switch (size)
-  {
-  case 1:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_load_1",
-                           VG_(fnptr_to_fnentry)(drd_trace_load_1),
-                           argv);
-    break;
-  case 2:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_load_2",
-                           VG_(fnptr_to_fnentry)(drd_trace_load_2),
-                           argv);
-    break;
-  case 4:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_load_4",
-                           VG_(fnptr_to_fnentry)(drd_trace_load_4),
-                           argv);
-    break;
-  case 8:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_load_8",
-                           VG_(fnptr_to_fnentry)(drd_trace_load_8),
-                           argv);
-    break;
-  default:
-    size_expr = mkIRExpr_HWord(size);
-    argv = mkIRExprVec_2(addr_expr, size_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/2,
-                           "drd_trace_load",
-                           VG_(fnptr_to_fnentry)(DRD_(trace_load)),
-                           argv);
-    break;
-  }
-  addStmtToIRSB(bb, IRStmt_Dirty(di));
+   switch (size)
+   {
+   case 1:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_load_1",
+                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
+                             argv);
+      break;
+   case 2:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_load_2",
+                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
+                             argv);
+      break;
+   case 4:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_load_4",
+                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
+                             argv);
+      break;
+   case 8:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_load_8",
+                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
+                             argv);
+      break;
+   default:
+      size_expr = mkIRExpr_HWord(size);
+      argv = mkIRExprVec_2(addr_expr, size_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/2,
+                             "drd_trace_load",
+                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
+                             argv);
+      break;
+   }
+   addStmtToIRSB(bb, IRStmt_Dirty(di));
 }
 
 static void instrument_store(IRSB* const bb,
                              IRExpr* const addr_expr,
                              const HWord size)
 {
-  IRExpr* size_expr;
-  IRExpr** argv;
-  IRDirty* di;
+   IRExpr* size_expr;
+   IRExpr** argv;
+   IRDirty* di;
 
-  if (UNLIKELY(DRD_(any_address_is_traced)()))
-  {
-    addStmtToIRSB(bb,
-		  IRStmt_Dirty(
-		    unsafeIRDirty_0_N(/*regparms*/2,
-				      "drd_trace_store",
-				      VG_(fnptr_to_fnentry)
-				      (drd_trace_mem_store),
-				      mkIRExprVec_2(addr_expr,
-						    mkIRExpr_HWord(size)))));
-  }
+   if (UNLIKELY(DRD_(any_address_is_traced)()))
+   {
+      addStmtToIRSB(bb,
+                    IRStmt_Dirty(
+                                 unsafeIRDirty_0_N(/*regparms*/2,
+                                                   "drd_trace_store",
+                                                   VG_(fnptr_to_fnentry)
+                                                   (drd_trace_mem_store),
+                                                   mkIRExprVec_2(addr_expr,
+                                                                 mkIRExpr_HWord(size)))));
+   }
 
-  if (! DRD_(s_check_stack_accesses) && is_stack_access(bb, addr_expr))
-    return;
+   if (! DRD_(s_check_stack_accesses) && is_stack_access(bb, addr_expr))
+      return;
 
-  switch (size)
-  {
-  case 1:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_store_1",
-                           VG_(fnptr_to_fnentry)(drd_trace_store_1),
-                           argv);
-    break;
-  case 2:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_store_2",
-                           VG_(fnptr_to_fnentry)(drd_trace_store_2),
-                           argv);
-    break;
-  case 4:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_store_4",
-                           VG_(fnptr_to_fnentry)(drd_trace_store_4),
-                           argv);
-    break;
-  case 8:
-    argv = mkIRExprVec_1(addr_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/1,
-                           "drd_trace_store_8",
-                           VG_(fnptr_to_fnentry)(drd_trace_store_8),
-                           argv);
-    break;
-  default:
-    size_expr = mkIRExpr_HWord(size);
-    argv = mkIRExprVec_2(addr_expr, size_expr);
-    di = unsafeIRDirty_0_N(/*regparms*/2,
-                           "drd_trace_store",
-                           VG_(fnptr_to_fnentry)(DRD_(trace_store)),
-                           argv);
-    break;
-  }
-  addStmtToIRSB(bb, IRStmt_Dirty(di));
+   switch (size)
+   {
+   case 1:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_store_1",
+                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
+                             argv);
+      break;
+   case 2:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_store_2",
+                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
+                             argv);
+      break;
+   case 4:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_store_4",
+                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
+                             argv);
+      break;
+   case 8:
+      argv = mkIRExprVec_1(addr_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/1,
+                             "drd_trace_store_8",
+                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
+                             argv);
+      break;
+   default:
+      size_expr = mkIRExpr_HWord(size);
+      argv = mkIRExprVec_2(addr_expr, size_expr);
+      di = unsafeIRDirty_0_N(/*regparms*/2,
+                             "drd_trace_store",
+                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
+                             argv);
+      break;
+   }
+   addStmtToIRSB(bb, IRStmt_Dirty(di));
 }
 
 IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
-                     IRSB* const bb_in,
-                     VexGuestLayout* const layout,
-                     VexGuestExtents* const vge, 
-                     IRType const gWordTy,
-                     IRType const hWordTy)
+                       IRSB* const bb_in,
+                       VexGuestLayout* const layout,
+                       VexGuestExtents* const vge, 
+                       IRType const gWordTy,
+                       IRType const hWordTy)
 {
-  IRDirty* di;
-  Int      i;
-  IRSB*    bb;
-  IRExpr** argv;
-  Bool     instrument = True;
-  Bool     bus_locked = False;
+   IRDirty* di;
+   Int      i;
+   IRSB*    bb;
+   IRExpr** argv;
+   Bool     instrument = True;
+   Bool     bus_locked = False;
 
-  /* Set up BB */
-  bb           = emptyIRSB();
-  bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
-  bb->next     = deepCopyIRExpr(bb_in->next);
-  bb->jumpkind = bb_in->jumpkind;
+   /* Set up BB */
+   bb           = emptyIRSB();
+   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
+   bb->next     = deepCopyIRExpr(bb_in->next);
+   bb->jumpkind = bb_in->jumpkind;
 
-  for (i = 0; i < bb_in->stmts_used; i++)
-  {
-    IRStmt* const st = bb_in->stmts[i];
-    tl_assert(st);
-    if (st->tag == Ist_NoOp)
-      continue;
+   for (i = 0; i < bb_in->stmts_used; i++)
+   {
+      IRStmt* const st = bb_in->stmts[i];
+      tl_assert(st);
+      if (st->tag == Ist_NoOp)
+         continue;
 
-    switch (st->tag)
-    {
-    /* Note: the code for not instrumenting the code in .plt          */
-    /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21    */
-    /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).             */
-    /* This is because on this platform dynamic library symbols are   */
-    /* relocated in another way than by later binutils versions. The  */
-    /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
-    case Ist_IMark:
-      instrument = VG_(seginfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
-        != Vg_SectPLT;
-      addStmtToIRSB(bb, st);
-      break;
-
-    case Ist_MBE:
-      switch (st->Ist.MBE.event)
+      switch (st->tag)
       {
-      case Imbe_Fence:
-        break; /* not interesting */
-      case Imbe_BusLock:
-      case Imbe_SnoopedStoreBegin:
-        tl_assert(! bus_locked);
-        bus_locked = True;
-        break;
-      case Imbe_BusUnlock:
-      case Imbe_SnoopedStoreEnd:
-        tl_assert(bus_locked);
-        bus_locked = False;
-        break;
+         /* Note: the code for not instrumenting the code in .plt          */
+         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21    */
+         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).             */
+         /* This is because on this platform dynamic library symbols are   */
+         /* relocated in another way than by later binutils versions. The  */
+         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
+      case Ist_IMark:
+         instrument = VG_(seginfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
+            != Vg_SectPLT;
+         addStmtToIRSB(bb, st);
+         break;
+
+      case Ist_MBE:
+         switch (st->Ist.MBE.event)
+         {
+         case Imbe_Fence:
+            break; /* not interesting */
+         case Imbe_BusLock:
+         case Imbe_SnoopedStoreBegin:
+            tl_assert(! bus_locked);
+            bus_locked = True;
+            break;
+         case Imbe_BusUnlock:
+         case Imbe_SnoopedStoreEnd:
+            tl_assert(bus_locked);
+            bus_locked = False;
+            break;
+         default:
+            tl_assert(0);
+         }
+         addStmtToIRSB(bb, st);
+         break;
+
+      case Ist_Store:
+         if (instrument && ! bus_locked)
+         {
+            instrument_store(bb,
+                             st->Ist.Store.addr,
+                             sizeofIRType(typeOfIRExpr(bb->tyenv,
+                                                       st->Ist.Store.data)));
+         }
+         addStmtToIRSB(bb, st);
+         break;
+
+      case Ist_WrTmp:
+         if (instrument)
+         {
+            const IRExpr* const data = st->Ist.WrTmp.data;
+            if (data->tag == Iex_Load)
+            {
+               instrument_load(bb,
+                               data->Iex.Load.addr,
+                               sizeofIRType(data->Iex.Load.ty));
+            }
+         }
+         addStmtToIRSB(bb, st);
+         break;
+
+      case Ist_Dirty:
+         if (instrument)
+         {
+            IRDirty* d = st->Ist.Dirty.details;
+            IREffect const mFx = d->mFx;
+            switch (mFx) {
+            case Ifx_None:
+               break;
+            case Ifx_Read:
+            case Ifx_Write:
+            case Ifx_Modify:
+               tl_assert(d->mAddr);
+               tl_assert(d->mSize > 0);
+               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
+               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
+                  di = unsafeIRDirty_0_N(
+                          /*regparms*/2,
+                          "drd_trace_load",
+                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
+                          argv);
+                  addStmtToIRSB(bb, IRStmt_Dirty(di));
+               }
+               if ((mFx == Ifx_Write || mFx == Ifx_Modify)
+                   && ! bus_locked)
+               {
+                  di = unsafeIRDirty_0_N(
+                          /*regparms*/2,
+                          "drd_trace_store",
+                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
+                          argv);
+                  addStmtToIRSB(bb, IRStmt_Dirty(di));
+               }
+               break;
+            default:
+               tl_assert(0);
+            }
+         }
+         addStmtToIRSB(bb, st);
+         break;
+
       default:
-        tl_assert(0);
+         addStmtToIRSB(bb, st);
+         break;
       }
-      addStmtToIRSB(bb, st);
-      break;
+   }
 
-    case Ist_Store:
-      if (instrument && ! bus_locked)
-      {
-        instrument_store(bb,
-                         st->Ist.Store.addr,
-                         sizeofIRType(typeOfIRExpr(bb->tyenv,
-                                                   st->Ist.Store.data)));
-      }
-      addStmtToIRSB(bb, st);
-      break;
+   tl_assert(! bus_locked);
 
-    case Ist_WrTmp:
-      if (instrument)
-      {
-        const IRExpr* const data = st->Ist.WrTmp.data;
-        if (data->tag == Iex_Load)
-        {
-          instrument_load(bb,
-                          data->Iex.Load.addr,
-                          sizeofIRType(data->Iex.Load.ty));
-        }
-      }
-      addStmtToIRSB(bb, st);
-      break;
-
-    case Ist_Dirty:
-      if (instrument)
-      {
-        IRDirty* d = st->Ist.Dirty.details;
-        IREffect const mFx = d->mFx;
-        switch (mFx) {
-        case Ifx_None:
-          break;
-        case Ifx_Read:
-        case Ifx_Write:
-        case Ifx_Modify:
-          tl_assert(d->mAddr);
-          tl_assert(d->mSize > 0);
-          argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
-          if (mFx == Ifx_Read || mFx == Ifx_Modify) {
-            di = unsafeIRDirty_0_N(
-                                   /*regparms*/2,
-                                   "drd_trace_load",
-                                   VG_(fnptr_to_fnentry)(DRD_(trace_load)),
-                                   argv);
-            addStmtToIRSB(bb, IRStmt_Dirty(di));
-          }
-          if ((mFx == Ifx_Write || mFx == Ifx_Modify)
-              && ! bus_locked)
-          {
-            di = unsafeIRDirty_0_N(
-                                   /*regparms*/2,
-                                   "drd_trace_store",
-                                   VG_(fnptr_to_fnentry)(DRD_(trace_store)),
-                                   argv);
-            addStmtToIRSB(bb, IRStmt_Dirty(di));
-          }
-          break;
-        default:
-          tl_assert(0);
-        }
-      }
-      addStmtToIRSB(bb, st);
-      break;
-
-    default:
-      addStmtToIRSB(bb, st);
-      break;
-    }
-  }
-
-  tl_assert(! bus_locked);
-
-  return bb;
+   return bb;
 }
 
diff --git a/drd/drd_load_store.h b/drd/drd_load_store.h
index 19ab44f..fcd7cc8 100644
--- a/drd/drd_load_store.h
+++ b/drd/drd_load_store.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_main.c b/drd/drd_main.c
index f580a82..b37dfce 100644
--- a/drd/drd_main.c
+++ b/drd/drd_main.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -64,102 +65,103 @@
  */
 static Bool DRD_(process_cmd_line_option)(Char* arg)
 {
-  int check_stack_accesses   = -1;
-  int exclusive_threshold_ms = -1;
-  int report_signal_unlocked = -1;
-  int segment_merging        = -1;
-  int shared_threshold_ms    = -1;
-  int show_confl_seg         = -1;
-  int trace_barrier          = -1;
-  int trace_clientobj        = -1;
-  int trace_cond             = -1;
-  int trace_csw              = -1;
-  int trace_fork_join        = -1;
-  int trace_conflict_set     = -1;
-  int trace_mutex            = -1;
-  int trace_rwlock           = -1;
-  int trace_segment          = -1;
-  int trace_semaphore        = -1;
-  int trace_suppression      = -1;
-  Char* trace_address        = 0;
+   int check_stack_accesses   = -1;
+   int exclusive_threshold_ms = -1;
+   int report_signal_unlocked = -1;
+   int segment_merging        = -1;
+   int shared_threshold_ms    = -1;
+   int show_confl_seg         = -1;
+   int trace_barrier          = -1;
+   int trace_clientobj        = -1;
+   int trace_cond             = -1;
+   int trace_csw              = -1;
+   int trace_fork_join        = -1;
+   int trace_conflict_set     = -1;
+   int trace_mutex            = -1;
+   int trace_rwlock           = -1;
+   int trace_segment          = -1;
+   int trace_semaphore        = -1;
+   int trace_suppression      = -1;
+   Char* trace_address        = 0;
 
-  if     VG_BOOL_CLO(arg, "--check-stack-var",     check_stack_accesses) {}
-  else if VG_BOOL_CLO(arg, "--drd-stats",           DRD_(s_print_stats)) {}
-  else if VG_BOOL_CLO(arg,"--report-signal-unlocked",report_signal_unlocked) {}
-  else if VG_BOOL_CLO(arg, "--segment-merging",     segment_merging) {}
-  else if VG_BOOL_CLO(arg, "--show-confl-seg",      show_confl_seg) {}
-  else if VG_BOOL_CLO(arg, "--show-stack-usage",    DRD_(s_show_stack_usage)) {}
-  else if VG_BOOL_CLO(arg, "--trace-barrier",       trace_barrier) {}
-  else if VG_BOOL_CLO(arg, "--trace-clientobj",     trace_clientobj) {}
-  else if VG_BOOL_CLO(arg, "--trace-cond",          trace_cond) {}
-  else if VG_BOOL_CLO(arg, "--trace-conflict-set",  trace_conflict_set) {}
-  else if VG_BOOL_CLO(arg, "--trace-csw",           trace_csw) {}
-  else if VG_BOOL_CLO(arg, "--trace-fork-join",     trace_fork_join) {}
-  else if VG_BOOL_CLO(arg, "--trace-mutex",         trace_mutex) {}
-  else if VG_BOOL_CLO(arg, "--trace-rwlock",        trace_rwlock) {}
-  else if VG_BOOL_CLO(arg, "--trace-segment",       trace_segment) {}
-  else if VG_BOOL_CLO(arg, "--trace-semaphore",     trace_semaphore) {}
-  else if VG_BOOL_CLO(arg, "--trace-suppr",         trace_suppression) {}
-  else if VG_BOOL_CLO(arg, "--var-info",            DRD_(s_var_info)) {}
-  else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
-  else if VG_INT_CLO (arg, "--shared-threshold",    shared_threshold_ms)    {}
-  else if VG_STR_CLO (arg, "--trace-addr",          trace_address) {}
-  else
-    return VG_(replacement_malloc_process_cmd_line_option)(arg);
+   if      VG_BOOL_CLO(arg, "--check-stack-var",     check_stack_accesses) {}
+   else if VG_BOOL_CLO(arg, "--drd-stats",           DRD_(s_print_stats)) {}
+   else if VG_BOOL_CLO(arg,"--report-signal-unlocked",report_signal_unlocked) {}
+   else if VG_BOOL_CLO(arg, "--segment-merging",     segment_merging) {}
+   else if VG_BOOL_CLO(arg, "--show-confl-seg",      show_confl_seg) {}
+   else if VG_BOOL_CLO(arg, "--show-stack-usage",
+                       DRD_(s_show_stack_usage)) {}
+   else if VG_BOOL_CLO(arg, "--trace-barrier",       trace_barrier) {}
+   else if VG_BOOL_CLO(arg, "--trace-clientobj",     trace_clientobj) {}
+   else if VG_BOOL_CLO(arg, "--trace-cond",          trace_cond) {}
+   else if VG_BOOL_CLO(arg, "--trace-conflict-set",  trace_conflict_set) {}
+   else if VG_BOOL_CLO(arg, "--trace-csw",           trace_csw) {}
+   else if VG_BOOL_CLO(arg, "--trace-fork-join",     trace_fork_join) {}
+   else if VG_BOOL_CLO(arg, "--trace-mutex",         trace_mutex) {}
+   else if VG_BOOL_CLO(arg, "--trace-rwlock",        trace_rwlock) {}
+   else if VG_BOOL_CLO(arg, "--trace-segment",       trace_segment) {}
+   else if VG_BOOL_CLO(arg, "--trace-semaphore",     trace_semaphore) {}
+   else if VG_BOOL_CLO(arg, "--trace-suppr",         trace_suppression) {}
+   else if VG_BOOL_CLO(arg, "--var-info",            DRD_(s_var_info)) {}
+   else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
+   else if VG_INT_CLO (arg, "--shared-threshold",    shared_threshold_ms)    {}
+   else if VG_STR_CLO (arg, "--trace-addr",          trace_address) {}
+   else
+      return VG_(replacement_malloc_process_cmd_line_option)(arg);
 
-  if (check_stack_accesses != -1)
-    DRD_(set_check_stack_accesses)(check_stack_accesses);
-  if (exclusive_threshold_ms != -1)
-  {
-    DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
-    DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
-  }
-  if (report_signal_unlocked != -1)
-  {
-    DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
-  }
-  if (shared_threshold_ms != -1)
-  {
-    DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
-  }
-  if (segment_merging != -1)
-    DRD_(thread_set_segment_merging)(segment_merging);
-  if (show_confl_seg != -1)
-    DRD_(set_show_conflicting_segments)(show_confl_seg);
-  if (trace_address)
-  {
-    const Addr addr = VG_(strtoll16)(trace_address, 0);
-    DRD_(start_tracing_address_range)(addr, addr + 1);
-  }
-  if (trace_barrier != -1)
-    DRD_(barrier_set_trace)(trace_barrier);
-  if (trace_clientobj != -1)
-    DRD_(clientobj_set_trace)(trace_clientobj);
-  if (trace_cond != -1)
-    DRD_(cond_set_trace)(trace_cond);
-  if (trace_csw != -1)
-    DRD_(thread_trace_context_switches)(trace_csw);
-  if (trace_fork_join != -1)
-    DRD_(thread_set_trace_fork_join)(trace_fork_join);
-  if (trace_conflict_set != -1)
-    DRD_(thread_trace_conflict_set)(trace_conflict_set);
-  if (trace_mutex != -1)
-    DRD_(mutex_set_trace)(trace_mutex);
-  if (trace_rwlock != -1)
-    DRD_(rwlock_set_trace)(trace_rwlock);
-  if (trace_segment != -1)
-    DRD_(sg_set_trace)(trace_segment);
-  if (trace_semaphore != -1)
-    DRD_(semaphore_set_trace)(trace_semaphore);
-  if (trace_suppression != -1)
-    DRD_(suppression_set_trace)(trace_suppression);
+   if (check_stack_accesses != -1)
+      DRD_(set_check_stack_accesses)(check_stack_accesses);
+   if (exclusive_threshold_ms != -1)
+   {
+      DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
+      DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
+   }
+   if (report_signal_unlocked != -1)
+   {
+      DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
+   }
+   if (shared_threshold_ms != -1)
+   {
+      DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
+   }
+   if (segment_merging != -1)
+      DRD_(thread_set_segment_merging)(segment_merging);
+   if (show_confl_seg != -1)
+      DRD_(set_show_conflicting_segments)(show_confl_seg);
+   if (trace_address)
+   {
+      const Addr addr = VG_(strtoll16)(trace_address, 0);
+      DRD_(start_tracing_address_range)(addr, addr + 1);
+   }
+   if (trace_barrier != -1)
+      DRD_(barrier_set_trace)(trace_barrier);
+   if (trace_clientobj != -1)
+      DRD_(clientobj_set_trace)(trace_clientobj);
+   if (trace_cond != -1)
+      DRD_(cond_set_trace)(trace_cond);
+   if (trace_csw != -1)
+      DRD_(thread_trace_context_switches)(trace_csw);
+   if (trace_fork_join != -1)
+      DRD_(thread_set_trace_fork_join)(trace_fork_join);
+   if (trace_conflict_set != -1)
+      DRD_(thread_trace_conflict_set)(trace_conflict_set);
+   if (trace_mutex != -1)
+      DRD_(mutex_set_trace)(trace_mutex);
+   if (trace_rwlock != -1)
+      DRD_(rwlock_set_trace)(trace_rwlock);
+   if (trace_segment != -1)
+      DRD_(sg_set_trace)(trace_segment);
+   if (trace_semaphore != -1)
+      DRD_(semaphore_set_trace)(trace_semaphore);
+   if (trace_suppression != -1)
+      DRD_(suppression_set_trace)(trace_suppression);
 
-  return True;
+   return True;
 }
 
 static void DRD_(print_usage)(void)
 {
-  VG_(printf)(
+   VG_(printf)(
 "    --check-stack-var=yes|no  Whether or not to report data races on\n"
 "                              stack variables [no].\n"
 "    --exclusive-threshold=<n> Print an error message if any mutex or\n"
@@ -192,20 +194,20 @@
 "    --trace-mutex=yes|no      Trace all mutex activity [no].\n"
 "    --trace-rwlock=yes|no     Trace all reader-writer lock activity[no].\n"
 "    --trace-semaphore=yes|no  Trace all semaphore activity [no].\n"
-              );
+);
    VG_(replacement_malloc_print_usage)();
 }
 
 static void DRD_(print_debug_usage)(void)
 {  
-  VG_(printf)(
+   VG_(printf)(
 "    --drd-stats=yes|no        Print statistics about DRD activity [no].\n"
 "    --trace-clientobj=yes|no  Trace all client object activity [no].\n"
 "    --trace-csw=yes|no        Trace all scheduler context switches [no].\n"
 "    --trace-conflict-set=yes|no Trace all conflict set updates [no].\n"
 "    --trace-segment=yes|no    Trace segment actions [no].\n"
 "    --trace-suppr=yes|no      Trace all address suppression actions [no].\n"
-              );
+);
    VG_(replacement_malloc_print_debug_usage)();
 }
 
@@ -220,10 +222,10 @@
                              const Addr a,
                              const SizeT size)
 {
-  if (size > 0)
-  {
-    DRD_(trace_load)(a, size);
-  }
+   if (size > 0)
+   {
+      DRD_(trace_load)(a, size);
+   }
 }
 
 static void drd_pre_mem_read_asciiz(const CorePart part,
@@ -231,22 +233,22 @@
                                     Char* const s,
                                     const Addr a)
 {
-  const char* p = (void*)a;
-  SizeT size = 0;
+   const char* p = (void*)a;
+   SizeT size = 0;
 
-  /* Note: the expression '*p' reads client memory and may crash if the */
-  /* client provided an invalid pointer !                               */
-  while (*p)
-  {
-    p++;
-    size++;
-  }
-  // To do: find out what a reasonable upper limit on 'size' is.
-  tl_assert(size < 4096);
-  if (size > 0)
-  {
-    DRD_(trace_load)(a, size);
-  }
+   /* Note: the expression '*p' reads client memory and may crash if the */
+   /* client provided an invalid pointer !                               */
+   while (*p)
+   {
+      p++;
+      size++;
+   }
+   // To do: find out what a reasonable upper limit on 'size' is.
+   tl_assert(size < 4096);
+   if (size > 0)
+   {
+      DRD_(trace_load)(a, size);
+   }
 }
 
 static void drd_post_mem_write(const CorePart part,
@@ -254,62 +256,62 @@
                                const Addr a,
                                const SizeT size)
 {
-  DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
-  if (size > 0)
-  {
-    DRD_(trace_store)(a, size);
-  }
+   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
+   if (size > 0)
+   {
+      DRD_(trace_store)(a, size);
+   }
 }
 
 static __inline__
 void drd_start_using_mem(const Addr a1, const SizeT len)
 {
-  tl_assert(a1 < a1 + len);
+   tl_assert(a1 < a1 + len);
 
-  if (UNLIKELY(DRD_(any_address_is_traced)()))
-  {
-    DRD_(trace_mem_access)(a1, len, eStart);
-  }
+   if (UNLIKELY(DRD_(any_address_is_traced)()))
+   {
+      DRD_(trace_mem_access)(a1, len, eStart);
+   }
 }
 
 static void drd_start_using_mem_w_ecu(const Addr a1,
                                       const SizeT len,
                                       UInt ec_uniq)
 {
-  drd_start_using_mem(a1, len);
+   drd_start_using_mem(a1, len);
 }
 
 static void drd_start_using_mem_w_tid(const Addr a1,
                                       const SizeT len,
                                       ThreadId tid)
 {
-  drd_start_using_mem(a1, len);
+   drd_start_using_mem(a1, len);
 }
 
 static __inline__
 void drd_stop_using_mem(const Addr a1, const SizeT len,
                         const Bool is_stack_mem)
 {
-  const Addr a2 = a1 + len;
+   const Addr a2 = a1 + len;
 
-  tl_assert(a1 < a2);
+   tl_assert(a1 < a2);
 
-  if (UNLIKELY(DRD_(any_address_is_traced)()))
-  {
-    DRD_(trace_mem_access)(a1, len, eEnd);
-  }
-  if (! is_stack_mem || DRD_(get_check_stack_accesses)())
-  {
-    DRD_(thread_stop_using_mem)(a1, a2);
-    DRD_(clientobj_stop_using_mem)(a1, a2);
-    DRD_(suppression_stop_using_mem)(a1, a2);
-  }
+   if (UNLIKELY(DRD_(any_address_is_traced)()))
+   {
+      DRD_(trace_mem_access)(a1, len, eEnd);
+   }
+   if (! is_stack_mem || DRD_(get_check_stack_accesses)())
+   {
+      DRD_(thread_stop_using_mem)(a1, a2);
+      DRD_(clientobj_stop_using_mem)(a1, a2);
+      DRD_(suppression_stop_using_mem)(a1, a2);
+   }
 }
 
 static __inline__
 void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len)
 {
-  drd_stop_using_mem(a1, len, False);
+   drd_stop_using_mem(a1, len, False);
 }
 
 /**
@@ -318,9 +320,9 @@
  */
 void DRD_(clean_memory)(const Addr a1, const SizeT len)
 {
-  const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
-  drd_stop_using_mem(a1, len, is_stack_memory);
-  drd_start_using_mem(a1, len);
+   const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
+   drd_stop_using_mem(a1, len, is_stack_memory);
+   drd_start_using_mem(a1, len);
 }
 
 /**
@@ -335,41 +337,41 @@
  */
 static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len)
 {
-  const DebugInfo* di;
+   const DebugInfo* di;
 
 #if 0
-  VG_(printf)("Evaluating range @ 0x%lx size %ld\n", a, len);
+   VG_(printf)("Evaluating range @ 0x%lx size %ld\n", a, len);
 #endif
 
-  for (di = VG_(next_seginfo)(0); di; di = VG_(next_seginfo)(di))
-  {
-    Addr  avma;
-    SizeT size;
+   for (di = VG_(next_seginfo)(0); di; di = VG_(next_seginfo)(di))
+   {
+      Addr  avma;
+      SizeT size;
 
-    avma = VG_(seginfo_get_plt_avma)(di);
-    size = VG_(seginfo_get_plt_size)(di);
-    tl_assert((avma && size) || (avma == 0 && size == 0));
-    if (size > 0)
-    {
+      avma = VG_(seginfo_get_plt_avma)(di);
+      size = VG_(seginfo_get_plt_size)(di);
+      tl_assert((avma && size) || (avma == 0 && size == 0));
+      if (size > 0)
+      {
 #if 0
-      VG_(printf)("Suppressing .plt @ 0x%lx size %ld\n", avma, size);
+         VG_(printf)("Suppressing .plt @ 0x%lx size %ld\n", avma, size);
 #endif
-      tl_assert(VG_(seginfo_sect_kind)(NULL, 0, avma) == Vg_SectPLT);
-      DRD_(start_suppression)(avma, avma + size, ".plt");
-    }
+         tl_assert(VG_(seginfo_sect_kind)(NULL, 0, avma) == Vg_SectPLT);
+         DRD_(start_suppression)(avma, avma + size, ".plt");
+      }
 
-    avma = VG_(seginfo_get_gotplt_avma)(di);
-    size = VG_(seginfo_get_gotplt_size)(di);
-    tl_assert((avma && size) || (avma == 0 && size == 0));
-    if (size > 0)
-    {
+      avma = VG_(seginfo_get_gotplt_avma)(di);
+      size = VG_(seginfo_get_gotplt_size)(di);
+      tl_assert((avma && size) || (avma == 0 && size == 0));
+      if (size > 0)
+      {
 #if 0
-      VG_(printf)("Suppressing .got.plt @ 0x%lx size %ld\n", avma, size);
+         VG_(printf)("Suppressing .got.plt @ 0x%lx size %ld\n", avma, size);
 #endif
-      tl_assert(VG_(seginfo_sect_kind)(NULL, 0, avma) == Vg_SectGOTPLT);
-      DRD_(start_suppression)(avma, avma + size, ".gotplt");
-    }
-  }
+         tl_assert(VG_(seginfo_sect_kind)(NULL, 0, avma) == Vg_SectGOTPLT);
+         DRD_(start_suppression)(avma, avma + size, ".gotplt");
+      }
+   }
 }
 
 static
@@ -377,11 +379,11 @@
                                  const Bool rr, const Bool ww, const Bool xx,
                                  ULong di_handle)
 {
-  DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
+   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
 
-  drd_start_using_mem(a, len);
+   drd_start_using_mem(a, len);
 
-  DRD_(suppress_relocation_conflicts)(a, len);
+   DRD_(suppress_relocation_conflicts)(a, len);
 }
 
 /* Called by the core when the stack of a thread grows, to indicate that */
@@ -390,10 +392,10 @@
 static __inline__
 void drd_start_using_mem_stack(const Addr a, const SizeT len)
 {
-  DRD_(thread_set_stack_min)(DRD_(thread_get_running_tid)(),
-                             a - VG_STACK_REDZONE_SZB);
-  drd_start_using_mem(a - VG_STACK_REDZONE_SZB, 
-                      len + VG_STACK_REDZONE_SZB);
+   DRD_(thread_set_stack_min)(DRD_(thread_get_running_tid)(),
+                              a - VG_STACK_REDZONE_SZB);
+   drd_start_using_mem(a - VG_STACK_REDZONE_SZB, 
+                       len + VG_STACK_REDZONE_SZB);
 }
 
 /* Called by the core when the stack of a thread shrinks, to indicate that */
@@ -402,41 +404,49 @@
 static __inline__
 void drd_stop_using_mem_stack(const Addr a, const SizeT len)
 {
-  DRD_(thread_set_stack_min)(DRD_(thread_get_running_tid)(),
-                             a + len - VG_STACK_REDZONE_SZB);
-  drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
-                     True);
+   DRD_(thread_set_stack_min)(DRD_(thread_get_running_tid)(),
+                              a + len - VG_STACK_REDZONE_SZB);
+   drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
+                      True);
 }
 
-static void drd_start_using_mem_stack_signal(
-               const Addr a, const SizeT len,
-               ThreadId tid_for_whom_the_signal_frame_is_being_constructed)
+/**
+ * Callback function called by the Valgrind core before a stack area is
+ * being used by a signal handler.
+ *
+ * @param[in] a   Start of address range.
+ * @param[in] len Address range length.
+ * @param[in] tid Valgrind thread ID for whom the signal frame is being
+ *                constructed.
+ */
+static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len,
+                                             ThreadId tid)
 {
-  DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
-  drd_start_using_mem(a, len);
+   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
+   drd_start_using_mem(a, len);
 }
 
 static void drd_stop_using_mem_stack_signal(Addr a, SizeT len)
 {
-  drd_stop_using_mem(a, len, True);
+   drd_stop_using_mem(a, len, True);
 }
 
 static
 void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
 {
-  const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator);
-  tl_assert(created != VG_INVALID_THREADID);
-  DRD_(thread_pre_create)(drd_creator, created);
-  if (DRD_(IsValidDrdThreadId)(drd_creator))
-  {
-    DRD_(thread_new_segment)(drd_creator);
-  }
-  if (DRD_(thread_get_trace_fork_join)())
-  {
-    VG_(message)(Vg_DebugMsg,
-                 "drd_pre_thread_create creator = %d/%d, created = %d",
-                 creator, drd_creator, created);
-  }
+   const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator);
+   tl_assert(created != VG_INVALID_THREADID);
+   DRD_(thread_pre_create)(drd_creator, created);
+   if (DRD_(IsValidDrdThreadId)(drd_creator))
+   {
+      DRD_(thread_new_segment)(drd_creator);
+   }
+   if (DRD_(thread_get_trace_fork_join)())
+   {
+      VG_(message)(Vg_DebugMsg,
+                   "drd_pre_thread_create creator = %d/%d, created = %d",
+                   creator, drd_creator, created);
+   }
 }
 
 /* Called by Valgrind's core before any loads or stores are performed on */
@@ -445,69 +455,69 @@
 static
 void drd_post_thread_create(const ThreadId vg_created)
 {
-  DrdThreadId drd_created;
+   DrdThreadId drd_created;
 
-  tl_assert(vg_created != VG_INVALID_THREADID);
+   tl_assert(vg_created != VG_INVALID_THREADID);
 
-  drd_created = DRD_(thread_post_create)(vg_created);
-  if (DRD_(thread_get_trace_fork_join)())
-  {
-    VG_(message)(Vg_DebugMsg,
-                 "drd_post_thread_create created = %d/%d",
-                 vg_created, drd_created);
-  }
-  if (! DRD_(get_check_stack_accesses)())
-  {
-    DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created)
-                            - DRD_(thread_get_stack_size)(drd_created),
-                            DRD_(thread_get_stack_max)(drd_created),
-                            "stack");
-  }
+   drd_created = DRD_(thread_post_create)(vg_created);
+   if (DRD_(thread_get_trace_fork_join)())
+   {
+      VG_(message)(Vg_DebugMsg,
+                   "drd_post_thread_create created = %d/%d",
+                   vg_created, drd_created);
+   }
+   if (! DRD_(get_check_stack_accesses)())
+   {
+      DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created)
+                              - DRD_(thread_get_stack_size)(drd_created),
+                              DRD_(thread_get_stack_max)(drd_created),
+                              "stack");
+   }
 }
 
 /* Called after a thread has performed its last memory access. */
 static void drd_thread_finished(ThreadId vg_tid)
 {
-  DrdThreadId drd_tid;
+   DrdThreadId drd_tid;
 
-  tl_assert(VG_(get_running_tid)() == vg_tid);
+   tl_assert(VG_(get_running_tid)() == vg_tid);
 
-  drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
-  if (DRD_(thread_get_trace_fork_join)())
-  {
-    VG_(message)(Vg_DebugMsg,
-                 "drd_thread_finished tid = %d/%d%s",
-                 vg_tid,
-                 drd_tid,
-                 DRD_(thread_get_joinable)(drd_tid)
-                 ? ""
-                 : " (which is a detached thread)");
-  }
-  if (DRD_(s_show_stack_usage))
-  {
-    const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid);
-    const SizeT used_stack
-      = (DRD_(thread_get_stack_max)(drd_tid)
-         - DRD_(thread_get_stack_min_min)(drd_tid));
-    VG_(message)(Vg_UserMsg,
-                 "thread %d/%d%s finished and used %ld bytes out of %ld"
-                 " on its stack. Margin: %ld bytes.",
-                 vg_tid,
-                 drd_tid,
-                 DRD_(thread_get_joinable)(drd_tid)
-                 ? ""
-                 : " (which is a detached thread)",
-                 used_stack,
-                 stack_size,
-                 stack_size - used_stack);
+   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
+   if (DRD_(thread_get_trace_fork_join)())
+   {
+      VG_(message)(Vg_DebugMsg,
+                   "drd_thread_finished tid = %d/%d%s",
+                   vg_tid,
+                   drd_tid,
+                   DRD_(thread_get_joinable)(drd_tid)
+                   ? ""
+                   : " (which is a detached thread)");
+   }
+   if (DRD_(s_show_stack_usage))
+   {
+      const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid);
+      const SizeT used_stack
+         = (DRD_(thread_get_stack_max)(drd_tid)
+            - DRD_(thread_get_stack_min_min)(drd_tid));
+      VG_(message)(Vg_UserMsg,
+                   "thread %d/%d%s finished and used %ld bytes out of %ld"
+                   " on its stack. Margin: %ld bytes.",
+                   vg_tid,
+                   drd_tid,
+                   DRD_(thread_get_joinable)(drd_tid)
+                   ? ""
+                   : " (which is a detached thread)",
+                   used_stack,
+                   stack_size,
+                   stack_size - used_stack);
 
-  }
-  drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid),
-                     DRD_(thread_get_stack_max)(drd_tid)
-                     - DRD_(thread_get_stack_min)(drd_tid),
-                     True);
-  DRD_(thread_stop_recording)(drd_tid);
-  DRD_(thread_finished)(drd_tid);
+   }
+   drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid),
+                      DRD_(thread_get_stack_max)(drd_tid)
+                      - DRD_(thread_get_stack_min)(drd_tid),
+                      True);
+   DRD_(thread_stop_recording)(drd_tid);
+   DRD_(thread_finished)(drd_tid);
 }
 
 //
@@ -516,124 +526,124 @@
 
 static void DRD_(post_clo_init)(void)
 {
-#  if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
-      || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
-  /* fine */
+#  if defined(VGP_x86_linux) || defined(VGP_amd64_linux)        \
+   || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+   /* fine */
 #  else
-  VG_(printf)("\nWARNING: DRD has only been tested on Linux.\n\n");
+   VG_(printf)("\nWARNING: DRD has only been tested on Linux.\n\n");
 #  endif
 
-  if (DRD_(s_var_info))
-  {
-    VG_(needs_var_info)();
-  }
+   if (DRD_(s_var_info))
+   {
+      VG_(needs_var_info)();
+   }
 }
 
 static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
 {
-  tl_assert(tid == VG_(get_running_tid)());
-  DRD_(thread_set_vg_running_tid)(tid);
+   tl_assert(tid == VG_(get_running_tid)());
+   DRD_(thread_set_vg_running_tid)(tid);
 }
 
 static void DRD_(fini)(Int exitcode)
 {
-  // thread_print_all();
-  if (VG_(clo_verbosity) > 1 || DRD_(s_print_stats))
-  {
-    ULong update_conflict_set_count;
-    ULong dsnsc;
-    ULong dscvc;
+   // thread_print_all();
+   if (VG_(clo_verbosity) > 1 || DRD_(s_print_stats))
+   {
+      ULong update_conflict_set_count;
+      ULong dsnsc;
+      ULong dscvc;
 
-    update_conflict_set_count
-      = DRD_(thread_get_update_conflict_set_count)(&dsnsc, &dscvc);
+      update_conflict_set_count
+         = DRD_(thread_get_update_conflict_set_count)(&dsnsc, &dscvc);
 
-    VG_(message)(Vg_UserMsg,
-                 "   thread: %lld context switches"
-                 " / %lld updates of the conflict set",
-                 DRD_(thread_get_context_switch_count)(),
-                 update_conflict_set_count);
-    VG_(message)(Vg_UserMsg,
-                 "           (%lld new sg + %lld combine vc + %lld csw).",
-                 dsnsc,
-                 dscvc,
-                 update_conflict_set_count - dsnsc - dscvc);
-    VG_(message)(Vg_UserMsg,
-                 " segments: created %lld segments, max %lld alive,"
-                 " %lld discard points.",
-                 DRD_(sg_get_segments_created_count)(),
-                 DRD_(sg_get_max_segments_alive_count)(),
-                 DRD_(thread_get_discard_ordered_segments_count)());
-    VG_(message)(Vg_UserMsg,
-                 "           (%lld m, %lld rw, %lld s, %lld b)",
-                 DRD_(get_mutex_segment_creation_count)(),
-                 DRD_(get_rwlock_segment_creation_count)(),
-                 DRD_(get_semaphore_segment_creation_count)(),
-                 DRD_(get_barrier_segment_creation_count)());
-    VG_(message)(Vg_UserMsg,
-                 "  bitmaps: %lld level 1 / %lld level 2 bitmap refs",
-                 DRD_(bm_get_bitmap_creation_count)(),
-                 DRD_(bm_get_bitmap2_node_creation_count)());
-    VG_(message)(Vg_UserMsg,
-                 "           and %lld level 2 bitmaps were allocated.",
-                 DRD_(bm_get_bitmap2_creation_count)());
-    VG_(message)(Vg_UserMsg,
-                 "    mutex: %lld non-recursive lock/unlock events.",
-                 DRD_(get_mutex_lock_count)());
-    DRD_(print_malloc_stats)();
-  }
+      VG_(message)(Vg_UserMsg,
+                   "   thread: %lld context switches"
+                   " / %lld updates of the conflict set",
+                   DRD_(thread_get_context_switch_count)(),
+                   update_conflict_set_count);
+      VG_(message)(Vg_UserMsg,
+                   "           (%lld new sg + %lld combine vc + %lld csw).",
+                   dsnsc,
+                   dscvc,
+                   update_conflict_set_count - dsnsc - dscvc);
+      VG_(message)(Vg_UserMsg,
+                   " segments: created %lld segments, max %lld alive,"
+                   " %lld discard points.",
+                   DRD_(sg_get_segments_created_count)(),
+                   DRD_(sg_get_max_segments_alive_count)(),
+                   DRD_(thread_get_discard_ordered_segments_count)());
+      VG_(message)(Vg_UserMsg,
+                   "           (%lld m, %lld rw, %lld s, %lld b)",
+                   DRD_(get_mutex_segment_creation_count)(),
+                   DRD_(get_rwlock_segment_creation_count)(),
+                   DRD_(get_semaphore_segment_creation_count)(),
+                   DRD_(get_barrier_segment_creation_count)());
+      VG_(message)(Vg_UserMsg,
+                   "  bitmaps: %lld level 1 / %lld level 2 bitmap refs",
+                   DRD_(bm_get_bitmap_creation_count)(),
+                   DRD_(bm_get_bitmap2_node_creation_count)());
+      VG_(message)(Vg_UserMsg,
+                   "           and %lld level 2 bitmaps were allocated.",
+                   DRD_(bm_get_bitmap2_creation_count)());
+      VG_(message)(Vg_UserMsg,
+                   "    mutex: %lld non-recursive lock/unlock events.",
+                   DRD_(get_mutex_lock_count)());
+      DRD_(print_malloc_stats)();
+   }
 }
 
 static
 void drd_pre_clo_init(void)
 {
-  // Basic tool stuff.
+   // Basic tool stuff.
 
-  VG_(details_name)            ("drd");
-  VG_(details_version)         (NULL);
-  VG_(details_description)     ("a thread error detector");
-  VG_(details_copyright_author)("Copyright (C) 2006-2009, and GNU GPL'd,"
-                                " by Bart Van Assche.");
-  VG_(details_bug_reports_to)  (VG_BUGS_TO);
+   VG_(details_name)            ("drd");
+   VG_(details_version)         (NULL);
+   VG_(details_description)     ("a thread error detector");
+   VG_(details_copyright_author)("Copyright (C) 2006-2009, and GNU GPL'd,"
+                                 " by Bart Van Assche.");
+   VG_(details_bug_reports_to)  (VG_BUGS_TO);
 
-  VG_(basic_tool_funcs)        (DRD_(post_clo_init),
-                                DRD_(instrument),
-                                DRD_(fini));
+   VG_(basic_tool_funcs)        (DRD_(post_clo_init),
+                                 DRD_(instrument),
+                                 DRD_(fini));
 
-  // Command line stuff.
-  VG_(needs_command_line_options)(DRD_(process_cmd_line_option),
-                                  DRD_(print_usage),
-                                  DRD_(print_debug_usage));
+   // Command line stuff.
+   VG_(needs_command_line_options)(DRD_(process_cmd_line_option),
+                                   DRD_(print_usage),
+                                   DRD_(print_debug_usage));
 
-  // Error handling.
-  DRD_(register_error_handlers)();
+   // Error handling.
+   DRD_(register_error_handlers)();
 
-  // Core event tracking.
-  VG_(track_pre_mem_read)         (drd_pre_mem_read);
-  VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
-  VG_(track_post_mem_write)       (drd_post_mem_write);
-  VG_(track_new_mem_brk)          (drd_start_using_mem_w_tid);
-  VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
-  VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
-  VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
-  VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
-  VG_(track_die_mem_brk)          (drd_stop_using_nonstack_mem);
-  VG_(track_die_mem_munmap)       (drd_stop_using_nonstack_mem);
-  VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
-  VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
-  VG_(track_start_client_code)    (drd_start_client_code);
-  VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
-  VG_(track_pre_thread_first_insn)(drd_post_thread_create);
-  VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
+   // Core event tracking.
+   VG_(track_pre_mem_read)         (drd_pre_mem_read);
+   VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
+   VG_(track_post_mem_write)       (drd_post_mem_write);
+   VG_(track_new_mem_brk)          (drd_start_using_mem_w_tid);
+   VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
+   VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
+   VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
+   VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
+   VG_(track_die_mem_brk)          (drd_stop_using_nonstack_mem);
+   VG_(track_die_mem_munmap)       (drd_stop_using_nonstack_mem);
+   VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
+   VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
+   VG_(track_start_client_code)    (drd_start_client_code);
+   VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
+   VG_(track_pre_thread_first_insn)(drd_post_thread_create);
+   VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
 
-  // Other stuff.
-  DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu,
-                                 drd_stop_using_nonstack_mem);
+   // Other stuff.
+   DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu,
+                                  drd_stop_using_nonstack_mem);
 
-  DRD_(clientreq_init)();
+   DRD_(clientreq_init)();
 
-  DRD_(suppression_init)();
+   DRD_(suppression_init)();
 
-  DRD_(clientobj_init)();
+   DRD_(clientobj_init)();
 }
 
 
diff --git a/drd/drd_malloc_wrappers.c b/drd/drd_malloc_wrappers.c
index 086f82f..62da58e 100644
--- a/drd/drd_malloc_wrappers.c
+++ b/drd/drd_malloc_wrappers.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -40,10 +41,10 @@
 /* Local type definitions. */
 
 typedef struct _DRD_Chunk {
-  struct _DRD_Chunk* next;
-  Addr          data;            // ptr to actual block
-  SizeT         size : (sizeof(UWord)*8)-2; //size requested; 30 or 62 bits
-  ExeContext*   where;           // where it was allocated
+   struct _DRD_Chunk* next;
+   Addr          data;            // ptr to actual block
+   SizeT         size : (sizeof(UWord)*8)-2; //size requested; 30 or 62 bits
+   ExeContext*   where;           // where it was allocated
 } DRD_Chunk;
 
 
@@ -66,13 +67,13 @@
 /** Allocate its shadow chunk, put it on the appropriate list. */
 static DRD_Chunk* DRD_(create_chunk)(ThreadId tid, Addr p, SizeT size)
 {
-  DRD_Chunk* mc = VG_(malloc)("drd.malloc_wrappers.cDC.1",
-                              sizeof(DRD_Chunk));
-  mc->data      = p;
-  mc->size      = size;
-  mc->where     = VG_(record_ExeContext)(tid, 0);
+   DRD_Chunk* mc = VG_(malloc)("drd.malloc_wrappers.cDC.1",
+                               sizeof(DRD_Chunk));
+   mc->data      = p;
+   mc->size      = size;
+   mc->where     = VG_(record_ExeContext)(tid, 0);
 
-  return mc;
+   return mc;
 }
 
 /*------------------------------------------------------------*/
@@ -86,161 +87,163 @@
                       SizeT size, SizeT align,
                       Bool is_zeroed)
 {
-  Addr p;
+   Addr p;
 
-  DRD_(s_cmalloc_n_mallocs) ++;
+   DRD_(s_cmalloc_n_mallocs) ++;
 
-  // Allocate and zero
-  p = (Addr)VG_(cli_malloc)(align, size);
-  if (!p) {
-    return NULL;
-  }
-  if (is_zeroed) VG_(memset)((void*)p, 0, size);
-  DRD_(s_start_using_mem_callback)(p, p + size, 0/*ec_uniq*/);
+   // Allocate and zero
+   p = (Addr)VG_(cli_malloc)(align, size);
+   if (!p) {
+      return NULL;
+   }
+   if (is_zeroed) VG_(memset)((void*)p, 0, size);
+   DRD_(s_start_using_mem_callback)(p, p + size, 0/*ec_uniq*/);
 
-  // Only update this stat if allocation succeeded.
-  DRD_(s_cmalloc_bs_mallocd) += size;
+   // Only update this stat if allocation succeeded.
+   DRD_(s_cmalloc_bs_mallocd) += size;
 
-  VG_(HT_add_node)(DRD_(s_malloc_list), DRD_(create_chunk)(tid, p, size));
+   VG_(HT_add_node)(DRD_(s_malloc_list), DRD_(create_chunk)(tid, p, size));
 
-  return (void*)p;
+   return (void*)p;
 }
 
 static void* DRD_(malloc)(ThreadId tid, SizeT n)
 {
-  return DRD_(new_block)(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
+   return DRD_(new_block)(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
 }
 
 static void* DRD_(memalign)(ThreadId tid, SizeT align, SizeT n)
 {
-  return DRD_(new_block)(tid, n, align, /*is_zeroed*/False);
+   return DRD_(new_block)(tid, n, align, /*is_zeroed*/False);
 }
 
 static void* DRD_(calloc)(ThreadId tid, SizeT nmemb, SizeT size1)
 {
-  return DRD_(new_block)(tid, nmemb*size1, VG_(clo_alignment),
-                         /*is_zeroed*/True);
+   return DRD_(new_block)(tid, nmemb*size1, VG_(clo_alignment),
+                          /*is_zeroed*/True);
 }
 
 static __inline__ void DRD_(handle_free)(ThreadId tid, Addr p)
 {
-  DRD_Chunk* mc;
+   DRD_Chunk* mc;
 
-  DRD_(s_cmalloc_n_frees)++;
+   DRD_(s_cmalloc_n_frees)++;
 
-  mc = VG_(HT_remove)(DRD_(s_malloc_list), (UWord)p);
-  if (mc == NULL)
-  {
-    tl_assert(0);
-  }
-  else
-  {
-    tl_assert(p == mc->data);
-    if (mc->size > 0)
-      DRD_(s_stop_using_mem_callback)(mc->data, mc->size);
-    VG_(cli_free)((void*)p);
-    VG_(free)(mc);
-  }
+   mc = VG_(HT_remove)(DRD_(s_malloc_list), (UWord)p);
+   if (mc == NULL)
+   {
+      tl_assert(0);
+   }
+   else
+   {
+      tl_assert(p == mc->data);
+      if (mc->size > 0)
+         DRD_(s_stop_using_mem_callback)(mc->data, mc->size);
+      VG_(cli_free)((void*)p);
+      VG_(free)(mc);
+   }
 }
 
 static void DRD_(free)(ThreadId tid, void* p)
 {
-  DRD_(handle_free)(tid, (Addr)p);
+   DRD_(handle_free)(tid, (Addr)p);
 }
 
 static void* DRD_(realloc)(ThreadId tid, void* p_old, SizeT new_size)
 {
-  DRD_Chunk* mc;
-  void*     p_new;
-  SizeT     old_size;
+   DRD_Chunk* mc;
+   void*     p_new;
+   SizeT     old_size;
 
-  DRD_(s_cmalloc_n_frees) ++;
-  DRD_(s_cmalloc_n_mallocs) ++;
-  DRD_(s_cmalloc_bs_mallocd) += new_size;
+   DRD_(s_cmalloc_n_frees) ++;
+   DRD_(s_cmalloc_n_mallocs) ++;
+   DRD_(s_cmalloc_bs_mallocd) += new_size;
 
-  /* Remove the old block */
-  mc = VG_(HT_remove)(DRD_(s_malloc_list), (UWord)p_old);
-  if (mc == NULL) {
-    tl_assert(0);
-    return NULL;
-  }
+   /* Remove the old block */
+   mc = VG_(HT_remove)(DRD_(s_malloc_list), (UWord)p_old);
+   if (mc == NULL) {
+      tl_assert(0);
+      return NULL;
+   }
 
-  old_size = mc->size;
+   old_size = mc->size;
 
-  if (old_size == new_size)
-  {
-    /* size unchanged */
-    mc->where = VG_(record_ExeContext)(tid, 0);
-    p_new = p_old;
+   if (old_size == new_size)
+   {
+      /* size unchanged */
+      mc->where = VG_(record_ExeContext)(tid, 0);
+      p_new = p_old;
       
-  }
-  else if (old_size > new_size)
-  {
-    /* new size is smaller */
-    DRD_(s_stop_using_mem_callback)(mc->data + new_size, old_size);
-    mc->size = new_size;
-    mc->where = VG_(record_ExeContext)(tid, 0);
-    p_new = p_old;
+   }
+   else if (old_size > new_size)
+   {
+      /* new size is smaller */
+      DRD_(s_stop_using_mem_callback)(mc->data + new_size, old_size);
+      mc->size = new_size;
+      mc->where = VG_(record_ExeContext)(tid, 0);
+      p_new = p_old;
 
-  }
-  else
-  {
-    /* new size is bigger */
-    /* Get new memory */
-    const Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
+   }
+   else
+   {
+      /* new size is bigger */
+      /* Get new memory */
+      const Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
 
-    if (a_new)
-    {
-      /* Copy from old to new */
-      VG_(memcpy)((void*)a_new, p_old, mc->size);
+      if (a_new)
+      {
+         /* Copy from old to new */
+         VG_(memcpy)((void*)a_new, p_old, mc->size);
 
-      /* Free old memory */
-      DRD_(s_stop_using_mem_callback)(mc->data, mc->size);
-      VG_(free)(mc);
+         /* Free old memory */
+         DRD_(s_stop_using_mem_callback)(mc->data, mc->size);
+         VG_(free)(mc);
 
-      // Allocate a new chunk.
-      mc = DRD_(create_chunk)(tid, a_new, new_size);
-      DRD_(s_start_using_mem_callback)(a_new, a_new + new_size, 0/*ec_uniq*/);
-    }
-    else
-    {
-      /* Allocation failed -- leave original block untouched. */
-    }
+         // Allocate a new chunk.
+         mc = DRD_(create_chunk)(tid, a_new, new_size);
+         DRD_(s_start_using_mem_callback)(a_new, a_new + new_size,
+                                          0/*ec_uniq*/);
+      }
+      else
+      {
+         /* Allocation failed -- leave original block untouched. */
+      }
 
-    p_new = (void*)a_new;
-  }  
+      p_new = (void*)a_new;
+   }  
 
-  // Now insert the new mc (with a possibly new 'data' field) into
-  // malloc_list.  If this realloc() did not increase the memory size, we
-  // will have removed and then re-added mc unnecessarily.  But that's ok
-  // because shrinking a block with realloc() is (presumably) much rarer
-  // than growing it, and this way simplifies the growing case.
-  VG_(HT_add_node)(DRD_(s_malloc_list), mc);
+   // Now insert the new mc (with a possibly new 'data' field) into
+   // malloc_list.  If this realloc() did not increase the memory size, we
+   // will have removed and then re-added mc unnecessarily.  But that's ok
+   // because shrinking a block with realloc() is (presumably) much rarer
+   // than growing it, and this way simplifies the growing case.
+   VG_(HT_add_node)(DRD_(s_malloc_list), mc);
 
-  return p_new;
+   return p_new;
 }
 
 static void* DRD_(__builtin_new)(ThreadId tid, SizeT n)
 {
-  void* const result = DRD_(new_block)(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
-  //VG_(message)(Vg_DebugMsg, "__builtin_new(%d, %d) = %p", tid, n, result);
-  return result;
+   void* const result = DRD_(new_block)(tid, n, VG_(clo_alignment),
+                                        /*is_zeroed*/False);
+   //VG_(message)(Vg_DebugMsg, "__builtin_new(%d, %d) = %p", tid, n, result);
+   return result;
 }
 
 static void DRD_(__builtin_delete)(ThreadId tid, void* p)
 {
-  //VG_(message)(Vg_DebugMsg, "__builtin_delete(%d, %p)", tid, p);
-  DRD_(handle_free)(tid, (Addr)p);
+   //VG_(message)(Vg_DebugMsg, "__builtin_delete(%d, %p)", tid, p);
+   DRD_(handle_free)(tid, (Addr)p);
 }
 
 static void* DRD_(__builtin_vec_new)(ThreadId tid, SizeT n)
 {
-  return DRD_(new_block)(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
+   return DRD_(new_block)(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
 }
 
 static void DRD_(__builtin_vec_delete)(ThreadId tid, void* p)
 {
-  DRD_(handle_free)(tid, (Addr)p);
+   DRD_(handle_free)(tid, (Addr)p);
 }
 
 static SizeT DRD_(malloc_usable_size) ( ThreadId tid, void* p )
@@ -255,26 +258,26 @@
 void DRD_(register_malloc_wrappers)(const StartUsingMem start_callback,
                                     const StopUsingMem stop_callback)
 {
-  tl_assert(DRD_(s_malloc_list) == 0);
-  DRD_(s_malloc_list) = VG_(HT_construct)("drd_malloc_list");   // a big prime
-  tl_assert(DRD_(s_malloc_list) != 0);
-  tl_assert(start_callback);
-  tl_assert(stop_callback);
+   tl_assert(DRD_(s_malloc_list) == 0);
+   DRD_(s_malloc_list) = VG_(HT_construct)("drd_malloc_list");   // a big prime
+   tl_assert(DRD_(s_malloc_list) != 0);
+   tl_assert(start_callback);
+   tl_assert(stop_callback);
 
-  DRD_(s_start_using_mem_callback) = start_callback;
-  DRD_(s_stop_using_mem_callback)  = stop_callback;
+   DRD_(s_start_using_mem_callback) = start_callback;
+   DRD_(s_stop_using_mem_callback)  = stop_callback;
 
-  VG_(needs_malloc_replacement)(DRD_(malloc),
-                                DRD_(__builtin_new),
-                                DRD_(__builtin_vec_new),
-                                DRD_(memalign),
-                                DRD_(calloc),
-                                DRD_(free),
-                                DRD_(__builtin_delete),
-                                DRD_(__builtin_vec_delete),
-                                DRD_(realloc),
-                                DRD_(malloc_usable_size),
-                                0);
+   VG_(needs_malloc_replacement)(DRD_(malloc),
+                                 DRD_(__builtin_new),
+                                 DRD_(__builtin_vec_new),
+                                 DRD_(memalign),
+                                 DRD_(calloc),
+                                 DRD_(free),
+                                 DRD_(__builtin_delete),
+                                 DRD_(__builtin_vec_delete),
+                                 DRD_(realloc),
+                                 DRD_(malloc_usable_size),
+                                 0);
 }
 
 Bool DRD_(heap_addrinfo)(Addr const a,
@@ -282,24 +285,24 @@
                          SizeT* const size,
                          ExeContext** const where)
 {
-  DRD_Chunk* mc;
+   DRD_Chunk* mc;
 
-  tl_assert(data);
-  tl_assert(size);
-  tl_assert(where);
+   tl_assert(data);
+   tl_assert(size);
+   tl_assert(where);
 
-  VG_(HT_ResetIter)(DRD_(s_malloc_list));
-  while ((mc = VG_(HT_Next)(DRD_(s_malloc_list))))
-  {
-    if (mc->data <= a && a < mc->data + mc->size)
-    {
-      *data  = mc->data;
-      *size  = mc->size;
-      *where = mc->where;
-      return True;
-    }
-  }
-  return False;
+   VG_(HT_ResetIter)(DRD_(s_malloc_list));
+   while ((mc = VG_(HT_Next)(DRD_(s_malloc_list))))
+   {
+      if (mc->data <= a && a < mc->data + mc->size)
+      {
+         *data  = mc->data;
+         *size  = mc->size;
+         *where = mc->where;
+         return True;
+      }
+   }
+   return False;
 }
 
 /*------------------------------------------------------------*/
@@ -308,32 +311,32 @@
 
 void DRD_(print_malloc_stats)(void)
 {
-  DRD_Chunk* mc;
-  SizeT     nblocks = 0;
-  SizeT     nbytes  = 0;
+   DRD_Chunk* mc;
+   SizeT     nblocks = 0;
+   SizeT     nbytes  = 0;
    
-  if (VG_(clo_verbosity) == 0)
-    return;
-  if (VG_(clo_xml))
-    return;
+   if (VG_(clo_verbosity) == 0)
+      return;
+   if (VG_(clo_xml))
+      return;
 
-  /* Count memory still in use. */
-  VG_(HT_ResetIter)(DRD_(s_malloc_list));
-  while ((mc = VG_(HT_Next)(DRD_(s_malloc_list))))
-  {
-    nblocks++;
-    nbytes += mc->size;
-  }
+   /* Count memory still in use. */
+   VG_(HT_ResetIter)(DRD_(s_malloc_list));
+   while ((mc = VG_(HT_Next)(DRD_(s_malloc_list))))
+   {
+      nblocks++;
+      nbytes += mc->size;
+   }
 
-  VG_(message)(Vg_DebugMsg, 
-               "malloc/free: in use at exit: %lu bytes in %lu blocks.",
-               nbytes, nblocks);
-  VG_(message)(Vg_DebugMsg, 
-               "malloc/free: %lu allocs, %lu frees, %lu bytes allocated.",
-               DRD_(s_cmalloc_n_mallocs),
-               DRD_(s_cmalloc_n_frees), DRD_(s_cmalloc_bs_mallocd));
-  if (VG_(clo_verbosity) > 1)
-    VG_(message)(Vg_DebugMsg, " ");
+   VG_(message)(Vg_DebugMsg, 
+                "malloc/free: in use at exit: %lu bytes in %lu blocks.",
+                nbytes, nblocks);
+   VG_(message)(Vg_DebugMsg, 
+                "malloc/free: %lu allocs, %lu frees, %lu bytes allocated.",
+                DRD_(s_cmalloc_n_mallocs),
+                DRD_(s_cmalloc_n_frees), DRD_(s_cmalloc_bs_mallocd));
+   if (VG_(clo_verbosity) > 1)
+      VG_(message)(Vg_DebugMsg, " ");
 }
 
 /*--------------------------------------------------------------------*/
diff --git a/drd/drd_malloc_wrappers.h b/drd/drd_malloc_wrappers.h
index 0c35ed9..da2d203 100644
--- a/drd/drd_malloc_wrappers.h
+++ b/drd/drd_malloc_wrappers.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_mutex.c b/drd/drd_mutex.c
index fecb355..98de75e 100644
--- a/drd/drd_mutex.c
+++ b/drd/drd_mutex.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -55,163 +56,163 @@
 
 void DRD_(mutex_set_trace)(const Bool trace_mutex)
 {
-  tl_assert(!! trace_mutex == trace_mutex);
-  s_trace_mutex = trace_mutex;
+   tl_assert(!! trace_mutex == trace_mutex);
+   s_trace_mutex = trace_mutex;
 }
 
 void DRD_(mutex_set_lock_threshold)(const UInt lock_threshold_ms)
 {
-  s_mutex_lock_threshold_ms = lock_threshold_ms;
+   s_mutex_lock_threshold_ms = lock_threshold_ms;
 }
 
 static
 void DRD_(mutex_initialize)(struct mutex_info* const p,
                             const Addr mutex, const MutexT mutex_type)
 {
-  tl_assert(mutex);
-  tl_assert(mutex_type != mutex_type_unknown);
-  tl_assert(p->a1 == mutex);
+   tl_assert(mutex);
+   tl_assert(mutex_type != mutex_type_unknown);
+   tl_assert(p->a1 == mutex);
 
-  p->cleanup             = (void(*)(DrdClientobj*))mutex_cleanup;
-  p->delete_thread
-    = (void(*)(DrdClientobj*, DrdThreadId))mutex_delete_thread;
-  p->mutex_type          = mutex_type;
-  p->recursion_count     = 0;
-  p->owner               = DRD_INVALID_THREADID;
-  p->last_locked_segment = 0;
-  p->acquiry_time_ms     = 0;
-  p->acquired_at         = 0;
+   p->cleanup             = (void(*)(DrdClientobj*))mutex_cleanup;
+   p->delete_thread
+      = (void(*)(DrdClientobj*, DrdThreadId))mutex_delete_thread;
+   p->mutex_type          = mutex_type;
+   p->recursion_count     = 0;
+   p->owner               = DRD_INVALID_THREADID;
+   p->last_locked_segment = 0;
+   p->acquiry_time_ms     = 0;
+   p->acquired_at         = 0;
 }
 
 /** Deallocate the memory that was allocated by mutex_initialize(). */
 static void mutex_cleanup(struct mutex_info* p)
 {
-  tl_assert(p);
+   tl_assert(p);
 
-  if (s_trace_mutex)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] mutex_destroy   %s 0x%lx rc %d owner %d",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 DRD_(mutex_get_typename)(p),
-                 p->a1,
-                 p ? p->recursion_count : -1,
-                 p ? p->owner : DRD_INVALID_THREADID);
-  }
+   if (s_trace_mutex)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] mutex_destroy   %s 0x%lx rc %d owner %d",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   DRD_(mutex_get_typename)(p),
+                   p->a1,
+                   p ? p->recursion_count : -1,
+                   p ? p->owner : DRD_INVALID_THREADID);
+   }
 
-  if (mutex_is_locked(p))
-  {
-    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            MutexErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Destroying locked mutex",
-                            &MEI);
-  }
+   if (mutex_is_locked(p))
+   {
+      MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              MutexErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Destroying locked mutex",
+                              &MEI);
+   }
 
-  DRD_(sg_put)(p->last_locked_segment);
-  p->last_locked_segment = 0;
+   DRD_(sg_put)(p->last_locked_segment);
+   p->last_locked_segment = 0;
 }
 
 /** Let Valgrind report that there is no mutex object at address 'mutex'. */
 void DRD_(not_a_mutex)(const Addr mutex)
 {
-  MutexErrInfo MEI = { mutex, -1, DRD_INVALID_THREADID };
-  VG_(maybe_record_error)(VG_(get_running_tid)(),
-                          MutexErr,
-                          VG_(get_IP)(VG_(get_running_tid)()),
-                          "Not a mutex",
-                          &MEI);
+   MutexErrInfo MEI = { mutex, -1, DRD_INVALID_THREADID };
+   VG_(maybe_record_error)(VG_(get_running_tid)(),
+                           MutexErr,
+                           VG_(get_IP)(VG_(get_running_tid)()),
+                           "Not a mutex",
+                           &MEI);
 }
 
 static
 struct mutex_info*
 DRD_(mutex_get_or_allocate)(const Addr mutex, const MutexT mutex_type)
 {
-  struct mutex_info* p;
+   struct mutex_info* p;
 
-  tl_assert(offsetof(DrdClientobj, mutex) == 0);
-  p = &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
-  if (p)
-  {
-    return p;
-  }
+   tl_assert(offsetof(DrdClientobj, mutex) == 0);
+   p = &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
+   if (p)
+   {
+      return p;
+   }
 
-  if (DRD_(clientobj_present)(mutex, mutex + 1))
-  {
-    DRD_(not_a_mutex)(mutex);
-    return 0;
-  }
+   if (DRD_(clientobj_present)(mutex, mutex + 1))
+   {
+      DRD_(not_a_mutex)(mutex);
+      return 0;
+   }
 
-  tl_assert(mutex_type != mutex_type_unknown);
+   tl_assert(mutex_type != mutex_type_unknown);
 
-  p = &(DRD_(clientobj_add)(mutex, ClientMutex)->mutex);
-  DRD_(mutex_initialize)(p, mutex, mutex_type);
-  return p;
+   p = &(DRD_(clientobj_add)(mutex, ClientMutex)->mutex);
+   DRD_(mutex_initialize)(p, mutex, mutex_type);
+   return p;
 }
 
 struct mutex_info* DRD_(mutex_get)(const Addr mutex)
 {
-  tl_assert(offsetof(DrdClientobj, mutex) == 0);
-  return &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
+   tl_assert(offsetof(DrdClientobj, mutex) == 0);
+   return &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
 }
 
 /** Called before pthread_mutex_init(). */
 struct mutex_info*
 DRD_(mutex_init)(const Addr mutex, const MutexT mutex_type)
 {
-  struct mutex_info* p;
+   struct mutex_info* p;
 
-  tl_assert(mutex_type != mutex_type_unknown);
+   tl_assert(mutex_type != mutex_type_unknown);
 
-  if (s_trace_mutex)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] mutex_init      %s 0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 DRD_(mutex_type_name)(mutex_type),
-                 mutex);
-  }
+   if (s_trace_mutex)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] mutex_init      %s 0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   DRD_(mutex_type_name)(mutex_type),
+                   mutex);
+   }
 
-  if (mutex_type == mutex_type_invalid_mutex)
-  {
-    DRD_(not_a_mutex)(mutex);
-    return 0;
-  }
+   if (mutex_type == mutex_type_invalid_mutex)
+   {
+      DRD_(not_a_mutex)(mutex);
+      return 0;
+   }
 
-  p = DRD_(mutex_get)(mutex);
-  if (p)
-  {
-    const ThreadId vg_tid = VG_(get_running_tid)();
-    MutexErrInfo MEI
-      = { p->a1, p->recursion_count, p->owner };
-    VG_(maybe_record_error)(vg_tid,
-                            MutexErr,
-                            VG_(get_IP)(vg_tid),
-                            "Mutex reinitialization",
-                            &MEI);
-    return p;
-  }
-  p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
+   p = DRD_(mutex_get)(mutex);
+   if (p)
+   {
+      const ThreadId vg_tid = VG_(get_running_tid)();
+      MutexErrInfo MEI
+         = { p->a1, p->recursion_count, p->owner };
+      VG_(maybe_record_error)(vg_tid,
+                              MutexErr,
+                              VG_(get_IP)(vg_tid),
+                              "Mutex reinitialization",
+                              &MEI);
+      return p;
+   }
+   p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
 
-  return p;
+   return p;
 }
 
 /** Called after pthread_mutex_destroy(). */
 void DRD_(mutex_post_destroy)(const Addr mutex)
 {
-  struct mutex_info* p;
+   struct mutex_info* p;
 
-  p = DRD_(mutex_get)(mutex);
-  if (p == 0)
-  {
-    DRD_(not_a_mutex)(mutex);
-    return;
-  }
+   p = DRD_(mutex_get)(mutex);
+   if (p == 0)
+   {
+      DRD_(not_a_mutex)(mutex);
+      return;
+   }
 
-  DRD_(clientobj_remove)(mutex, ClientMutex);
+   DRD_(clientobj_remove)(mutex, ClientMutex);
 }
 
 /** Called before pthread_mutex_lock() is invoked. If a data structure for
@@ -222,51 +223,51 @@
 void DRD_(mutex_pre_lock)(const Addr mutex, MutexT mutex_type,
                           const Bool trylock)
 {
-  struct mutex_info* p;
+   struct mutex_info* p;
 
-  p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
-  if (mutex_type == mutex_type_unknown)
-    mutex_type = p->mutex_type;
+   p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
+   if (mutex_type == mutex_type_unknown)
+      mutex_type = p->mutex_type;
 
-  if (s_trace_mutex)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] %s %s 0x%lx rc %d owner %d",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 trylock ? "pre_mutex_lock " : "mutex_trylock  ",
-                 p ? DRD_(mutex_get_typename)(p) : "(?)",
-                 mutex,
-                 p ? p->recursion_count : -1,
-                 p ? p->owner : DRD_INVALID_THREADID);
-  }
+   if (s_trace_mutex)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] %s %s 0x%lx rc %d owner %d",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   trylock ? "pre_mutex_lock " : "mutex_trylock  ",
+                   p ? DRD_(mutex_get_typename)(p) : "(?)",
+                   mutex,
+                   p ? p->recursion_count : -1,
+                   p ? p->owner : DRD_INVALID_THREADID);
+   }
 
-  if (p == 0)
-  {
-    DRD_(not_a_mutex)(mutex);
-    return;
-  }
+   if (p == 0)
+   {
+      DRD_(not_a_mutex)(mutex);
+      return;
+   }
 
-  tl_assert(p);
+   tl_assert(p);
 
-  if (mutex_type == mutex_type_invalid_mutex)
-  {
-    DRD_(not_a_mutex)(mutex);
-    return;
-  }
+   if (mutex_type == mutex_type_invalid_mutex)
+   {
+      DRD_(not_a_mutex)(mutex);
+      return;
+   }
 
-  if (! trylock
-      && p->owner == DRD_(thread_get_running_tid)()
-      && p->recursion_count >= 1
-      && mutex_type != mutex_type_recursive_mutex)
-  {
-    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            MutexErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Recursive locking not allowed",
-                            &MEI);
-  }
+   if (! trylock
+       && p->owner == DRD_(thread_get_running_tid)()
+       && p->recursion_count >= 1
+       && mutex_type != mutex_type_recursive_mutex)
+   {
+      MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              MutexErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Recursive locking not allowed",
+                              &MEI);
+   }
 }
 
 /**
@@ -277,55 +278,55 @@
 void DRD_(mutex_post_lock)(const Addr mutex, const Bool took_lock,
                            const Bool post_cond_wait)
 {
-  const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
-  struct mutex_info* p;
+   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
+   struct mutex_info* p;
 
-  p = DRD_(mutex_get)(mutex);
+   p = DRD_(mutex_get)(mutex);
 
-  if (s_trace_mutex)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] %s %s 0x%lx rc %d owner %d%s",
-                 VG_(get_running_tid)(),
-                 drd_tid,
-                 post_cond_wait ? "cond_post_wait " : "post_mutex_lock",
-                 p ? DRD_(mutex_get_typename)(p) : "(?)",
-                 mutex,
-                 p ? p->recursion_count : 0,
-                 p ? p->owner : VG_INVALID_THREADID,
-                 took_lock ? "" : " (locking failed)");
-  }
+   if (s_trace_mutex)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] %s %s 0x%lx rc %d owner %d%s",
+                   VG_(get_running_tid)(),
+                   drd_tid,
+                   post_cond_wait ? "cond_post_wait " : "post_mutex_lock",
+                   p ? DRD_(mutex_get_typename)(p) : "(?)",
+                   mutex,
+                   p ? p->recursion_count : 0,
+                   p ? p->owner : VG_INVALID_THREADID,
+                   took_lock ? "" : " (locking failed)");
+   }
 
-  if (! p || ! took_lock)
-    return;
+   if (! p || ! took_lock)
+      return;
 
-  if (p->recursion_count == 0)
-  {
-    const DrdThreadId last_owner = p->owner;
+   if (p->recursion_count == 0)
+   {
+      const DrdThreadId last_owner = p->owner;
 
-    if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
-    {
-      tl_assert(p->last_locked_segment);
-      DRD_(thread_combine_vc2)(drd_tid, &p->last_locked_segment->vc);
-    }
-    DRD_(thread_new_segment)(drd_tid);
-    s_mutex_segment_creation_count++;
+      if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
+      {
+         tl_assert(p->last_locked_segment);
+         DRD_(thread_combine_vc2)(drd_tid, &p->last_locked_segment->vc);
+      }
+      DRD_(thread_new_segment)(drd_tid);
+      s_mutex_segment_creation_count++;
 
-    p->owner           = drd_tid;
-    p->acquiry_time_ms = VG_(read_millisecond_timer)();
-    p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
-    s_mutex_lock_count++;
-  }
-  else if (p->owner != drd_tid)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "The impossible happened: mutex 0x%lx is locked"
-                 " simultaneously by two threads (recursion count %d,"
-                 " owners %d and %d) !",
-                 p->a1, p->recursion_count, p->owner, drd_tid);
-    p->owner = drd_tid;
-  }
-  p->recursion_count++;
+      p->owner           = drd_tid;
+      p->acquiry_time_ms = VG_(read_millisecond_timer)();
+      p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
+      s_mutex_lock_count++;
+   }
+   else if (p->owner != drd_tid)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "The impossible happened: mutex 0x%lx is locked"
+                   " simultaneously by two threads (recursion count %d,"
+                   " owners %d and %d) !",
+                   p->a1, p->recursion_count, p->owner, drd_tid);
+      p->owner = drd_tid;
+   }
+   p->recursion_count++;
 }
 
 /**
@@ -341,155 +342,155 @@
  */
 void DRD_(mutex_unlock)(const Addr mutex, MutexT mutex_type)
 {
-  const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
-  const ThreadId vg_tid = VG_(get_running_tid)();
-  struct mutex_info* p;
+   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
+   const ThreadId vg_tid = VG_(get_running_tid)();
+   struct mutex_info* p;
 
-  p = DRD_(mutex_get)(mutex);
-  if (mutex_type == mutex_type_unknown)
-    mutex_type = p->mutex_type;
+   p = DRD_(mutex_get)(mutex);
+   if (mutex_type == mutex_type_unknown)
+      mutex_type = p->mutex_type;
 
-  if (s_trace_mutex)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] mutex_unlock    %s 0x%lx rc %d",
-                 vg_tid,
-                 drd_tid,
-                 p ? DRD_(mutex_get_typename)(p) : "(?)",
-                 mutex,
-                 p ? p->recursion_count : 0);
-  }
+   if (s_trace_mutex)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] mutex_unlock    %s 0x%lx rc %d",
+                   vg_tid,
+                   drd_tid,
+                   p ? DRD_(mutex_get_typename)(p) : "(?)",
+                   mutex,
+                   p ? p->recursion_count : 0);
+   }
 
-  if (p == 0 || mutex_type == mutex_type_invalid_mutex)
-  {
-    DRD_(not_a_mutex)(mutex);
-    return;
-  }
+   if (p == 0 || mutex_type == mutex_type_invalid_mutex)
+   {
+      DRD_(not_a_mutex)(mutex);
+      return;
+   }
 
-  if (p->owner == DRD_INVALID_THREADID)
-  {
-    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
-    VG_(maybe_record_error)(vg_tid,
-                            MutexErr,
-                            VG_(get_IP)(vg_tid),
-                            "Mutex not locked",
-                            &MEI);
-    return;
-  }
+   if (p->owner == DRD_INVALID_THREADID)
+   {
+      MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
+      VG_(maybe_record_error)(vg_tid,
+                              MutexErr,
+                              VG_(get_IP)(vg_tid),
+                              "Mutex not locked",
+                              &MEI);
+      return;
+   }
 
-  tl_assert(p);
-  if (p->mutex_type != mutex_type)
-  {
-    VG_(message)(Vg_UserMsg, "??? mutex 0x%lx: type changed from %d into %d",
-                 p->a1, p->mutex_type, mutex_type);
-  }
-  tl_assert(p->mutex_type == mutex_type);
-  tl_assert(p->owner != DRD_INVALID_THREADID);
+   tl_assert(p);
+   if (p->mutex_type != mutex_type)
+   {
+      VG_(message)(Vg_UserMsg, "??? mutex 0x%lx: type changed from %d into %d",
+                   p->a1, p->mutex_type, mutex_type);
+   }
+   tl_assert(p->mutex_type == mutex_type);
+   tl_assert(p->owner != DRD_INVALID_THREADID);
 
-  if (p->owner != drd_tid || p->recursion_count <= 0)
-  {
-    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
-    VG_(maybe_record_error)(vg_tid,
-                            MutexErr,
-                            VG_(get_IP)(vg_tid),
-                            "Mutex not locked by calling thread",
-                            &MEI);
-    return;
-  }
-  tl_assert(p->recursion_count > 0);
-  p->recursion_count--;
-  tl_assert(p->recursion_count >= 0);
+   if (p->owner != drd_tid || p->recursion_count <= 0)
+   {
+      MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
+      VG_(maybe_record_error)(vg_tid,
+                              MutexErr,
+                              VG_(get_IP)(vg_tid),
+                              "Mutex not locked by calling thread",
+                              &MEI);
+      return;
+   }
+   tl_assert(p->recursion_count > 0);
+   p->recursion_count--;
+   tl_assert(p->recursion_count >= 0);
 
-  if (p->recursion_count == 0)
-  {
-    if (s_mutex_lock_threshold_ms > 0)
-    {
-      ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
-      if (held > s_mutex_lock_threshold_ms)
+   if (p->recursion_count == 0)
+   {
+      if (s_mutex_lock_threshold_ms > 0)
       {
-        HoldtimeErrInfo HEI
-          = { mutex, p->acquired_at, held, s_mutex_lock_threshold_ms };
-        VG_(maybe_record_error)(vg_tid,
-                                HoldtimeErr,
-                                VG_(get_IP)(vg_tid),
-                                "mutex",
-                                &HEI);
+         ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
+         if (held > s_mutex_lock_threshold_ms)
+         {
+            HoldtimeErrInfo HEI
+               = { mutex, p->acquired_at, held, s_mutex_lock_threshold_ms };
+            VG_(maybe_record_error)(vg_tid,
+                                    HoldtimeErr,
+                                    VG_(get_IP)(vg_tid),
+                                    "mutex",
+                                    &HEI);
+         }
       }
-    }
 
-    /* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
-    /* current vector clock of the thread such that it is available when  */
-    /* this mutex is locked again.                                        */
+      /* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
+      /* current vector clock of the thread such that it is available when  */
+      /* this mutex is locked again.                                        */
 
-    DRD_(thread_get_latest_segment)(&p->last_locked_segment, drd_tid);
-    DRD_(thread_new_segment)(drd_tid);
-    p->acquired_at = 0;
-    s_mutex_segment_creation_count++;
-  }
+      DRD_(thread_get_latest_segment)(&p->last_locked_segment, drd_tid);
+      DRD_(thread_new_segment)(drd_tid);
+      p->acquired_at = 0;
+      s_mutex_segment_creation_count++;
+   }
 }
 
 void DRD_(spinlock_init_or_unlock)(const Addr spinlock)
 {
-  struct mutex_info* mutex_p = DRD_(mutex_get)(spinlock);
-  if (mutex_p)
-  {
-    DRD_(mutex_unlock)(spinlock, mutex_type_spinlock);
-  }
-  else
-  {
-    DRD_(mutex_init)(spinlock, mutex_type_spinlock);
-  }
+   struct mutex_info* mutex_p = DRD_(mutex_get)(spinlock);
+   if (mutex_p)
+   {
+      DRD_(mutex_unlock)(spinlock, mutex_type_spinlock);
+   }
+   else
+   {
+      DRD_(mutex_init)(spinlock, mutex_type_spinlock);
+   }
 }
 
 const char* DRD_(mutex_get_typename)(struct mutex_info* const p)
 {
-  tl_assert(p);
+   tl_assert(p);
 
-  return DRD_(mutex_type_name)(p->mutex_type);
+   return DRD_(mutex_type_name)(p->mutex_type);
 }
 
 const char* DRD_(mutex_type_name)(const MutexT mt)
 {
-  switch (mt)
-  {
-  case mutex_type_invalid_mutex:
-    return "invalid mutex";
-  case mutex_type_recursive_mutex:
-    return "recursive mutex";
-  case mutex_type_errorcheck_mutex:
-    return "error checking mutex";
-  case mutex_type_default_mutex:
-    return "mutex";
-  case mutex_type_spinlock:
-    return "spinlock";
-  default:
-    tl_assert(0);
-  }
-  return "?";
+   switch (mt)
+   {
+   case mutex_type_invalid_mutex:
+      return "invalid mutex";
+   case mutex_type_recursive_mutex:
+      return "recursive mutex";
+   case mutex_type_errorcheck_mutex:
+      return "error checking mutex";
+   case mutex_type_default_mutex:
+      return "mutex";
+   case mutex_type_spinlock:
+      return "spinlock";
+   default:
+      tl_assert(0);
+   }
+   return "?";
 }
 
 /** Return true if the specified mutex is locked by any thread. */
 static Bool mutex_is_locked(struct mutex_info* const p)
 {
-  tl_assert(p);
-  return (p->recursion_count > 0);
+   tl_assert(p);
+   return (p->recursion_count > 0);
 }
 
 Bool DRD_(mutex_is_locked_by)(const Addr mutex, const DrdThreadId tid)
 {
-  struct mutex_info* const p = DRD_(mutex_get)(mutex);
-  if (p)
-  {
-    return (p->recursion_count > 0 && p->owner == tid);
-  }
-  return False;
+   struct mutex_info* const p = DRD_(mutex_get)(mutex);
+   if (p)
+   {
+      return (p->recursion_count > 0 && p->owner == tid);
+   }
+   return False;
 }
 
 int DRD_(mutex_get_recursion_count)(const Addr mutex)
 {
-  struct mutex_info* const p = DRD_(mutex_get)(mutex);
-  tl_assert(p);
-  return p->recursion_count;
+   struct mutex_info* const p = DRD_(mutex_get)(mutex);
+   tl_assert(p);
+   return p->recursion_count;
 }
 
 /**
@@ -498,27 +499,27 @@
  */
 static void mutex_delete_thread(struct mutex_info* p, const DrdThreadId tid)
 {
-  tl_assert(p);
+   tl_assert(p);
 
-  if (p->owner == tid && p->recursion_count > 0)
-  {
-    MutexErrInfo MEI
-      = { p->a1, p->recursion_count, p->owner };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            MutexErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Mutex still locked at thread exit",
-                            &MEI);
-    p->owner = VG_INVALID_THREADID;
-  }
+   if (p->owner == tid && p->recursion_count > 0)
+   {
+      MutexErrInfo MEI
+         = { p->a1, p->recursion_count, p->owner };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              MutexErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Mutex still locked at thread exit",
+                              &MEI);
+      p->owner = VG_INVALID_THREADID;
+   }
 }
 
 ULong DRD_(get_mutex_lock_count)(void)
 {
-  return s_mutex_lock_count;
+   return s_mutex_lock_count;
 }
 
 ULong DRD_(get_mutex_segment_creation_count)(void)
 {
-  return s_mutex_segment_creation_count;
+   return s_mutex_segment_creation_count;
 }
diff --git a/drd/drd_mutex.h b/drd/drd_mutex.h
index a36f601..5096a8e 100644
--- a/drd/drd_mutex.h
+++ b/drd/drd_mutex.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_pthread_intercepts.c b/drd/drd_pthread_intercepts.c
index 3c9ef5d..bf19360 100644
--- a/drd/drd_pthread_intercepts.c
+++ b/drd/drd_pthread_intercepts.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 
 /*--------------------------------------------------------------------*/
 /*--- Client-space code for DRD.          drd_pthread_intercepts.c ---*/
@@ -78,19 +79,19 @@
 #define ALLOCATE_THREAD_ARGS_ON_THE_STACK
 
 #define PTH_FUNC(ret_ty, f, args...)                            \
-  ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args);        \
-  ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args)
+   ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args);       \
+   ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args)
 
 
 /* Local data structures. */
 
 typedef struct
 {
-  void* (*start)(void*);
-  void* arg;
-  int   detachstate;
+   void* (*start)(void*);
+   void* arg;
+   int   detachstate;
 #if defined(WAIT_UNTIL_CREATED_THREAD_STARTED)
-  int   wrapper_started;
+   int   wrapper_started;
 #endif
 } DrdPosixThreadArgs;
 
@@ -116,8 +117,8 @@
  */
 static void DRD_(init)(void)
 {
-  DRD_(check_threading_library)();
-  DRD_(set_main_thread_state)();
+   DRD_(check_threading_library)();
+   DRD_(set_main_thread_state)();
 }
 
 /**
@@ -129,22 +130,22 @@
  */
 static MutexT DRD_(pthread_to_drd_mutex_type)(const int kind)
 {
-  if (kind == PTHREAD_MUTEX_RECURSIVE)
-    return mutex_type_recursive_mutex;
-  else if (kind == PTHREAD_MUTEX_ERRORCHECK)
-    return mutex_type_errorcheck_mutex;
-  else if (kind == PTHREAD_MUTEX_NORMAL)
-    return mutex_type_default_mutex;
-  else if (kind == PTHREAD_MUTEX_DEFAULT)
-    return mutex_type_default_mutex;
+   if (kind == PTHREAD_MUTEX_RECURSIVE)
+      return mutex_type_recursive_mutex;
+   else if (kind == PTHREAD_MUTEX_ERRORCHECK)
+      return mutex_type_errorcheck_mutex;
+   else if (kind == PTHREAD_MUTEX_NORMAL)
+      return mutex_type_default_mutex;
+   else if (kind == PTHREAD_MUTEX_DEFAULT)
+      return mutex_type_default_mutex;
 #if defined(HAVE_PTHREAD_MUTEX_ADAPTIVE_NP)
-  else if (kind == PTHREAD_MUTEX_ADAPTIVE_NP)
-    return mutex_type_default_mutex;
+   else if (kind == PTHREAD_MUTEX_ADAPTIVE_NP)
+      return mutex_type_default_mutex;
 #endif
-  else
-  {
-    return mutex_type_invalid_mutex;
-  }
+   else
+   {
+      return mutex_type_invalid_mutex;
+   }
 }
 
 /**
@@ -163,19 +164,19 @@
 static __inline__ MutexT DRD_(mutex_type)(pthread_mutex_t* mutex)
 {
 #if defined(HAVE_PTHREAD_MUTEX_T__M_KIND)
-  /* glibc + LinuxThreads. */
-  const int kind = mutex->__m_kind & 3;
+   /* glibc + LinuxThreads. */
+   const int kind = mutex->__m_kind & 3;
 #elif defined(HAVE_PTHREAD_MUTEX_T__DATA__KIND)
-  /* glibc + NPTL. */
-  const int kind = mutex->__data.__kind & 3;
+   /* glibc + NPTL. */
+   const int kind = mutex->__data.__kind & 3;
 #else
-  /* Another POSIX threads implementation. Regression tests will fail. */
-  const int kind = PTHREAD_MUTEX_DEFAULT;
-  fprintf(stderr,
-          "Did not recognize your POSIX threads implementation. Giving up.\n");
-  assert(0);
+   /* Another POSIX threads implementation. Regression tests will fail. */
+   const int kind = PTHREAD_MUTEX_DEFAULT;
+   fprintf(stderr,
+           "Did not recognize your POSIX threads implementation. Giving up.\n");
+   assert(0);
 #endif
-  return DRD_(pthread_to_drd_mutex_type)(kind);
+   return DRD_(pthread_to_drd_mutex_type)(kind);
 }
 
 /**
@@ -183,10 +184,10 @@
  */
 static void DRD_(set_joinable)(const pthread_t tid, const int joinable)
 {
-  int res;
-  assert(joinable == 0 || joinable == 1);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__SET_JOINABLE,
-                             tid, joinable, 0, 0, 0);
+   int res;
+   assert(joinable == 0 || joinable == 1);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__SET_JOINABLE,
+                              tid, joinable, 0, 0, 0);
 }
 
 /**
@@ -194,32 +195,33 @@
  */
 static void* DRD_(thread_wrapper)(void* arg)
 {
-  int res;
-  DrdPosixThreadArgs* arg_ptr;
-  DrdPosixThreadArgs arg_copy;
+   int res;
+   DrdPosixThreadArgs* arg_ptr;
+   DrdPosixThreadArgs arg_copy;
 
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
-                             0, 0, 0, 0, 0);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
+                              0, 0, 0, 0, 0);
 
-  arg_ptr = (DrdPosixThreadArgs*)arg;
-  arg_copy = *arg_ptr;
+   arg_ptr = (DrdPosixThreadArgs*)arg;
+   arg_copy = *arg_ptr;
 #if defined(WAIT_UNTIL_CREATED_THREAD_STARTED)
-  arg_ptr->wrapper_started = 1;
+   arg_ptr->wrapper_started = 1;
 #else
 #if defined(ALLOCATE_THREAD_ARGS_ON_THE_STACK)
-#error Defining ALLOCATE_THREAD_ARGS_ON_THE_STACK but not WAIT_UNTIL_CREATED_THREAD_STARTED is not supported.
+#error Defining ALLOCATE_THREAD_ARGS_ON_THE_STACK but not \
+       WAIT_UNTIL_CREATED_THREAD_STARTED is not supported.
 #else
-  free(arg_ptr);
+   free(arg_ptr);
 #endif
 #endif
 
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
-                             pthread_self(), 0, 0, 0, 0);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
+                              pthread_self(), 0, 0, 0, 0);
 
-  DRD_(set_joinable)(pthread_self(),
-                     arg_copy.detachstate == PTHREAD_CREATE_JOINABLE);
+   DRD_(set_joinable)(pthread_self(),
+                      arg_copy.detachstate == PTHREAD_CREATE_JOINABLE);
 
-  return (arg_copy.start)(arg_copy.arg);
+   return (arg_copy.start)(arg_copy.arg);
 }
 
 /**
@@ -233,19 +235,19 @@
 {
 #if defined(linux)
 #if defined(_CS_GNU_LIBPTHREAD_VERSION)
-  /* Linux with a recent glibc. */
-  char buffer[256];
-  unsigned len;
-  len = confstr(_CS_GNU_LIBPTHREAD_VERSION, buffer, sizeof(buffer));
-  assert(len <= sizeof(buffer));
-  return len > 0 && buffer[0] == 'l';
+   /* Linux with a recent glibc. */
+   char buffer[256];
+   unsigned len;
+   len = confstr(_CS_GNU_LIBPTHREAD_VERSION, buffer, sizeof(buffer));
+   assert(len <= sizeof(buffer));
+   return len > 0 && buffer[0] == 'l';
 #else
-  /* Linux without _CS_GNU_LIBPTHREAD_VERSION: most likely LinuxThreads. */
-  return 1;
+   /* Linux without _CS_GNU_LIBPTHREAD_VERSION: most likely LinuxThreads. */
+   return 1;
 #endif
 #else
-  /* Another OS than Linux, hence no LinuxThreads. */
-  return 0;
+   /* Another OS than Linux, hence no LinuxThreads. */
+   return 0;
 #endif
 }
 
@@ -255,27 +257,27 @@
  */
 static void DRD_(check_threading_library)(void)
 {
-  if (DRD_(detected_linuxthreads)())
-  {
-    if (getenv("LD_ASSUME_KERNEL"))
-    {
-      fprintf(stderr,
+   if (DRD_(detected_linuxthreads)())
+   {
+      if (getenv("LD_ASSUME_KERNEL"))
+      {
+         fprintf(stderr,
 "Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
 "the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
 "after having unset the environment variable LD_ASSUME_KERNEL. Giving up.\n"
-              );
-    }
-    else
-    {
-      fprintf(stderr,
+);
+      }
+      else
+      {
+         fprintf(stderr,
 "Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
 "the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
 "after having upgraded to a newer version of your Linux distribution.\n"
 "Giving up.\n"
-              );
-    }
-    abort();
-  }
+);
+      }
+      abort();
+   }
 }
 
 /**
@@ -284,14 +286,14 @@
  */
 static void DRD_(set_main_thread_state)(void)
 {
-  int res;
+   int res;
 
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
-                             0, 0, 0, 0, 0);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK,
+                              0, 0, 0, 0, 0);
 
-  // Make sure that DRD knows about the main thread's POSIX thread ID.
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
-                             pthread_self(), 0, 0, 0, 0);
+   // Make sure that DRD knows about the main thread's POSIX thread ID.
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SET_PTHREADID,
+                              pthread_self(), 0, 0, 0, 0);
 
 }
 
@@ -300,128 +302,128 @@
          pthread_t *thread, const pthread_attr_t *attr,
          void *(*start) (void *), void *arg)
 {
-  int    res;
-  int    ret;
-  OrigFn fn;
+   int    res;
+   int    ret;
+   OrigFn fn;
 #if defined(ALLOCATE_THREAD_ARGS_ON_THE_STACK)
-  DrdPosixThreadArgs thread_args;
+   DrdPosixThreadArgs thread_args;
 #endif
-  DrdPosixThreadArgs* thread_args_p;
+   DrdPosixThreadArgs* thread_args_p;
 
-  VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_GET_ORIG_FN(fn);
 
 #if defined(ALLOCATE_THREAD_ARGS_ON_THE_STACK)
-  thread_args_p = &thread_args;
+   thread_args_p = &thread_args;
 #else
-  thread_args_p = malloc(sizeof(*thread_args_p));
+   thread_args_p = malloc(sizeof(*thread_args_p));
 #endif
-  assert(thread_args_p);
+   assert(thread_args_p);
 
-  thread_args_p->start           = start;
-  thread_args_p->arg             = arg;
+   thread_args_p->start           = start;
+   thread_args_p->arg             = arg;
 #if defined(WAIT_UNTIL_CREATED_THREAD_STARTED)
-  DRD_IGNORE_VAR(thread_args_p->wrapper_started);
-  thread_args_p->wrapper_started = 0;
+   DRD_IGNORE_VAR(thread_args_p->wrapper_started);
+   thread_args_p->wrapper_started = 0;
 #endif
-  /*
-   * Find out whether the thread will be started as a joinable thread
-   * or as a detached thread. If no thread attributes have been specified,
-   * this means that the new thread will be started as a joinable thread.
-   */
-  thread_args_p->detachstate = PTHREAD_CREATE_JOINABLE;
-  if (attr)
-  {
-    if (pthread_attr_getdetachstate(attr, &thread_args_p->detachstate) != 0)
-    {
-      assert(0);
-    }
-  }
-  assert(thread_args_p->detachstate == PTHREAD_CREATE_JOINABLE
-         || thread_args_p->detachstate == PTHREAD_CREATE_DETACHED);
+   /*
+    * Find out whether the thread will be started as a joinable thread
+    * or as a detached thread. If no thread attributes have been specified,
+    * this means that the new thread will be started as a joinable thread.
+    */
+   thread_args_p->detachstate = PTHREAD_CREATE_JOINABLE;
+   if (attr)
+   {
+      if (pthread_attr_getdetachstate(attr, &thread_args_p->detachstate) != 0)
+      {
+         assert(0);
+      }
+   }
+   assert(thread_args_p->detachstate == PTHREAD_CREATE_JOINABLE
+          || thread_args_p->detachstate == PTHREAD_CREATE_DETACHED);
 
-  /* Suppress NPTL-specific conflicts between creator and created thread. */
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_STOP_RECORDING,
-                             0, 0, 0, 0, 0);
+   /* Suppress NPTL-specific conflicts between creator and created thread. */
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_STOP_RECORDING,
+                              0, 0, 0, 0, 0);
 
-  CALL_FN_W_WWWW(ret, fn, thread, attr, DRD_(thread_wrapper), thread_args_p);
+   CALL_FN_W_WWWW(ret, fn, thread, attr, DRD_(thread_wrapper), thread_args_p);
 
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_START_RECORDING,
-                             0, 0, 0, 0, 0);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_START_RECORDING,
+                              0, 0, 0, 0, 0);
 
 #if defined(WAIT_UNTIL_CREATED_THREAD_STARTED)
-  if (ret == 0)
-  {
-    /*
-     * Wait until the thread wrapper started.
-     * @todo Find out why some regression tests fail if thread arguments are
-     *   passed via dynamically allocated memory and if the loop below is
-     *   removed.
-     */
-    while (! thread_args_p->wrapper_started)
-    {
-      sched_yield();
-    }
-  }
+   if (ret == 0)
+   {
+      /*
+       * Wait until the thread wrapper started.
+       * @todo Find out why some regression tests fail if thread arguments are
+       *   passed via dynamically allocated memory and if the loop below is
+       *   removed.
+       */
+      while (! thread_args_p->wrapper_started)
+      {
+         sched_yield();
+      }
+   }
 
 #if defined(ALLOCATE_THREAD_ARGS_DYNAMICALLY)
-  free(thread_args_p);
+   free(thread_args_p);
 #endif
 
 #endif
 
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_START_NEW_SEGMENT,
-                             pthread_self(), 0, 0, 0, 0);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__DRD_START_NEW_SEGMENT,
+                              pthread_self(), 0, 0, 0, 0);
 
-  return ret;
+   return ret;
 }
 
 // pthread_join
 PTH_FUNC(int, pthreadZujoin, // pthread_join
          pthread_t pt_joinee, void **thread_return)
 {
-  int      ret;
-  int      res;
-  OrigFn   fn;
+   int      ret;
+   int      res;
+   OrigFn   fn;
 
-  VALGRIND_GET_ORIG_FN(fn);
-  CALL_FN_W_WW(ret, fn, pt_joinee, thread_return);
-  if (ret == 0)
-  {
-    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_JOIN,
-                               pt_joinee, 0, 0, 0, 0);
-  }
-  return ret;
+   VALGRIND_GET_ORIG_FN(fn);
+   CALL_FN_W_WW(ret, fn, pt_joinee, thread_return);
+   if (ret == 0)
+   {
+      VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_JOIN,
+                                 pt_joinee, 0, 0, 0, 0);
+   }
+   return ret;
 }
 
 // pthread_detach
 PTH_FUNC(int, pthreadZudetach, pthread_t pt_thread)
 {
-  int ret;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  {
-    CALL_FN_W_W(ret, fn, pt_thread);
-    if (ret == 0)
-    {
-      DRD_(set_joinable)(pt_thread, 0);
-    }
-  }
-  return ret;
+   int ret;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   {
+      CALL_FN_W_W(ret, fn, pt_thread);
+      if (ret == 0)
+      {
+         DRD_(set_joinable)(pt_thread, 0);
+      }
+   }
+   return ret;
 }
 
 // pthread_cancel
 PTH_FUNC(int, pthreadZucancel, pthread_t pt_thread)
 {
-  int res;
-  int ret;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_THREAD_CANCEL,
-                             pt_thread, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, pt_thread);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_CANCEL,
-                             pt_thread, ret==0, 0, 0, 0);
-  return ret;
+   int res;
+   int ret;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_THREAD_CANCEL,
+                              pt_thread, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, pt_thread);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_CANCEL,
+                              pt_thread, ret==0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_mutex_init
@@ -429,69 +431,69 @@
          pthread_mutex_t *mutex,
          const pthread_mutexattr_t* attr)
 {
-  int ret;
-  int res;
-  OrigFn fn;
-  int mt;
-  VALGRIND_GET_ORIG_FN(fn);
-  mt = PTHREAD_MUTEX_DEFAULT;
-  if (attr)
-    pthread_mutexattr_gettype(attr, &mt);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
-                             mutex, DRD_(pthread_to_drd_mutex_type)(mt),
-                             0, 0, 0);
-  CALL_FN_W_WW(ret, fn, mutex, attr);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
-                             mutex, 0, 0, 0, 0);
-  return ret;
+   int ret;
+   int res;
+   OrigFn fn;
+   int mt;
+   VALGRIND_GET_ORIG_FN(fn);
+   mt = PTHREAD_MUTEX_DEFAULT;
+   if (attr)
+      pthread_mutexattr_gettype(attr, &mt);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
+                              mutex, DRD_(pthread_to_drd_mutex_type)(mt),
+                              0, 0, 0);
+   CALL_FN_W_WW(ret, fn, mutex, attr);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
+                              mutex, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_mutex_destroy
 PTH_FUNC(int, pthreadZumutexZudestroy,
          pthread_mutex_t *mutex)
 {
-  int ret;
-  int res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
-                             mutex, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
-                             mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
-  return ret;
+   int ret;
+   int res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
+                              mutex, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
+                              mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
+   return ret;
 }
 
 // pthread_mutex_lock
 PTH_FUNC(int, pthreadZumutexZulock, // pthread_mutex_lock
          pthread_mutex_t *mutex)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_MUTEX_LOCK,
-                             mutex, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_MUTEX_LOCK,
+                              mutex, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_mutex_trylock
 PTH_FUNC(int, pthreadZumutexZutrylock, // pthread_mutex_trylock
          pthread_mutex_t *mutex)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             mutex, DRD_(mutex_type)(mutex), 1, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                             mutex, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              mutex, DRD_(mutex_type)(mutex), 1, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                              mutex, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_mutex_timedlock
@@ -499,34 +501,34 @@
          pthread_mutex_t *mutex,
          const struct timespec *abs_timeout)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                             mutex, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                              mutex, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_mutex_unlock
 PTH_FUNC(int, pthreadZumutexZuunlock, // pthread_mutex_unlock
          pthread_mutex_t *mutex)
 {
-  int ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1,
-                             VG_USERREQ__PRE_MUTEX_UNLOCK,
-                             mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1,
-                             VG_USERREQ__POST_MUTEX_UNLOCK,
-                             mutex, 0, 0, 0, 0);
-  return ret;
+   int ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1,
+                              VG_USERREQ__PRE_MUTEX_UNLOCK,
+                              mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1,
+                              VG_USERREQ__POST_MUTEX_UNLOCK,
+                              mutex, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_cond_init
@@ -534,32 +536,32 @@
          pthread_cond_t* cond,
          const pthread_condattr_t* attr)
 {
-  int ret;
-  int res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_INIT,
-                             cond, 0, 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, cond, attr);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_INIT,
-                             cond, 0, 0, 0, 0);
-  return ret;
+   int ret;
+   int res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_INIT,
+                              cond, 0, 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, cond, attr);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_INIT,
+                              cond, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_cond_destroy
 PTH_FUNC(int, pthreadZucondZudestroyZa, // pthread_cond_destroy*
          pthread_cond_t* cond)
 {
-  int ret;
-  int res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_DESTROY,
-                             cond, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, cond);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_DESTROY,
-                             cond, 0, 0, 0, 0);
-  return ret;
+   int ret;
+   int res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_DESTROY,
+                              cond, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, cond);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_DESTROY,
+                              cond, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_cond_wait
@@ -567,16 +569,16 @@
          pthread_cond_t *cond,
          pthread_mutex_t *mutex)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
-                             cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
-  CALL_FN_W_WW(ret, fn, cond, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
-                             cond, mutex, 1, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
+                              cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
+   CALL_FN_W_WW(ret, fn, cond, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
+                              cond, mutex, 1, 0, 0);
+   return ret;
 }
 
 // pthread_cond_timedwait
@@ -585,48 +587,48 @@
          pthread_mutex_t *mutex,
          const struct timespec* abstime)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
-                             cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
-  CALL_FN_W_WWW(ret, fn, cond, mutex, abstime);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
-                             cond, mutex, 1, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
+                              cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
+   CALL_FN_W_WWW(ret, fn, cond, mutex, abstime);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
+                              cond, mutex, 1, 0, 0);
+   return ret;
 }
 
 // pthread_cond_signal
 PTH_FUNC(int, pthreadZucondZusignalZa, // pthread_cond_signal*
          pthread_cond_t* cond)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_SIGNAL,
-                             cond, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, cond);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_SIGNAL,
-                             cond, 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_SIGNAL,
+                              cond, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, cond);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_SIGNAL,
+                              cond, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_cond_broadcast
 PTH_FUNC(int, pthreadZucondZubroadcastZa, // pthread_cond_broadcast*
          pthread_cond_t* cond)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_BROADCAST,
-                             cond, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, cond);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_BROADCAST,
-                             cond, 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_BROADCAST,
+                              cond, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, cond);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_BROADCAST,
+                              cond, 0, 0, 0, 0);
+   return ret;
 }
 
 
@@ -635,80 +637,80 @@
          pthread_spinlock_t *spinlock,
          int pshared)
 {
-  int ret;
-  int res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
-                             spinlock, 0, 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, spinlock, pshared);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
-                             spinlock, 0, 0, 0, 0);
-  return ret;
+   int ret;
+   int res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
+                              spinlock, 0, 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, spinlock, pshared);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
+                              spinlock, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_spin_destroy
 PTH_FUNC(int, pthreadZuspinZudestroy, // pthread_spin_destroy
          pthread_spinlock_t *spinlock)
 {
-  int ret;
-  int res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
-                             spinlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, spinlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
-                             spinlock, mutex_type_spinlock, 0, 0, 0);
-  return ret;
+   int ret;
+   int res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
+                              spinlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, spinlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
+                              spinlock, mutex_type_spinlock, 0, 0, 0);
+   return ret;
 }
 
 // pthread_spin_lock
 PTH_FUNC(int, pthreadZuspinZulock, // pthread_spin_lock
          pthread_spinlock_t *spinlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             spinlock, mutex_type_spinlock, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, spinlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                             spinlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              spinlock, mutex_type_spinlock, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, spinlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                              spinlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_spin_trylock
 PTH_FUNC(int, pthreadZuspinZutrylock, // pthread_spin_trylock
          pthread_spinlock_t *spinlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             spinlock, mutex_type_spinlock, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, spinlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                             spinlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              spinlock, mutex_type_spinlock, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, spinlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                              spinlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_spin_unlock
 PTH_FUNC(int, pthreadZuspinZuunlock, // pthread_spin_unlock
          pthread_spinlock_t *spinlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
-                             spinlock, mutex_type_spinlock, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, spinlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
-                             spinlock, 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
+                              spinlock, mutex_type_spinlock, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, spinlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
+                              spinlock, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_barrier_init
@@ -717,50 +719,50 @@
          const pthread_barrierattr_t* attr,
          unsigned count)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
-                             barrier, pthread_barrier, count, 0, 0);
-  CALL_FN_W_WWW(ret, fn, barrier, attr, count);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
-                             barrier, pthread_barrier, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
+                              barrier, pthread_barrier, count, 0, 0);
+   CALL_FN_W_WWW(ret, fn, barrier, attr, count);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
+                              barrier, pthread_barrier, 0, 0, 0);
+   return ret;
 }
 
 // pthread_barrier_destroy
 PTH_FUNC(int, pthreadZubarrierZudestroy, // pthread_barrier_destroy
          pthread_barrier_t* barrier)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_DESTROY,
-                             barrier, pthread_barrier, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, barrier);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_DESTROY,
-                             barrier, pthread_barrier, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_DESTROY,
+                              barrier, pthread_barrier, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, barrier);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_DESTROY,
+                              barrier, pthread_barrier, 0, 0, 0);
+   return ret;
 }
 
 // pthread_barrier_wait
 PTH_FUNC(int, pthreadZubarrierZuwait, // pthread_barrier_wait
          pthread_barrier_t* barrier)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_WAIT,
-                             barrier, pthread_barrier, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, barrier);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_WAIT,
-                             barrier, pthread_barrier,
-                             ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD,
-                             ret == PTHREAD_BARRIER_SERIAL_THREAD, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_WAIT,
+                              barrier, pthread_barrier, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, barrier);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_WAIT,
+                              barrier, pthread_barrier,
+                              ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD,
+                              ret == PTHREAD_BARRIER_SERIAL_THREAD, 0);
+   return ret;
 }
 
 
@@ -770,96 +772,96 @@
          int pshared,
          unsigned int value)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_INIT,
-                             sem, pshared, value, 0, 0);
-  CALL_FN_W_WWW(ret, fn, sem, pshared, value);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_INIT,
-                             sem, 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_INIT,
+                              sem, pshared, value, 0, 0);
+   CALL_FN_W_WWW(ret, fn, sem, pshared, value);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_INIT,
+                              sem, 0, 0, 0, 0);
+   return ret;
 }
 
 // sem_destroy
 PTH_FUNC(int, semZudestroyZa, // sem_destroy*
          sem_t *sem)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_DESTROY,
-                             sem, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, sem);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_DESTROY,
-                             sem, 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_DESTROY,
+                              sem, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, sem);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_DESTROY,
+                              sem, 0, 0, 0, 0);
+   return ret;
 }
 
 // sem_wait
 PTH_FUNC(int, semZuwaitZa, // sem_wait*
          sem_t *sem)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                             sem, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, sem);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                             sem, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                              sem, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, sem);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                              sem, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // sem_trywait
 PTH_FUNC(int, semZutrywaitZa, // sem_trywait*
          sem_t *sem)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                             sem, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, sem);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                             sem, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                              sem, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, sem);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                              sem, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // sem_timedwait
 PTH_FUNC(int, semZutimedwait, // sem_timedwait
          sem_t *sem, const struct timespec *abs_timeout)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
-                             sem, 0, 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, sem, abs_timeout);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
-                             sem, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
+                              sem, 0, 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, sem, abs_timeout);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
+                              sem, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // sem_post
 PTH_FUNC(int, semZupostZa, // sem_post*
          sem_t *sem)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_POST,
-                             sem, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, sem);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_POST,
-                             sem, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_POST,
+                              sem, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, sem);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_POST,
+                              sem, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_init
@@ -868,14 +870,14 @@
          pthread_rwlock_t* rwlock,
          const pthread_rwlockattr_t* attr)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_INIT,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, rwlock, attr);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_INIT,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, rwlock, attr);
+   return ret;
 }
 
 // pthread_rwlock_destroy
@@ -883,14 +885,14 @@
          pthreadZurwlockZudestroyZa, // pthread_rwlock_destroy*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_DESTROY,
-                             rwlock, 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_DESTROY,
+                              rwlock, 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_rdlock
@@ -898,16 +900,16 @@
          pthreadZurwlockZurdlockZa, // pthread_rwlock_rdlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_wrlock
@@ -915,16 +917,16 @@
          pthreadZurwlockZuwrlockZa, // pthread_rwlock_wrlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_timedrdlock
@@ -932,16 +934,16 @@
          pthreadZurwlockZutimedrdlockZa, // pthread_rwlock_timedrdlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_timedwrlock
@@ -949,16 +951,16 @@
          pthreadZurwlockZutimedwrlockZa, // pthread_rwlock_timedwrlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_tryrdlock
@@ -966,16 +968,16 @@
          pthreadZurwlockZutryrdlockZa, // pthread_rwlock_tryrdlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_RDLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_RDLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_trywrlock
@@ -983,16 +985,16 @@
          pthreadZurwlockZutrywrlockZa, // pthread_rwlock_trywrlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_WRLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_WRLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
 
 // pthread_rwlock_unlock
@@ -1000,14 +1002,14 @@
          pthreadZurwlockZuunlockZa, // pthread_rwlock_unlock*
          pthread_rwlock_t* rwlock)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_UNLOCK,
-                             rwlock, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, rwlock);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_UNLOCK,
-                             rwlock, ret == 0, 0, 0, 0);
-  return ret;
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_UNLOCK,
+                              rwlock, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, rwlock);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_RWLOCK_UNLOCK,
+                              rwlock, ret == 0, 0, 0, 0);
+   return ret;
 }
diff --git a/drd/drd_qtcore_intercepts.c b/drd/drd_qtcore_intercepts.c
index 0a5ebe7..cddfef9 100644
--- a/drd/drd_qtcore_intercepts.c
+++ b/drd/drd_qtcore_intercepts.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 
 /*--------------------------------------------------------------------*/
 /*--- Client-space code for drd.           drd_qtcore_intercepts.c ---*/
@@ -46,9 +47,9 @@
 
 // Defines.
 
-#define QT4CORE_FUNC(ret_ty, f, args...)                       \
-  ret_ty VG_WRAP_FUNCTION_ZU(libQtCoreZdsoZd4,f)(args);        \
-  ret_ty VG_WRAP_FUNCTION_ZU(libQtCoreZdsoZd4,f)(args)
+#define QT4CORE_FUNC(ret_ty, f, args...)                        \
+   ret_ty VG_WRAP_FUNCTION_ZU(libQtCoreZdsoZd4,f)(args);        \
+   ret_ty VG_WRAP_FUNCTION_ZU(libQtCoreZdsoZd4,f)(args)
 
 
 
@@ -63,14 +64,14 @@
 /** Convert a Qt4 mutex type to a DRD mutex type. */
 static MutexT qt_to_drd_mutex_type(qt_mutex_mode mode)
 {
-  switch (mode)
-  {
-  case qt_nonrecursive:
-    return mutex_type_default_mutex;
-  case qt_recursive:
-    return mutex_type_recursive_mutex;
-  }
-  return mutex_type_invalid_mutex;
+   switch (mode)
+   {
+   case qt_nonrecursive:
+      return mutex_type_default_mutex;
+   case qt_recursive:
+      return mutex_type_recursive_mutex;
+   }
+   return mutex_type_invalid_mutex;
 }
 
 /** Find out the type of a Qt4 mutex (recursive or not).
@@ -79,7 +80,7 @@
  */
 static MutexT mutex_type(void* qt4_mutex)
 {
-  return mutex_type_unknown;
+   return mutex_type_unknown;
 }
 
 
@@ -88,15 +89,15 @@
              void* mutex,
              qt_mutex_mode mode)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
-                             mutex, qt_to_drd_mutex_type(mode), 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, mutex, mode);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
-                             mutex, 0, 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
+                              mutex, qt_to_drd_mutex_type(mode), 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, mutex, mode);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
+                              mutex, 0, 0, 0, 0);
 }
 
 // QMutex::QMutex(RecursionMode) -- _ZN6QMutexC2ENS_13RecursionModeE
@@ -104,76 +105,76 @@
              void* mutex,
              qt_mutex_mode mode)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
-                             mutex, qt_to_drd_mutex_type(mode), 0, 0, 0);
-  CALL_FN_W_WW(ret, fn, mutex, mode);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
-                             mutex, 0, 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
+                              mutex, qt_to_drd_mutex_type(mode), 0, 0, 0);
+   CALL_FN_W_WW(ret, fn, mutex, mode);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
+                              mutex, 0, 0, 0, 0);
 }
 
 // QMutex::~QMutex() -- _ZN6QMutexD1Ev
 QT4CORE_FUNC(void, _ZN6QMutexD1Ev,
              void* mutex)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
-                             mutex, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
-                             mutex, mutex_type(mutex), 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
+                              mutex, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
+                              mutex, mutex_type(mutex), 0, 0, 0);
 }
 
 // QMutex::~QMutex() -- _ZN6QMutexD2Ev
 QT4CORE_FUNC(void, _ZN6QMutexD2Ev,
              void** mutex)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
-                             mutex, 0, 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
-                             mutex, mutex_type(mutex), 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_DESTROY,
+                              mutex, 0, 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_DESTROY,
+                              mutex, mutex_type(mutex), 0, 0, 0);
 }
 
 // QMutex::lock() -- _ZN6QMutex4lockEv
 QT4CORE_FUNC(void, _ZN6QMutex4lockEv,
              void* mutex)
 {
-  int   ret;
-  int   res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             mutex, mutex_type(mutex), 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_MUTEX_LOCK,
-                             mutex, 1, 0, 0, 0);
+   int   ret;
+   int   res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              mutex, mutex_type(mutex), 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_MUTEX_LOCK,
+                              mutex, 1, 0, 0, 0);
 }
 
 // QMutex::tryLock() -- _ZN6QMutex7tryLockEv
 QT4CORE_FUNC(int, _ZN6QMutex7tryLockEv,
              void* mutex)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             mutex, mutex_type(mutex), 1, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                             mutex, ret, 0, 0, 0);
-  return ret;
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              mutex, mutex_type(mutex), 1, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                              mutex, ret, 0, 0, 0);
+   return ret;
 }
 
 // QMutex::tryLock(int) -- _ZN6QMutex7tryLockEi
@@ -181,31 +182,31 @@
              void* mutex,
              int timeout_ms)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
-                             mutex, mutex_type(mutex), 1, 0, 0);
-  CALL_FN_W_WW(ret, fn, mutex, timeout_ms);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
-                             mutex, ret, 0, 0, 0);
-  return ret;
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
+                              mutex, mutex_type(mutex), 1, 0, 0);
+   CALL_FN_W_WW(ret, fn, mutex, timeout_ms);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
+                              mutex, ret, 0, 0, 0);
+   return ret;
 }
 
 // QMutex::unlock() -- _ZN6QMutex6unlockEv
 QT4CORE_FUNC(void, _ZN6QMutex6unlockEv,
              void* mutex)
 {
-  int    ret;
-  int    res;
-  OrigFn fn;
-  VALGRIND_GET_ORIG_FN(fn);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1,
-                             VG_USERREQ__PRE_MUTEX_UNLOCK,
-                             mutex, mutex_type(mutex), 0, 0, 0);
-  CALL_FN_W_W(ret, fn, mutex);
-  VALGRIND_DO_CLIENT_REQUEST(res, -1,
-                             VG_USERREQ__POST_MUTEX_UNLOCK,
-                             mutex, 0, 0, 0, 0);
+   int    ret;
+   int    res;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1,
+                              VG_USERREQ__PRE_MUTEX_UNLOCK,
+                              mutex, mutex_type(mutex), 0, 0, 0);
+   CALL_FN_W_W(ret, fn, mutex);
+   VALGRIND_DO_CLIENT_REQUEST(res, -1,
+                              VG_USERREQ__POST_MUTEX_UNLOCK,
+                              mutex, 0, 0, 0, 0);
 }
diff --git a/drd/drd_rwlock.c b/drd/drd_rwlock.c
index 12d8920..a2cfdaf 100644
--- a/drd/drd_rwlock.c
+++ b/drd/drd_rwlock.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -39,11 +40,11 @@
 
 struct rwlock_thread_info
 {
-  UWord    tid;                 // DrdThreadId.
-  UInt     reader_nesting_count;
-  UInt     writer_nesting_count;
-  Segment* last_unlock_segment; // Segment of last unlock call by this thread.
-  Bool     last_lock_was_writer_lock;
+   UWord    tid;                 // DrdThreadId.
+   UInt     reader_nesting_count;
+   UInt     writer_nesting_count;
+   Segment* last_unlock_segment; // Segment of last unlock call by this thread.
+   Bool     last_lock_was_writer_lock;
 };
 
 
@@ -66,74 +67,74 @@
 
 void DRD_(rwlock_set_trace)(const Bool trace_rwlock)
 {
-  tl_assert(trace_rwlock == False || trace_rwlock == True);
-  DRD_(s_trace_rwlock) = trace_rwlock;
+   tl_assert(trace_rwlock == False || trace_rwlock == True);
+   DRD_(s_trace_rwlock) = trace_rwlock;
 }
 
 void DRD_(rwlock_set_exclusive_threshold)(const UInt exclusive_threshold_ms)
 {
-  DRD_(s_exclusive_threshold_ms) = exclusive_threshold_ms;
+   DRD_(s_exclusive_threshold_ms) = exclusive_threshold_ms;
 }
 
 void DRD_(rwlock_set_shared_threshold)(const UInt shared_threshold_ms)
 {
-  DRD_(s_shared_threshold_ms) = shared_threshold_ms;
+   DRD_(s_shared_threshold_ms) = shared_threshold_ms;
 }
 
 static Bool DRD_(rwlock_is_rdlocked)(struct rwlock_info* p)
 {
-  struct rwlock_thread_info* q;
+   struct rwlock_thread_info* q;
 
-  VG_(OSetGen_ResetIter)(p->thread_info);
-  for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
-  {
-    return q->reader_nesting_count > 0;
-  }
-  return False;
+   VG_(OSetGen_ResetIter)(p->thread_info);
+   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
+   {
+      return q->reader_nesting_count > 0;
+   }
+   return False;
 }
 
 static Bool DRD_(rwlock_is_wrlocked)(struct rwlock_info* p)
 {
-  struct rwlock_thread_info* q;
+   struct rwlock_thread_info* q;
 
-  VG_(OSetGen_ResetIter)(p->thread_info);
-  for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
-  {
-    return q->writer_nesting_count > 0;
-  }
-  return False;
+   VG_(OSetGen_ResetIter)(p->thread_info);
+   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
+   {
+      return q->writer_nesting_count > 0;
+   }
+   return False;
 }
 
 static Bool DRD_(rwlock_is_locked)(struct rwlock_info* p)
 {
-  return DRD_(rwlock_is_rdlocked)(p) || DRD_(rwlock_is_wrlocked)(p);
+   return DRD_(rwlock_is_rdlocked)(p) || DRD_(rwlock_is_wrlocked)(p);
 }
 
 static Bool DRD_(rwlock_is_rdlocked_by)(struct rwlock_info* p,
                                         const DrdThreadId tid)
 {
-  const UWord uword_tid = tid;
-  struct rwlock_thread_info* q;
+   const UWord uword_tid = tid;
+   struct rwlock_thread_info* q;
 
-  q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
-  return q && q->reader_nesting_count > 0;
+   q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
+   return q && q->reader_nesting_count > 0;
 }
 
 static Bool DRD_(rwlock_is_wrlocked_by)(struct rwlock_info* p,
                                         const DrdThreadId tid)
 {
-  const UWord uword_tid = tid;
-  struct rwlock_thread_info* q;
+   const UWord uword_tid = tid;
+   struct rwlock_thread_info* q;
 
-  q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
-  return q && q->writer_nesting_count > 0;
+   q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
+   return q && q->writer_nesting_count > 0;
 }
 
 static Bool DRD_(rwlock_is_locked_by)(struct rwlock_info* p,
                                       const DrdThreadId tid)
 {
-  return (DRD_(rwlock_is_rdlocked_by)(p, tid)
-          || DRD_(rwlock_is_wrlocked_by)(p, tid));
+   return (DRD_(rwlock_is_rdlocked_by)(p, tid)
+           || DRD_(rwlock_is_wrlocked_by)(p, tid));
 }
 
 /** Either look up or insert a node corresponding to DRD thread id 'tid'. */
@@ -141,21 +142,21 @@
 struct rwlock_thread_info*
 DRD_(lookup_or_insert_node)(OSet* oset, const UWord tid)
 {
-  struct rwlock_thread_info* q;
+   struct rwlock_thread_info* q;
 
-  q = VG_(OSetGen_Lookup)(oset, &tid);
-  if (q == 0)
-  {
-    q = VG_(OSetGen_AllocNode)(oset, sizeof(*q));
-    q->tid                       = tid;
-    q->reader_nesting_count      = 0;
-    q->writer_nesting_count      = 0;
-    q->last_unlock_segment       = 0;
-    q->last_lock_was_writer_lock = False;
-    VG_(OSetGen_Insert)(oset, q);
-  }
-  tl_assert(q);
-  return q;
+   q = VG_(OSetGen_Lookup)(oset, &tid);
+   if (q == 0)
+   {
+      q = VG_(OSetGen_AllocNode)(oset, sizeof(*q));
+      q->tid                       = tid;
+      q->reader_nesting_count      = 0;
+      q->writer_nesting_count      = 0;
+      q->last_unlock_segment       = 0;
+      q->last_lock_was_writer_lock = False;
+      VG_(OSetGen_Insert)(oset, q);
+   }
+   tl_assert(q);
+   return q;
 }
 
 /**
@@ -166,155 +167,156 @@
                                           const DrdThreadId tid,
                                           const Bool readers_too)
 {
-  struct rwlock_thread_info* q;
+   struct rwlock_thread_info* q;
 
-  VG_(OSetGen_ResetIter)(p->thread_info);
-  for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
-  {
-    if (q->tid != tid && (readers_too || q->last_lock_was_writer_lock))
-    {
-      DRD_(thread_combine_vc2)(tid, &q->last_unlock_segment->vc);
-    }
-  }
+   VG_(OSetGen_ResetIter)(p->thread_info);
+   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
+   {
+      if (q->tid != tid && (readers_too || q->last_lock_was_writer_lock))
+      {
+         DRD_(thread_combine_vc2)(tid, &q->last_unlock_segment->vc);
+      }
+   }
 }
 
 /** Initialize the rwlock_info data structure *p. */
 static
 void DRD_(rwlock_initialize)(struct rwlock_info* const p, const Addr rwlock)
 {
-  tl_assert(rwlock != 0);
-  tl_assert(p->a1 == rwlock);
-  tl_assert(p->type == ClientRwlock);
+   tl_assert(rwlock != 0);
+   tl_assert(p->a1 == rwlock);
+   tl_assert(p->type == ClientRwlock);
 
-  p->cleanup         = (void(*)(DrdClientobj*))rwlock_cleanup;
-  p->delete_thread = (void(*)(DrdClientobj*, DrdThreadId))rwlock_delete_thread;
-  p->thread_info     = VG_(OSetGen_Create)(
-                          0, 0, VG_(malloc), "drd.rwlock.ri.1", VG_(free));
-  p->acquiry_time_ms = 0;
-  p->acquired_at     = 0;
+   p->cleanup         = (void(*)(DrdClientobj*))rwlock_cleanup;
+   p->delete_thread
+      = (void(*)(DrdClientobj*, DrdThreadId))rwlock_delete_thread;
+   p->thread_info     = VG_(OSetGen_Create)(
+      0, 0, VG_(malloc), "drd.rwlock.ri.1", VG_(free));
+   p->acquiry_time_ms = 0;
+   p->acquired_at     = 0;
 }
 
 /** Deallocate the memory that was allocated by rwlock_initialize(). */
 static void rwlock_cleanup(struct rwlock_info* p)
 {
-  struct rwlock_thread_info* q;
+   struct rwlock_thread_info* q;
 
-  tl_assert(p);
+   tl_assert(p);
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] rwlock_destroy     0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 p->a1);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] rwlock_destroy     0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   p->a1);
+   }
 
-  if (DRD_(rwlock_is_locked)(p))
-  {
-    RwlockErrInfo REI = { p->a1 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            RwlockErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Destroying locked rwlock",
-                            &REI);
-  }
+   if (DRD_(rwlock_is_locked)(p))
+   {
+      RwlockErrInfo REI = { p->a1 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              RwlockErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Destroying locked rwlock",
+                              &REI);
+   }
 
-  VG_(OSetGen_ResetIter)(p->thread_info);
-  for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
-  {
-    DRD_(sg_put)(q->last_unlock_segment);
-  }
-  VG_(OSetGen_Destroy)(p->thread_info);
+   VG_(OSetGen_ResetIter)(p->thread_info);
+   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
+   {
+      DRD_(sg_put)(q->last_unlock_segment);
+   }
+   VG_(OSetGen_Destroy)(p->thread_info);
 }
 
 static
 struct rwlock_info*
 DRD_(rwlock_get_or_allocate)(const Addr rwlock)
 {
-  struct rwlock_info* p;
+   struct rwlock_info* p;
 
-  tl_assert(offsetof(DrdClientobj, rwlock) == 0);
-  p = &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
-  if (p)
-  {
-    return p;
-  }
+   tl_assert(offsetof(DrdClientobj, rwlock) == 0);
+   p = &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
+   if (p)
+   {
+      return p;
+   }
 
-  if (DRD_(clientobj_present)(rwlock, rwlock + 1))
-  {
-    GenericErrInfo GEI;
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            GenericErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Not a reader-writer lock",
-                            &GEI);
-    return 0;
-  }
+   if (DRD_(clientobj_present)(rwlock, rwlock + 1))
+   {
+      GenericErrInfo GEI;
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              GenericErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Not a reader-writer lock",
+                              &GEI);
+      return 0;
+   }
 
-  p = &(DRD_(clientobj_add)(rwlock, ClientRwlock)->rwlock);
-  DRD_(rwlock_initialize)(p, rwlock);
-  return p;
+   p = &(DRD_(clientobj_add)(rwlock, ClientRwlock)->rwlock);
+   DRD_(rwlock_initialize)(p, rwlock);
+   return p;
 }
 
 static struct rwlock_info* DRD_(rwlock_get)(const Addr rwlock)
 {
-  tl_assert(offsetof(DrdClientobj, rwlock) == 0);
-  return &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
+   tl_assert(offsetof(DrdClientobj, rwlock) == 0);
+   return &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
 }
 
 /** Called before pthread_rwlock_init(). */
 struct rwlock_info* DRD_(rwlock_pre_init)(const Addr rwlock)
 {
-  struct rwlock_info* p;
+   struct rwlock_info* p;
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] rwlock_init        0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 rwlock);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] rwlock_init        0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   rwlock);
+   }
 
-  p = DRD_(rwlock_get)(rwlock);
+   p = DRD_(rwlock_get)(rwlock);
 
-  if (p)
-  {
-    const ThreadId vg_tid = VG_(get_running_tid)();
-    RwlockErrInfo REI
-      = { p->a1 };
-    VG_(maybe_record_error)(vg_tid,
-                            RwlockErr,
-                            VG_(get_IP)(vg_tid),
-                            "Reader-writer lock reinitialization",
-                            &REI);
-    return p;
-  }
+   if (p)
+   {
+      const ThreadId vg_tid = VG_(get_running_tid)();
+      RwlockErrInfo REI
+         = { p->a1 };
+      VG_(maybe_record_error)(vg_tid,
+                              RwlockErr,
+                              VG_(get_IP)(vg_tid),
+                              "Reader-writer lock reinitialization",
+                              &REI);
+      return p;
+   }
 
-  p = DRD_(rwlock_get_or_allocate)(rwlock);
+   p = DRD_(rwlock_get_or_allocate)(rwlock);
 
-  return p;
+   return p;
 }
 
 /** Called after pthread_rwlock_destroy(). */
 void DRD_(rwlock_post_destroy)(const Addr rwlock)
 {
-  struct rwlock_info* p;
+   struct rwlock_info* p;
 
-  p = DRD_(rwlock_get)(rwlock);
-  if (p == 0)
-  {
-    GenericErrInfo GEI;
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            GenericErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Not a reader-writer lock",
-                            &GEI);
-    return;
-  }
+   p = DRD_(rwlock_get)(rwlock);
+   if (p == 0)
+   {
+      GenericErrInfo GEI;
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              GenericErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Not a reader-writer lock",
+                              &GEI);
+      return;
+   }
 
-  DRD_(clientobj_remove)(rwlock, ClientRwlock);
+   DRD_(clientobj_remove)(rwlock, ClientRwlock);
 }
 
 /**
@@ -325,27 +327,27 @@
  */
 void DRD_(rwlock_pre_rdlock)(const Addr rwlock)
 {
-  struct rwlock_info* p;
+   struct rwlock_info* p;
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] pre_rwlock_rdlock  0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 rwlock);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] pre_rwlock_rdlock  0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   rwlock);
+   }
 
-  p = DRD_(rwlock_get_or_allocate)(rwlock);
-  tl_assert(p);
+   p = DRD_(rwlock_get_or_allocate)(rwlock);
+   tl_assert(p);
 
-  if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "reader-writer lock 0x%lx is already locked for"
-                 " writing by calling thread",
-                 p->a1);
-  }
+   if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "reader-writer lock 0x%lx is already locked for"
+                   " writing by calling thread",
+                   p->a1);
+   }
 }
 
 /**
@@ -355,37 +357,37 @@
  */
 void DRD_(rwlock_post_rdlock)(const Addr rwlock, const Bool took_lock)
 {
-  const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
-  struct rwlock_info* p;
-  struct rwlock_thread_info* q;
+   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
+   struct rwlock_info* p;
+   struct rwlock_thread_info* q;
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] post_rwlock_rdlock 0x%lx",
-                 VG_(get_running_tid)(),
-                 drd_tid,
-                 rwlock);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] post_rwlock_rdlock 0x%lx",
+                   VG_(get_running_tid)(),
+                   drd_tid,
+                   rwlock);
+   }
 
-  p = DRD_(rwlock_get)(rwlock);
+   p = DRD_(rwlock_get)(rwlock);
 
-  if (! p || ! took_lock)
-    return;
+   if (! p || ! took_lock)
+      return;
 
-  tl_assert(! DRD_(rwlock_is_wrlocked)(p));
+   tl_assert(! DRD_(rwlock_is_wrlocked)(p));
 
-  q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
-  if (++q->reader_nesting_count == 1)
-  {
-    DRD_(rwlock_combine_other_vc)(p, drd_tid, False);
-    q->last_lock_was_writer_lock = False;
-    DRD_(thread_new_segment)(drd_tid);
-    DRD_(s_rwlock_segment_creation_count)++;
+   q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
+   if (++q->reader_nesting_count == 1)
+   {
+      DRD_(rwlock_combine_other_vc)(p, drd_tid, False);
+      q->last_lock_was_writer_lock = False;
+      DRD_(thread_new_segment)(drd_tid);
+      DRD_(s_rwlock_segment_creation_count)++;
 
-    p->acquiry_time_ms = VG_(read_millisecond_timer)();
-    p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
-  }
+      p->acquiry_time_ms = VG_(read_millisecond_timer)();
+      p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
+   }
 }
 
 /**
@@ -396,35 +398,35 @@
  */
 void DRD_(rwlock_pre_wrlock)(const Addr rwlock)
 {
-  struct rwlock_info* p;
+   struct rwlock_info* p;
 
-  p = DRD_(rwlock_get)(rwlock);
+   p = DRD_(rwlock_get)(rwlock);
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] pre_rwlock_wrlock  0x%lx",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 rwlock);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] pre_rwlock_wrlock  0x%lx",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   rwlock);
+   }
 
-  if (p == 0)
-  {
-    p = DRD_(rwlock_get_or_allocate)(rwlock);
-  }
+   if (p == 0)
+   {
+      p = DRD_(rwlock_get_or_allocate)(rwlock);
+   }
 
-  tl_assert(p);
+   tl_assert(p);
 
-  if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
-  {
-    RwlockErrInfo REI = { p->a1 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            RwlockErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Recursive writer locking not allowed",
-                            &REI);
-  }
+   if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
+   {
+      RwlockErrInfo REI = { p->a1 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              RwlockErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Recursive writer locking not allowed",
+                              &REI);
+   }
 }
 
 /**
@@ -434,35 +436,35 @@
  */
 void DRD_(rwlock_post_wrlock)(const Addr rwlock, const Bool took_lock)
 {
-  const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
-  struct rwlock_info* p;
-  struct rwlock_thread_info* q;
+   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
+   struct rwlock_info* p;
+   struct rwlock_thread_info* q;
 
-  p = DRD_(rwlock_get)(rwlock);
+   p = DRD_(rwlock_get)(rwlock);
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] post_rwlock_wrlock 0x%lx",
-                 VG_(get_running_tid)(),
-                 drd_tid,
-                 rwlock);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] post_rwlock_wrlock 0x%lx",
+                   VG_(get_running_tid)(),
+                   drd_tid,
+                   rwlock);
+   }
 
-  if (! p || ! took_lock)
-    return;
+   if (! p || ! took_lock)
+      return;
 
-  q = DRD_(lookup_or_insert_node)(p->thread_info,
-                                  DRD_(thread_get_running_tid)());
-  tl_assert(q->writer_nesting_count == 0);
-  q->writer_nesting_count++;
-  q->last_lock_was_writer_lock = True;
-  tl_assert(q->writer_nesting_count == 1);
-  DRD_(rwlock_combine_other_vc)(p, drd_tid, True);
-  DRD_(thread_new_segment)(drd_tid);
-  DRD_(s_rwlock_segment_creation_count)++;
-  p->acquiry_time_ms = VG_(read_millisecond_timer)();
-  p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
+   q = DRD_(lookup_or_insert_node)(p->thread_info,
+                                   DRD_(thread_get_running_tid)());
+   tl_assert(q->writer_nesting_count == 0);
+   q->writer_nesting_count++;
+   q->last_lock_was_writer_lock = True;
+   tl_assert(q->writer_nesting_count == 1);
+   DRD_(rwlock_combine_other_vc)(p, drd_tid, True);
+   DRD_(thread_new_segment)(drd_tid);
+   DRD_(s_rwlock_segment_creation_count)++;
+   p->acquiry_time_ms = VG_(read_millisecond_timer)();
+   p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
 }
 
 /**
@@ -477,94 +479,95 @@
  */
 void DRD_(rwlock_pre_unlock)(const Addr rwlock)
 {
-  const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
-  const ThreadId vg_tid = VG_(get_running_tid)();
-  struct rwlock_info* p;
-  struct rwlock_thread_info* q;
+   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
+   const ThreadId vg_tid = VG_(get_running_tid)();
+   struct rwlock_info* p;
+   struct rwlock_thread_info* q;
 
-  if (DRD_(s_trace_rwlock))
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] rwlock_unlock      0x%lx",
-                 vg_tid,
-                 drd_tid,
-                 rwlock);
-  }
+   if (DRD_(s_trace_rwlock))
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] rwlock_unlock      0x%lx",
+                   vg_tid,
+                   drd_tid,
+                   rwlock);
+   }
 
-  p = DRD_(rwlock_get)(rwlock);
-  if (p == 0)
-  {
-    GenericErrInfo GEI;
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            GenericErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Not a reader-writer lock",
-                            &GEI);
-    return;
-  }
-  if (! DRD_(rwlock_is_locked_by)(p, drd_tid))
-  {
-    RwlockErrInfo REI = { p->a1 };
-    VG_(maybe_record_error)(vg_tid,
-                            RwlockErr,
-                            VG_(get_IP)(vg_tid),
-                            "Reader-writer lock not locked by calling thread",
-                            &REI);
-    return;
-  }
-  q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
-  tl_assert(q);
-  if (q->reader_nesting_count > 0)
-  {
-    q->reader_nesting_count--;
-    if (q->reader_nesting_count == 0 && DRD_(s_shared_threshold_ms) > 0)
-    {
-      ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
-      if (held > DRD_(s_shared_threshold_ms))
+   p = DRD_(rwlock_get)(rwlock);
+   if (p == 0)
+   {
+      GenericErrInfo GEI;
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              GenericErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Not a reader-writer lock",
+                              &GEI);
+      return;
+   }
+   if (! DRD_(rwlock_is_locked_by)(p, drd_tid))
+   {
+      RwlockErrInfo REI = { p->a1 };
+      VG_(maybe_record_error)(vg_tid,
+                              RwlockErr,
+                              VG_(get_IP)(vg_tid),
+                              "Reader-writer lock not locked by calling thread",
+                              &REI);
+      return;
+   }
+   q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
+   tl_assert(q);
+   if (q->reader_nesting_count > 0)
+   {
+      q->reader_nesting_count--;
+      if (q->reader_nesting_count == 0 && DRD_(s_shared_threshold_ms) > 0)
       {
-        HoldtimeErrInfo HEI
-          = { rwlock, p->acquired_at, held, DRD_(s_shared_threshold_ms) };
-        VG_(maybe_record_error)(vg_tid,
-                                HoldtimeErr,
-                                VG_(get_IP)(vg_tid),
-                                "rwlock",
-                                &HEI);
+         ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
+         if (held > DRD_(s_shared_threshold_ms))
+         {
+            HoldtimeErrInfo HEI
+               = { rwlock, p->acquired_at, held, DRD_(s_shared_threshold_ms) };
+            VG_(maybe_record_error)(vg_tid,
+                                    HoldtimeErr,
+                                    VG_(get_IP)(vg_tid),
+                                    "rwlock",
+                                    &HEI);
+         }
       }
-    }
-  }
-  else if (q->writer_nesting_count > 0)
-  {
-    q->writer_nesting_count--;
-    if (q->writer_nesting_count == 0 && DRD_(s_exclusive_threshold_ms) > 0)
-    {
-      ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
-      if (held > DRD_(s_exclusive_threshold_ms))
+   }
+   else if (q->writer_nesting_count > 0)
+   {
+      q->writer_nesting_count--;
+      if (q->writer_nesting_count == 0 && DRD_(s_exclusive_threshold_ms) > 0)
       {
-        HoldtimeErrInfo HEI
-          = { rwlock, p->acquired_at, held, DRD_(s_exclusive_threshold_ms) };
-        VG_(maybe_record_error)(vg_tid,
-                                HoldtimeErr,
-                                VG_(get_IP)(vg_tid),
-                                "rwlock",
-                                &HEI);
+         ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
+         if (held > DRD_(s_exclusive_threshold_ms))
+         {
+            HoldtimeErrInfo HEI
+               = { rwlock, p->acquired_at, held,
+                   DRD_(s_exclusive_threshold_ms) };
+            VG_(maybe_record_error)(vg_tid,
+                                    HoldtimeErr,
+                                    VG_(get_IP)(vg_tid),
+                                    "rwlock",
+                                    &HEI);
+         }
       }
-    }
-  }
-  else
-  {
-    tl_assert(False);
-  }
+   }
+   else
+   {
+      tl_assert(False);
+   }
 
-  if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
-  {
-    /* This pthread_rwlock_unlock() call really unlocks the rwlock. Save the */
-    /* current vector clock of the thread such that it is available when  */
-    /* this rwlock is locked again.                                        */
+   if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
+   {
+      /* This pthread_rwlock_unlock() call really unlocks the rwlock. Save */
+      /* the current vector clock of the thread such that it is available  */
+      /* when this rwlock is locked again.                                 */
 
-    DRD_(thread_get_latest_segment)(&q->last_unlock_segment, drd_tid);
-    DRD_(thread_new_segment)(drd_tid);
-    DRD_(s_rwlock_segment_creation_count)++;
-  }
+      DRD_(thread_get_latest_segment)(&q->last_unlock_segment, drd_tid);
+      DRD_(thread_new_segment)(drd_tid);
+      DRD_(s_rwlock_segment_creation_count)++;
+   }
 }
 
 /**
@@ -574,22 +577,22 @@
 static void rwlock_delete_thread(struct rwlock_info* const p,
                                  const DrdThreadId tid)
 {
-  struct rwlock_thread_info* q;
-  if (DRD_(rwlock_is_locked_by)(p, tid))
-  {
-    RwlockErrInfo REI = { p->a1 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            RwlockErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Reader-writer lock still locked at thread exit",
-                            &REI);
-    q = DRD_(lookup_or_insert_node)(p->thread_info, tid);
-    q->reader_nesting_count = 0;
-    q->writer_nesting_count = 0;
-  }
+   struct rwlock_thread_info* q;
+   if (DRD_(rwlock_is_locked_by)(p, tid))
+   {
+      RwlockErrInfo REI = { p->a1 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              RwlockErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Reader-writer lock still locked at thread exit",
+                              &REI);
+      q = DRD_(lookup_or_insert_node)(p->thread_info, tid);
+      q->reader_nesting_count = 0;
+      q->writer_nesting_count = 0;
+   }
 }
 
 ULong DRD_(get_rwlock_segment_creation_count)(void)
 {
-  return DRD_(s_rwlock_segment_creation_count);
+   return DRD_(s_rwlock_segment_creation_count);
 }
diff --git a/drd/drd_rwlock.h b/drd/drd_rwlock.h
index abb59c7..e39a502 100644
--- a/drd/drd_rwlock.h
+++ b/drd/drd_rwlock.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_segment.c b/drd/drd_segment.c
index 06972e4..d513efa 100644
--- a/drd/drd_segment.c
+++ b/drd/drd_segment.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -58,110 +59,110 @@
                     const DrdThreadId creator,
                     const DrdThreadId created)
 {
-  Segment* creator_sg;
-  ThreadId vg_created = DRD_(DrdThreadIdToVgThreadId)(created);
+   Segment* creator_sg;
+   ThreadId vg_created = DRD_(DrdThreadIdToVgThreadId)(created);
 
-  tl_assert(sg);
-  tl_assert(creator == DRD_INVALID_THREADID
-            || DRD_(IsValidDrdThreadId)(creator));
+   tl_assert(sg);
+   tl_assert(creator == DRD_INVALID_THREADID
+             || DRD_(IsValidDrdThreadId)(creator));
 
-  creator_sg = (creator != DRD_INVALID_THREADID
-                ? DRD_(thread_get_segment)(creator) : 0);
+   creator_sg = (creator != DRD_INVALID_THREADID
+                 ? DRD_(thread_get_segment)(creator) : 0);
 
-  sg->next = 0;
-  sg->prev = 0;
-  sg->refcnt = 1;
+   sg->next = 0;
+   sg->prev = 0;
+   sg->refcnt = 1;
 
-  if (vg_created != VG_INVALID_THREADID && VG_(get_SP)(vg_created) != 0)
-    sg->stacktrace = VG_(record_ExeContext)(vg_created, 0);
-  else
-    sg->stacktrace = 0;
+   if (vg_created != VG_INVALID_THREADID && VG_(get_SP)(vg_created) != 0)
+      sg->stacktrace = VG_(record_ExeContext)(vg_created, 0);
+   else
+      sg->stacktrace = 0;
 
-  if (creator_sg)
-    DRD_(vc_copy)(&sg->vc, &creator_sg->vc);
-  else
-    DRD_(vc_init)(&sg->vc, 0, 0);
-  DRD_(vc_increment)(&sg->vc, created);
-  sg->bm = DRD_(bm_new)();
+   if (creator_sg)
+      DRD_(vc_copy)(&sg->vc, &creator_sg->vc);
+   else
+      DRD_(vc_init)(&sg->vc, 0, 0);
+   DRD_(vc_increment)(&sg->vc, created);
+   sg->bm = DRD_(bm_new)();
 
-  if (s_trace_segment)
-  {
-    char msg[256];
-    VG_(snprintf)(msg, sizeof(msg),
-                  "New segment for thread %d/%d with vc ",
-                  created != VG_INVALID_THREADID
-                  ? DRD_(DrdThreadIdToVgThreadId)(created)
-                  : DRD_INVALID_THREADID,
-                  created);
-    DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-               &sg->vc);
-    VG_(message)(Vg_UserMsg, "%s", msg);
-  }
+   if (s_trace_segment)
+   {
+      char msg[256];
+      VG_(snprintf)(msg, sizeof(msg),
+                    "New segment for thread %d/%d with vc ",
+                    created != VG_INVALID_THREADID
+                    ? DRD_(DrdThreadIdToVgThreadId)(created)
+                    : DRD_INVALID_THREADID,
+                    created);
+      DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                       &sg->vc);
+      VG_(message)(Vg_UserMsg, "%s", msg);
+   }
 }
 
 /** Deallocate the memory that was allocated by sg_init(). */
 static void DRD_(sg_cleanup)(Segment* const sg)
 {
-  tl_assert(sg);
-  tl_assert(sg->refcnt == 0);
+   tl_assert(sg);
+   tl_assert(sg->refcnt == 0);
 
-  DRD_(vc_cleanup)(&sg->vc);
-  DRD_(bm_delete)(sg->bm);
-  sg->bm = 0;
+   DRD_(vc_cleanup)(&sg->vc);
+   DRD_(bm_delete)(sg->bm);
+   sg->bm = 0;
 }
 
 /** Allocate and initialize a new segment. */
 Segment* DRD_(sg_new)(const DrdThreadId creator, const DrdThreadId created)
 {
-  Segment* sg;
+   Segment* sg;
 
-  s_segments_created_count++;
-  s_segments_alive_count++;
-  if (s_max_segments_alive_count < s_segments_alive_count)
-    s_max_segments_alive_count = s_segments_alive_count;
+   s_segments_created_count++;
+   s_segments_alive_count++;
+   if (s_max_segments_alive_count < s_segments_alive_count)
+      s_max_segments_alive_count = s_segments_alive_count;
 
-  sg = VG_(malloc)("drd.segment.sn.1", sizeof(*sg));
-  tl_assert(sg);
-  sg_init(sg, creator, created);
-  return sg;
+   sg = VG_(malloc)("drd.segment.sn.1", sizeof(*sg));
+   tl_assert(sg);
+   sg_init(sg, creator, created);
+   return sg;
 }
 
 static void DRD_(sg_delete)(Segment* const sg)
 {
 #if 1
-  if (DRD_(sg_get_trace)())
-  {
-    char msg[256];
-    VG_(snprintf)(msg, sizeof(msg),
-                  "Discarding the segment with vector clock ");
-    DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-               &sg->vc);
-    VG_(message)(Vg_UserMsg, "%s", msg);
-  }
+   if (DRD_(sg_get_trace)())
+   {
+      char msg[256];
+      VG_(snprintf)(msg, sizeof(msg),
+                    "Discarding the segment with vector clock ");
+      DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                       &sg->vc);
+      VG_(message)(Vg_UserMsg, "%s", msg);
+   }
 #endif
 
-  s_segments_alive_count--;
+   s_segments_alive_count--;
 
-  tl_assert(sg);
-  DRD_(sg_cleanup)(sg);
-  VG_(free)(sg);
+   tl_assert(sg);
+   DRD_(sg_cleanup)(sg);
+   VG_(free)(sg);
 }
 
 /** Query the reference count of the specified segment. */
 int DRD_(sg_get_refcnt)(const Segment* const sg)
 {
-  tl_assert(sg);
+   tl_assert(sg);
 
-  return sg->refcnt;
+   return sg->refcnt;
 }
 
 /** Increment the reference count of the specified segment. */
 Segment* DRD_(sg_get)(Segment* const sg)
 {
-  tl_assert(sg);
+   tl_assert(sg);
 
-  sg->refcnt++;
-  return sg;
+   sg->refcnt++;
+   return sg;
 }
 
 /**
@@ -170,90 +171,90 @@
  */
 void DRD_(sg_put)(Segment* const sg)
 {
-  if (sg == 0)
-    return;
+   if (sg == 0)
+      return;
 
-  if (s_trace_segment)
-  {
-    char msg[256];
-    VG_(snprintf)(msg, sizeof(msg),
-                  "Decrementing segment reference count %d -> %d with vc ",
-                  sg->refcnt, sg->refcnt - 1);
-    DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-               &sg->vc);
-    VG_(message)(Vg_UserMsg, "%s", msg);
-  }
+   if (s_trace_segment)
+   {
+      char msg[256];
+      VG_(snprintf)(msg, sizeof(msg),
+                    "Decrementing segment reference count %d -> %d with vc ",
+                    sg->refcnt, sg->refcnt - 1);
+      DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                       &sg->vc);
+      VG_(message)(Vg_UserMsg, "%s", msg);
+   }
 
-  tl_assert(sg->refcnt >= 1);
+   tl_assert(sg->refcnt >= 1);
 
-  if (--sg->refcnt == 0)
-  {
-    DRD_(sg_delete)(sg);
-  }
+   if (--sg->refcnt == 0)
+   {
+      DRD_(sg_delete)(sg);
+   }
 }
 
 /** Merge sg1 and sg2 into sg1. */
 void DRD_(sg_merge)(const Segment* const sg1, Segment* const sg2)
 {
-  tl_assert(sg1);
-  tl_assert(sg1->refcnt == 1);
-  tl_assert(sg2);
-  tl_assert(sg2->refcnt == 1);
+   tl_assert(sg1);
+   tl_assert(sg1->refcnt == 1);
+   tl_assert(sg2);
+   tl_assert(sg2->refcnt == 1);
 
-  if (s_trace_segment)
-  {
+   if (s_trace_segment)
+   {
       char msg[256];
 
       VG_(snprintf)(msg, sizeof(msg), "Merging segments with vector clocks ");
       DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                 &sg1->vc);
+                       &sg1->vc);
       VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                     " and ");
       DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                 &sg2->vc);
+                       &sg2->vc);
       VG_(message)(Vg_UserMsg, "%s", msg);
-  }
+   }
 
-  // Keep sg1->stacktrace.
-  // Keep sg1->vc.
-  // Merge sg2->bm into sg1->bm.
-  DRD_(bm_merge2)(sg1->bm, sg2->bm);
+   // Keep sg1->stacktrace.
+   // Keep sg1->vc.
+   // Merge sg2->bm into sg1->bm.
+   DRD_(bm_merge2)(sg1->bm, sg2->bm);
 }
 
 /** Print the vector clock and the bitmap of the specified segment. */
 void DRD_(sg_print)(const Segment* const sg)
 {
-  tl_assert(sg);
-  VG_(printf)("vc: ");
-  DRD_(vc_print)(&sg->vc);
-  VG_(printf)("\n");
-  DRD_(bm_print)(sg->bm);
+   tl_assert(sg);
+   VG_(printf)("vc: ");
+   DRD_(vc_print)(&sg->vc);
+   VG_(printf)("\n");
+   DRD_(bm_print)(sg->bm);
 }
 
 /** Query whether segment tracing has been enabled. */
 Bool DRD_(sg_get_trace)(void)
 {
-  return s_trace_segment;
+   return s_trace_segment;
 }
 
 /** Enable or disable segment tracing. */
 void DRD_(sg_set_trace)(Bool const trace_segment)
 {
-  tl_assert(trace_segment == False || trace_segment == True);
-  s_trace_segment = trace_segment;
+   tl_assert(trace_segment == False || trace_segment == True);
+   s_trace_segment = trace_segment;
 }
 
 ULong DRD_(sg_get_segments_created_count)(void)
 {
-  return s_segments_created_count;
+   return s_segments_created_count;
 }
 
 ULong DRD_(sg_get_segments_alive_count)(void)
 {
-  return s_segments_alive_count;
+   return s_segments_alive_count;
 }
 
 ULong DRD_(sg_get_max_segments_alive_count)(void)
 {
-  return s_max_segments_alive_count;
+   return s_max_segments_alive_count;
 }
diff --git a/drd/drd_segment.h b/drd/drd_segment.h
index 012cca6..8658fae 100644
--- a/drd/drd_segment.h
+++ b/drd/drd_segment.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -41,20 +42,20 @@
 
 typedef struct segment
 {
-  /** Pointers to next and previous segments executed by the same thread. */
-  struct segment*    next;
-  struct segment*    prev;
-  /** Reference count: number of pointers that point to this segment. */
-  int                refcnt;
-  /** Stack trace of the first instruction of the segment. */
-  ExeContext*        stacktrace;
-  /** Vector clock associated with the segment. */
-  VectorClock        vc;
-  /**
-   * Bitmap representing the memory accesses by the instructions associated
-   * with the segment.
-   */
-  struct bitmap*     bm;
+   /** Pointers to next and previous segments executed by the same thread. */
+   struct segment*    next;
+   struct segment*    prev;
+   /** Reference count: number of pointers that point to this segment. */
+   int                refcnt;
+   /** Stack trace of the first instruction of the segment. */
+   ExeContext*        stacktrace;
+   /** Vector clock associated with the segment. */
+   VectorClock        vc;
+   /**
+    * Bitmap representing the memory accesses by the instructions associated
+    * with the segment.
+    */
+   struct bitmap*     bm;
 } Segment;
 
 
diff --git a/drd/drd_semaphore.c b/drd/drd_semaphore.c
index 02d5c7d..362a9bd 100644
--- a/drd/drd_semaphore.c
+++ b/drd/drd_semaphore.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -50,42 +51,42 @@
 /** Push a segment at the end of the queue 'p->last_sem_post_seg'. */
 static void DRD_(segment_push)(struct semaphore_info* p, Segment* sg)
 {
-  Word n;
+   Word n;
 
-  tl_assert(sg);
-  n = VG_(addToXA)(p->last_sem_post_seg, &sg);
+   tl_assert(sg);
+   n = VG_(addToXA)(p->last_sem_post_seg, &sg);
 #if 0
-  VG_(message)(Vg_UserMsg, "0x%lx push: added at position %ld/%ld",
-               p->a1, n, VG_(sizeXA)(p->last_sem_post_seg));
+   VG_(message)(Vg_UserMsg, "0x%lx push: added at position %ld/%ld",
+                p->a1, n, VG_(sizeXA)(p->last_sem_post_seg));
 #endif
-  tl_assert(*(Segment**)VG_(indexXA)(p->last_sem_post_seg, n) == sg);
+   tl_assert(*(Segment**)VG_(indexXA)(p->last_sem_post_seg, n) == sg);
 }
 
 /** Pop a segment from the beginning of the queue 'p->last_sem_post_seg'. */
 static Segment* DRD_(segment_pop)(struct semaphore_info* p)
 {
-  Word sz;
-  Segment* sg;
+   Word sz;
+   Segment* sg;
 
-  sz = VG_(sizeXA)(p->last_sem_post_seg);
+   sz = VG_(sizeXA)(p->last_sem_post_seg);
 #if 0
-  VG_(message)(Vg_UserMsg, "0x%lx pop:  removed from position %ld/%ld",
-               p->a1, sz - 1, sz);
+   VG_(message)(Vg_UserMsg, "0x%lx pop:  removed from position %ld/%ld",
+                p->a1, sz - 1, sz);
 #endif
-  sg = 0;
-  if (sz > 0)
-  {
-    sg = *(Segment**)VG_(indexXA)(p->last_sem_post_seg, sz - 1);
-    tl_assert(sg);
-    VG_(dropTailXA)(p->last_sem_post_seg, 1);
-  }
-  return sg;
+   sg = 0;
+   if (sz > 0)
+   {
+      sg = *(Segment**)VG_(indexXA)(p->last_sem_post_seg, sz - 1);
+      tl_assert(sg);
+      VG_(dropTailXA)(p->last_sem_post_seg, 1);
+   }
+   return sg;
 }
 
 /** Enable or disable tracing of semaphore actions. */
 void DRD_(semaphore_set_trace)(const Bool trace_semaphore)
 {
-  s_trace_semaphore = trace_semaphore;
+   s_trace_semaphore = trace_semaphore;
 }
 
 /**
@@ -96,18 +97,18 @@
 void DRD_(semaphore_initialize)(struct semaphore_info* const p,
                                 const Addr semaphore)
 {
-  tl_assert(semaphore != 0);
-  tl_assert(p->a1 == semaphore);
-  tl_assert(p->type == ClientSemaphore);
+   tl_assert(semaphore != 0);
+   tl_assert(p->a1 == semaphore);
+   tl_assert(p->type == ClientSemaphore);
 
-  p->cleanup           = (void(*)(DrdClientobj*))semaphore_cleanup;
-  p->delete_thread     = 0;
-  p->waits_to_skip     = 0;
-  p->value             = 0;
-  p->waiters           = 0;
-  p->last_sem_post_tid = DRD_INVALID_THREADID;
-  p->last_sem_post_seg = VG_(newXA)(VG_(malloc), "drd.sg-stack",
-                                    VG_(free), sizeof(Segment*));
+   p->cleanup           = (void(*)(DrdClientobj*))semaphore_cleanup;
+   p->delete_thread     = 0;
+   p->waits_to_skip     = 0;
+   p->value             = 0;
+   p->waiters           = 0;
+   p->last_sem_post_tid = DRD_INVALID_THREADID;
+   p->last_sem_post_seg = VG_(newXA)(VG_(malloc), "drd.sg-stack",
+                                     VG_(free), sizeof(Segment*));
 }
 
 /**
@@ -116,21 +117,21 @@
  */
 static void semaphore_cleanup(struct semaphore_info* p)
 {
-  Segment* sg;
+   Segment* sg;
 
-  if (p->waiters > 0)
-  {
-    SemaphoreErrInfo sei = { p->a1 };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            SemaphoreErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Destruction of semaphore that is being waited"
-                            " upon",
-                            &sei);
-  }
-  while ((sg = DRD_(segment_pop)(p)))
-    DRD_(sg_put)(sg);
-  VG_(deleteXA)(p->last_sem_post_seg);
+   if (p->waiters > 0)
+   {
+      SemaphoreErrInfo sei = { p->a1 };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              SemaphoreErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Destruction of semaphore that is being waited"
+                              " upon",
+                              &sei);
+   }
+   while ((sg = DRD_(segment_pop)(p)))
+      DRD_(sg_put)(sg);
+   VG_(deleteXA)(p->last_sem_post_seg);
 }
 
 /**
@@ -142,17 +143,17 @@
 struct semaphore_info*
 DRD_(semaphore_get_or_allocate)(const Addr semaphore)
 {
-  struct semaphore_info *p;
+   struct semaphore_info *p;
 
-  tl_assert(offsetof(DrdClientobj, semaphore) == 0);
-  p = &(DRD_(clientobj_get)(semaphore, ClientSemaphore)->semaphore);
-  if (p == 0)
-  {
-    tl_assert(offsetof(DrdClientobj, semaphore) == 0);
-    p = &(DRD_(clientobj_add)(semaphore, ClientSemaphore)->semaphore);
-    DRD_(semaphore_initialize)(p, semaphore);
-  }
-  return p;
+   tl_assert(offsetof(DrdClientobj, semaphore) == 0);
+   p = &(DRD_(clientobj_get)(semaphore, ClientSemaphore)->semaphore);
+   if (p == 0)
+   {
+      tl_assert(offsetof(DrdClientobj, semaphore) == 0);
+      p = &(DRD_(clientobj_add)(semaphore, ClientSemaphore)->semaphore);
+      DRD_(semaphore_initialize)(p, semaphore);
+   }
+   return p;
 }
 
 /**
@@ -161,8 +162,8 @@
  */
 static struct semaphore_info* DRD_(semaphore_get)(const Addr semaphore)
 {
-  tl_assert(offsetof(DrdClientobj, semaphore) == 0);
-  return &(DRD_(clientobj_get)(semaphore, ClientSemaphore)->semaphore);
+   tl_assert(offsetof(DrdClientobj, semaphore) == 0);
+   return &(DRD_(clientobj_get)(semaphore, ClientSemaphore)->semaphore);
 }
 
 /** Called before sem_init(). */
@@ -170,85 +171,85 @@
                                             const Word pshared,
                                             const UInt value)
 {
-  struct semaphore_info* p;
-  Segment* sg;
+   struct semaphore_info* p;
+   Segment* sg;
 
-  if (s_trace_semaphore)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] semaphore_init      0x%lx value %u",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 semaphore,
-                 value);
-  }
-  p = DRD_(semaphore_get)(semaphore);
-  if (p)
-  {
-    const ThreadId vg_tid = VG_(get_running_tid)();
-    SemaphoreErrInfo SEI = { semaphore };
-    VG_(maybe_record_error)(vg_tid,
-                            SemaphoreErr,
-                            VG_(get_IP)(vg_tid),
-                            "Semaphore reinitialization",
-                            &SEI);
-    // Remove all segments from the segment stack.
-    while ((sg = DRD_(segment_pop)(p)))
-    {
-      DRD_(sg_put)(sg);
-    }
-  }
-  else
-  {
-    p = DRD_(semaphore_get_or_allocate)(semaphore);
-  }
-  tl_assert(p);
-  p->waits_to_skip = value;
-  p->value         = value;
-  return p;
+   if (s_trace_semaphore)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] semaphore_init      0x%lx value %u",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   semaphore,
+                   value);
+   }
+   p = DRD_(semaphore_get)(semaphore);
+   if (p)
+   {
+      const ThreadId vg_tid = VG_(get_running_tid)();
+      SemaphoreErrInfo SEI = { semaphore };
+      VG_(maybe_record_error)(vg_tid,
+                              SemaphoreErr,
+                              VG_(get_IP)(vg_tid),
+                              "Semaphore reinitialization",
+                              &SEI);
+      // Remove all segments from the segment stack.
+      while ((sg = DRD_(segment_pop)(p)))
+      {
+         DRD_(sg_put)(sg);
+      }
+   }
+   else
+   {
+      p = DRD_(semaphore_get_or_allocate)(semaphore);
+   }
+   tl_assert(p);
+   p->waits_to_skip = value;
+   p->value         = value;
+   return p;
 }
 
 /** Called after sem_destroy(). */
 void DRD_(semaphore_destroy)(const Addr semaphore)
 {
-  struct semaphore_info* p;
+   struct semaphore_info* p;
 
-  p = DRD_(semaphore_get)(semaphore);
+   p = DRD_(semaphore_get)(semaphore);
 
-  if (s_trace_semaphore)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] semaphore_destroy   0x%lx value %u",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 semaphore,
-                 p ? p->value : 0);
-  }
+   if (s_trace_semaphore)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] semaphore_destroy   0x%lx value %u",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   semaphore,
+                   p ? p->value : 0);
+   }
 
-  if (p == 0)
-  {
-    GenericErrInfo GEI;
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            GenericErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Not a semaphore",
-                            &GEI);
-    return;
-  }
+   if (p == 0)
+   {
+      GenericErrInfo GEI;
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              GenericErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Not a semaphore",
+                              &GEI);
+      return;
+   }
 
-  DRD_(clientobj_remove)(semaphore, ClientSemaphore);
+   DRD_(clientobj_remove)(semaphore, ClientSemaphore);
 }
 
 /** Called before sem_wait(). */
 void DRD_(semaphore_pre_wait)(const Addr semaphore)
 {
-  struct semaphore_info* p;
+   struct semaphore_info* p;
 
-  p = DRD_(semaphore_get_or_allocate)(semaphore);
-  tl_assert(p);
-  tl_assert((int)p->waiters >= 0);
-  p->waiters++;
-  tl_assert(p->waiters > 0);
+   p = DRD_(semaphore_get_or_allocate)(semaphore);
+   tl_assert(p);
+   tl_assert((int)p->waiters >= 0);
+   p->waiters++;
+   tl_assert(p->waiters > 0);
 }
 
 /**
@@ -259,101 +260,101 @@
 void DRD_(semaphore_post_wait)(const DrdThreadId tid, const Addr semaphore,
                                const Bool waited)
 {
-  struct semaphore_info* p;
-  Segment* sg;
+   struct semaphore_info* p;
+   Segment* sg;
 
-  p = DRD_(semaphore_get)(semaphore);
-  if (s_trace_semaphore)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] semaphore_wait      0x%lx value %u -> %u",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 semaphore,
-                 p ? p->value : 0,
-                 p ? p->value - 1 : 0);
-  }
-  tl_assert(p);
-  tl_assert(p->waiters > 0);
-  p->waiters--;
-  tl_assert((int)p->waiters >= 0);
-  tl_assert((int)p->value >= 0);
-  if (p->value == 0)
-  {
-    SemaphoreErrInfo sei = { semaphore };
-    VG_(maybe_record_error)(VG_(get_running_tid)(),
-                            SemaphoreErr,
-                            VG_(get_IP)(VG_(get_running_tid)()),
-                            "Invalid semaphore",
-                            &sei);
-    return;
-  }
-  p->value--;
-  tl_assert((int)p->value >= 0);
-  if (p->waits_to_skip > 0)
-    p->waits_to_skip--;
-  else
-  {
-    sg = DRD_(segment_pop)(p);
-    tl_assert(sg);
-    if (sg)
-    {
-      if (p->last_sem_post_tid != tid
-          && p->last_sem_post_tid != DRD_INVALID_THREADID)
+   p = DRD_(semaphore_get)(semaphore);
+   if (s_trace_semaphore)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] semaphore_wait      0x%lx value %u -> %u",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   semaphore,
+                   p ? p->value : 0,
+                   p ? p->value - 1 : 0);
+   }
+   tl_assert(p);
+   tl_assert(p->waiters > 0);
+   p->waiters--;
+   tl_assert((int)p->waiters >= 0);
+   tl_assert((int)p->value >= 0);
+   if (p->value == 0)
+   {
+      SemaphoreErrInfo sei = { semaphore };
+      VG_(maybe_record_error)(VG_(get_running_tid)(),
+                              SemaphoreErr,
+                              VG_(get_IP)(VG_(get_running_tid)()),
+                              "Invalid semaphore",
+                              &sei);
+      return;
+   }
+   p->value--;
+   tl_assert((int)p->value >= 0);
+   if (p->waits_to_skip > 0)
+      p->waits_to_skip--;
+   else
+   {
+      sg = DRD_(segment_pop)(p);
+      tl_assert(sg);
+      if (sg)
       {
-        DRD_(thread_combine_vc2)(tid, &sg->vc);
+         if (p->last_sem_post_tid != tid
+             && p->last_sem_post_tid != DRD_INVALID_THREADID)
+         {
+            DRD_(thread_combine_vc2)(tid, &sg->vc);
+         }
+         DRD_(sg_put)(sg);
+         DRD_(thread_new_segment)(tid);
+         s_semaphore_segment_creation_count++;
       }
-      DRD_(sg_put)(sg);
-      DRD_(thread_new_segment)(tid);
-      s_semaphore_segment_creation_count++;
-    }
-  }
+   }
 }
 
 /** Called before sem_post(). */
 void DRD_(semaphore_pre_post)(const DrdThreadId tid, const Addr semaphore)
 {
-  struct semaphore_info* p;
-  Segment* sg;
+   struct semaphore_info* p;
+   Segment* sg;
 
-  p = DRD_(semaphore_get_or_allocate)(semaphore);
-  p->value++;
+   p = DRD_(semaphore_get_or_allocate)(semaphore);
+   p->value++;
 
-  if (s_trace_semaphore)
-  {
-    VG_(message)(Vg_UserMsg,
-                 "[%d/%d] semaphore_post      0x%lx value %u -> %u",
-                 VG_(get_running_tid)(),
-                 DRD_(thread_get_running_tid)(),
-                 semaphore,
-                 p->value - 1, p->value);
-  }
+   if (s_trace_semaphore)
+   {
+      VG_(message)(Vg_UserMsg,
+                   "[%d/%d] semaphore_post      0x%lx value %u -> %u",
+                   VG_(get_running_tid)(),
+                   DRD_(thread_get_running_tid)(),
+                   semaphore,
+                   p->value - 1, p->value);
+   }
 
-  p->last_sem_post_tid = tid;
-  DRD_(thread_new_segment)(tid);
-  sg = 0;
-  DRD_(thread_get_latest_segment)(&sg, tid);
-  tl_assert(sg);
-  DRD_(segment_push)(p, sg);
-  s_semaphore_segment_creation_count++;
+   p->last_sem_post_tid = tid;
+   DRD_(thread_new_segment)(tid);
+   sg = 0;
+   DRD_(thread_get_latest_segment)(&sg, tid);
+   tl_assert(sg);
+   DRD_(segment_push)(p, sg);
+   s_semaphore_segment_creation_count++;
 }
 
 /** Called after sem_post() finished successfully. */
 void DRD_(semaphore_post_post)(const DrdThreadId tid, const Addr semaphore,
                                const Bool waited)
 {
-  /* Note: it is hard to implement the sem_post() wrapper correctly in     */
-  /* case sem_post() returns an error code. This is because handling this  */
-  /* case correctly requires restoring the vector clock associated with    */
-  /* the semaphore to its original value here. In order to do that without */
-  /* introducing a race condition, extra locking has to be added around    */
-  /* each semaphore call. Such extra locking would have to be added in     */
-  /* drd_intercepts.c. However, it is hard to implement synchronization    */
-  /* in drd_intercepts.c in a portable way without calling already         */
-  /* redirected functions.                                                 */
+   /* Note: it is hard to implement the sem_post() wrapper correctly in     */
+   /* case sem_post() returns an error code. This is because handling this  */
+   /* case correctly requires restoring the vector clock associated with    */
+   /* the semaphore to its original value here. In order to do that without */
+   /* introducing a race condition, extra locking has to be added around    */
+   /* each semaphore call. Such extra locking would have to be added in     */
+   /* drd_intercepts.c. However, it is hard to implement synchronization    */
+   /* in drd_intercepts.c in a portable way without calling already         */
+   /* redirected functions.                                                 */
 }
 
 ULong DRD_(get_semaphore_segment_creation_count)(void)
 {
-  return s_semaphore_segment_creation_count;
+   return s_semaphore_segment_creation_count;
 }
diff --git a/drd/drd_semaphore.h b/drd/drd_semaphore.h
index e6b248f..b9cc1e1 100644
--- a/drd/drd_semaphore.h
+++ b/drd/drd_semaphore.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
diff --git a/drd/drd_strmem_intercepts.c b/drd/drd_strmem_intercepts.c
index ca498eb..5d23df3 100644
--- a/drd/drd_strmem_intercepts.c
+++ b/drd/drd_strmem_intercepts.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 
 /*--------------------------------------------------------------------*/
 /*--- Replacements for strlen() and strnlen(), which run on the    ---*/
@@ -5,30 +6,30 @@
 /*--------------------------------------------------------------------*/
 
 /*
-   This file is part of DRD, a heavyweight Valgrind tool for
-   detecting threading errors. The code below has been extracted
-   from memchec/mc_replace_strmem.c, which has the following copyright
-   notice:
+  This file is part of DRD, a heavyweight Valgrind tool for
+  detecting threading errors. The code below has been extracted
+  from memcheck/mc_replace_strmem.c, which has the following copyright
+  notice:
 
-   Copyright (C) 2000-2009 Julian Seward 
-      jseward@acm.org
+  Copyright (C) 2000-2009 Julian Seward 
+  jseward@acm.org
 
-   This program is free software; you can redistribute it and/or
-   modify it under the terms of the GNU General Public License as
-   published by the Free Software Foundation; either version 2 of the
-   License, or (at your option) any later version.
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
 
-   This program is distributed in the hope that it will be useful, but
-   WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   General Public License for more details.
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
 
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307, USA.
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
 
-   The GNU General Public License is contained in the file COPYING.
+  The GNU General Public License is contained in the file COPYING.
 */
 
 #include "pub_tool_basics.h"
@@ -38,13 +39,13 @@
 #include "valgrind.h"
 
 
-#define STRNLEN(soname, fnname) \
+#define STRNLEN(soname, fnname)                                         \
    SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname) ( const char* str, SizeT n ); \
    SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname) ( const char* str, SizeT n ) \
-   { \
-      SizeT i = 0; \
-      while (i < n && str[i] != 0) i++; \
-      return i; \
+   {                                                                    \
+      SizeT i = 0;                                                      \
+      while (i < n && str[i] != 0) i++;                                 \
+      return i;                                                         \
    }
 
 STRNLEN(VG_Z_LIBC_SONAME, strnlen)
@@ -54,13 +55,13 @@
 // calls to strlen() with its own built-in version.  This can be very
 // confusing if you aren't expecting it.  Other small functions in this file
 // may also be inline by gcc.
-#define STRLEN(soname, fnname) \
-   SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* str ); \
-   SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* str ) \
-   { \
-      SizeT i = 0; \
-      while (str[i] != 0) i++; \
-      return i; \
+#define STRLEN(soname, fnname)                                          \
+   SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* str );      \
+   SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* str )       \
+   {                                                                    \
+      SizeT i = 0;                                                      \
+      while (str[i] != 0) i++;                                          \
+      return i;                                                         \
    }
 
 STRLEN(VG_Z_LIBC_SONAME,          strlen)
diff --git a/drd/drd_suppression.c b/drd/drd_suppression.c
index 0ed2e55..8b7a55e 100644
--- a/drd/drd_suppression.c
+++ b/drd/drd_suppression.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -50,42 +51,42 @@
 
 void DRD_(suppression_init)(void)
 {
-  tl_assert(DRD_(s_suppressed) == 0);
-  DRD_(s_suppressed) = DRD_(bm_new)();
-  tl_assert(DRD_(s_suppressed));
+   tl_assert(DRD_(s_suppressed) == 0);
+   DRD_(s_suppressed) = DRD_(bm_new)();
+   tl_assert(DRD_(s_suppressed));
 }
 
 void DRD_(start_suppression)(const Addr a1, const Addr a2,
                              const char* const reason)
 {
-  if (DRD_(s_trace_suppression))
-  {
-    VG_(message)(Vg_DebugMsg, "start suppression of 0x%lx sz %ld (%s)",
-                 a1, a2 - a1, reason);
-  }
+   if (DRD_(s_trace_suppression))
+   {
+      VG_(message)(Vg_DebugMsg, "start suppression of 0x%lx sz %ld (%s)",
+                   a1, a2 - a1, reason);
+   }
 
-  tl_assert(a1 < a2);
-  // tl_assert(! drd_is_any_suppressed(a1, a2));
-  DRD_(bm_access_range_store)(DRD_(s_suppressed), a1, a2);
+   tl_assert(a1 < a2);
+   // tl_assert(! drd_is_any_suppressed(a1, a2));
+   DRD_(bm_access_range_store)(DRD_(s_suppressed), a1, a2);
 }
 
 void DRD_(finish_suppression)(const Addr a1, const Addr a2)
 {
-  if (DRD_(s_trace_suppression))
-  {
-    VG_(message)(Vg_DebugMsg, "finish suppression of 0x%lx sz %ld",
-                 a1, a2 - a1);
-    VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 12);   
-  }
+   if (DRD_(s_trace_suppression))
+   {
+      VG_(message)(Vg_DebugMsg, "finish suppression of 0x%lx sz %ld",
+                   a1, a2 - a1);
+      VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 12);   
+   }
 
-  tl_assert(a1 < a2);
-  if (! DRD_(is_suppressed)(a1, a2))
-  {
-     VG_(message)(Vg_DebugMsg, "?? [0x%lx,0x%lx[ not suppressed ??", a1, a2);
-     VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 12);
-     tl_assert(False);
-  }
-  DRD_(bm_clear_store)(DRD_(s_suppressed), a1, a2);
+   tl_assert(a1 < a2);
+   if (! DRD_(is_suppressed)(a1, a2))
+   {
+      VG_(message)(Vg_DebugMsg, "?? [0x%lx,0x%lx[ not suppressed ??", a1, a2);
+      VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 12);
+      tl_assert(False);
+   }
+   DRD_(bm_clear_store)(DRD_(s_suppressed), a1, a2);
 }
 
 /**
@@ -95,7 +96,7 @@
  */
 Bool DRD_(is_suppressed)(const Addr a1, const Addr a2)
 {
-  return DRD_(bm_has)(DRD_(s_suppressed), a1, a2, eStore);
+   return DRD_(bm_has)(DRD_(s_suppressed), a1, a2, eStore);
 }
 
 /**
@@ -105,53 +106,53 @@
  */
 Bool DRD_(is_any_suppressed)(const Addr a1, const Addr a2)
 {
-  return DRD_(bm_has_any_store)(DRD_(s_suppressed), a1, a2);
+   return DRD_(bm_has_any_store)(DRD_(s_suppressed), a1, a2);
 }
 
 void DRD_(start_tracing_address_range)(const Addr a1, const Addr a2)
 {
-  tl_assert(a1 < a2);
+   tl_assert(a1 < a2);
 
-  DRD_(bm_access_range_load)(DRD_(s_suppressed), a1, a2);
-  if (! DRD_(g_any_address_traced))
-  {
-    DRD_(g_any_address_traced) = True;
-  }
+   DRD_(bm_access_range_load)(DRD_(s_suppressed), a1, a2);
+   if (! DRD_(g_any_address_traced))
+   {
+      DRD_(g_any_address_traced) = True;
+   }
 }
 
 void DRD_(stop_tracing_address_range)(const Addr a1, const Addr a2)
 {
-  tl_assert(a1 < a2);
+   tl_assert(a1 < a2);
 
-  DRD_(bm_clear_load)(DRD_(s_suppressed), a1, a2);
-  if (DRD_(g_any_address_traced))
-  {
-    DRD_(g_any_address_traced)
-      = DRD_(bm_has_any_load)(DRD_(s_suppressed), 0, ~(Addr)0);
-  }
+   DRD_(bm_clear_load)(DRD_(s_suppressed), a1, a2);
+   if (DRD_(g_any_address_traced))
+   {
+      DRD_(g_any_address_traced)
+         = DRD_(bm_has_any_load)(DRD_(s_suppressed), 0, ~(Addr)0);
+   }
 }
 
 Bool DRD_(is_any_traced)(const Addr a1, const Addr a2)
 {
-  return DRD_(bm_has_any_load)(DRD_(s_suppressed), a1, a2);
+   return DRD_(bm_has_any_load)(DRD_(s_suppressed), a1, a2);
 }
 
 void DRD_(suppression_stop_using_mem)(const Addr a1, const Addr a2)
 {
-  if (DRD_(s_trace_suppression))
-  {
-    Addr b;
-    for (b = a1; b < a2; b++)
-    {
-      if (DRD_(bm_has_1)(DRD_(s_suppressed), b, eStore))
+   if (DRD_(s_trace_suppression))
+   {
+      Addr b;
+      for (b = a1; b < a2; b++)
       {
-        VG_(message)(Vg_DebugMsg,
-                     "stop_using_mem(0x%lx, %ld) finish suppression of 0x%lx",
-                     a1, a2 - a1, b);
+         if (DRD_(bm_has_1)(DRD_(s_suppressed), b, eStore))
+         {
+            VG_(message)(Vg_DebugMsg,
+                         "stop_using_mem(0x%lx, %ld) finish suppression of"
+                         " 0x%lx", a1, a2 - a1, b);
+         }
       }
-    }
-  }
-  tl_assert(a1);
-  tl_assert(a1 < a2);
-  DRD_(bm_clear)(DRD_(s_suppressed), a1, a2);
+   }
+   tl_assert(a1);
+   tl_assert(a1 < a2);
+   DRD_(bm_clear)(DRD_(s_suppressed), a1, a2);
 }
diff --git a/drd/drd_suppression.h b/drd/drd_suppression.h
index 9484822..9d93b94 100644
--- a/drd/drd_suppression.h
+++ b/drd/drd_suppression.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 #ifndef __PUB_CORE_DRD_H
 #define __PUB_CORE_DRD_H
 
@@ -12,7 +13,7 @@
 void DRD_(suppression_set_trace)(const Bool trace_suppression);
 void DRD_(suppression_init)(void);
 void DRD_(start_suppression)(const Addr a1, const Addr a2,
-                           const char* const reason);
+                             const char* const reason);
 void DRD_(finish_suppression)(const Addr a1, const Addr a2);
 Bool DRD_(is_suppressed)(const Addr a1, const Addr a2);
 Bool DRD_(is_any_suppressed)(const Addr a1, const Addr a2);
@@ -24,7 +25,7 @@
 
 static __inline__ Bool DRD_(any_address_is_traced)(void)
 {
-  return DRD_(g_any_address_traced);
+   return DRD_(g_any_address_traced);
 }
 
 
diff --git a/drd/drd_thread.c b/drd/drd_thread.c
index 90bd110..2b692b1 100644
--- a/drd/drd_thread.c
+++ b/drd/drd_thread.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -77,35 +78,35 @@
 /** Enables/disables context switch tracing. */
 void DRD_(thread_trace_context_switches)(const Bool t)
 {
-  tl_assert(t == False || t == True);
-  s_trace_context_switches = t;
+   tl_assert(t == False || t == True);
+   s_trace_context_switches = t;
 }
 
 /** Enables/disables conflict set tracing. */
 void DRD_(thread_trace_conflict_set)(const Bool t)
 {
-  tl_assert(t == False || t == True);
-  s_trace_conflict_set = t;
+   tl_assert(t == False || t == True);
+   s_trace_conflict_set = t;
 }
 
 /** Report whether fork/join tracing is enabled. */
 Bool DRD_(thread_get_trace_fork_join)(void)
 {
-  return s_trace_fork_join;
+   return s_trace_fork_join;
 }
 
 /** Enables/disables fork/join tracing. */
 void DRD_(thread_set_trace_fork_join)(const Bool t)
 {
-  tl_assert(t == False || t == True);
-  s_trace_fork_join = t;
+   tl_assert(t == False || t == True);
+   s_trace_fork_join = t;
 }
 
 /** Enables/disables segment merging. */
 void DRD_(thread_set_segment_merging)(const Bool m)
 {
-  tl_assert(m == False || m == True);
-  s_segment_merging = m;
+   tl_assert(m == False || m == True);
+   s_segment_merging = m;
 }
 
 /**
@@ -116,87 +117,87 @@
  */
 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
 {
-  int i;
+   int i;
 
-  if (tid == VG_INVALID_THREADID)
-    return DRD_INVALID_THREADID;
+   if (tid == VG_INVALID_THREADID)
+      return DRD_INVALID_THREADID;
 
-  for (i = 1; i < DRD_N_THREADS; i++)
-  {
-    if (DRD_(g_threadinfo)[i].vg_thread_exists == True
-        && DRD_(g_threadinfo)[i].vg_threadid == tid)
-    {
-      return i;
-    }
-  }
+   for (i = 1; i < DRD_N_THREADS; i++)
+   {
+      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
+          && DRD_(g_threadinfo)[i].vg_threadid == tid)
+      {
+         return i;
+      }
+   }
 
-  return DRD_INVALID_THREADID;
+   return DRD_INVALID_THREADID;
 }
 
 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
 {
-  int i;
+   int i;
 
-  tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
+   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
 
-  for (i = 1; i < DRD_N_THREADS; i++)
-  {
-    if (DRD_(g_threadinfo)[i].vg_thread_exists == False
-        && DRD_(g_threadinfo)[i].posix_thread_exists == False
-        && DRD_(g_threadinfo)[i].detached_posix_thread == False)
-    {
-      tl_assert(! DRD_(IsValidDrdThreadId)(i));
+   for (i = 1; i < DRD_N_THREADS; i++)
+   {
+      if (DRD_(g_threadinfo)[i].vg_thread_exists == False
+          && DRD_(g_threadinfo)[i].posix_thread_exists == False
+          && DRD_(g_threadinfo)[i].detached_posix_thread == False)
+      {
+         tl_assert(! DRD_(IsValidDrdThreadId)(i));
 
-      DRD_(g_threadinfo)[i].vg_thread_exists = True;
-      DRD_(g_threadinfo)[i].vg_threadid   = tid;
-      DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
-      DRD_(g_threadinfo)[i].stack_min     = 0;
-      DRD_(g_threadinfo)[i].stack_min_min = 0;
-      DRD_(g_threadinfo)[i].stack_startup = 0;
-      DRD_(g_threadinfo)[i].stack_max     = 0;
-      DRD_(g_threadinfo)[i].is_recording  = True;
-      DRD_(g_threadinfo)[i].synchr_nesting = 0;
-      tl_assert(DRD_(g_threadinfo)[i].first == 0);
-      tl_assert(DRD_(g_threadinfo)[i].last == 0);
+         DRD_(g_threadinfo)[i].vg_thread_exists = True;
+         DRD_(g_threadinfo)[i].vg_threadid   = tid;
+         DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
+         DRD_(g_threadinfo)[i].stack_min     = 0;
+         DRD_(g_threadinfo)[i].stack_min_min = 0;
+         DRD_(g_threadinfo)[i].stack_startup = 0;
+         DRD_(g_threadinfo)[i].stack_max     = 0;
+         DRD_(g_threadinfo)[i].is_recording  = True;
+         DRD_(g_threadinfo)[i].synchr_nesting = 0;
+         tl_assert(DRD_(g_threadinfo)[i].first == 0);
+         tl_assert(DRD_(g_threadinfo)[i].last == 0);
 
-      tl_assert(DRD_(IsValidDrdThreadId)(i));
+         tl_assert(DRD_(IsValidDrdThreadId)(i));
 
-      return i;
-    }
-  }
+         return i;
+      }
+   }
 
-  tl_assert(False);
+   tl_assert(False);
 
-  return DRD_INVALID_THREADID;
+   return DRD_INVALID_THREADID;
 }
 
 /** Convert a POSIX thread ID into a DRD thread ID. */
 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
 {
-  int i;
+   int i;
 
-  tl_assert(tid != INVALID_POSIX_THREADID);
+   tl_assert(tid != INVALID_POSIX_THREADID);
 
-  for (i = 1; i < DRD_N_THREADS; i++)
-  {
-    if (DRD_(g_threadinfo)[i].posix_thread_exists
-        && DRD_(g_threadinfo)[i].pt_threadid == tid)
-    {
-      return i;
-    }
-  }
-  return DRD_INVALID_THREADID;
+   for (i = 1; i < DRD_N_THREADS; i++)
+   {
+      if (DRD_(g_threadinfo)[i].posix_thread_exists
+          && DRD_(g_threadinfo)[i].pt_threadid == tid)
+      {
+         return i;
+      }
+   }
+   return DRD_INVALID_THREADID;
 }
 
 /** Convert a DRD thread ID into a Valgrind thread ID. */
 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  return (DRD_(g_threadinfo)[tid].vg_thread_exists
-          ? DRD_(g_threadinfo)[tid].vg_threadid
-          : VG_INVALID_THREADID);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   return (DRD_(g_threadinfo)[tid].vg_thread_exists
+           ? DRD_(g_threadinfo)[tid].vg_threadid
+           : VG_INVALID_THREADID);
 }
 
 #if 0
@@ -207,20 +208,20 @@
  */
 static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
 {
-  Segment* p;
-  for (p = ti->first; p; p = p->next) {
-    if (p->next && p->next->prev != p)
-      return False;
-    if (p->next == 0 && p != ti->last)
-      return False;
-  }
-  for (p = ti->last; p; p = p->prev) {
-    if (p->prev && p->prev->next != p)
-      return False;
-    if (p->prev == 0 && p != ti->first)
-      return False;
-  }
-  return True;
+   Segment* p;
+   for (p = ti->first; p; p = p->next) {
+      if (p->next && p->next->prev != p)
+         return False;
+      if (p->next == 0 && p != ti->last)
+         return False;
+   }
+   for (p = ti->last; p; p = p->prev) {
+      if (p->prev && p->prev->next != p)
+         return False;
+      if (p->prev == 0 && p != ti->first)
+         return False;
+   }
+   return True;
 }
 #endif
 
@@ -240,18 +241,18 @@
 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
                                     const ThreadId vg_created)
 {
-  DrdThreadId created;
+   DrdThreadId created;
 
-  tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
-  created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
-  tl_assert(0 <= (int)created && created < DRD_N_THREADS
-            && created != DRD_INVALID_THREADID);
+   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
+   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
+   tl_assert(0 <= (int)created && created < DRD_N_THREADS
+             && created != DRD_INVALID_THREADID);
 
-  tl_assert(DRD_(g_threadinfo)[created].first == 0);
-  tl_assert(DRD_(g_threadinfo)[created].last == 0);
-  thread_append_segment(created, DRD_(sg_new)(creator, created));
+   tl_assert(DRD_(g_threadinfo)[created].first == 0);
+   tl_assert(DRD_(g_threadinfo)[created].last == 0);
+   thread_append_segment(created, DRD_(sg_new)(creator, created));
 
-  return created;
+   return created;
 }
 
 /**
@@ -266,19 +267,24 @@
  */
 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
 {
-  const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
+   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
 
-  tl_assert(0 <= (int)created && created < DRD_N_THREADS
-            && created != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)created && created < DRD_N_THREADS
+             && created != DRD_INVALID_THREADID);
 
-  DRD_(g_threadinfo)[created].stack_max     = VG_(thread_get_stack_max)(vg_created);
-  DRD_(g_threadinfo)[created].stack_startup = DRD_(g_threadinfo)[created].stack_max;
-  DRD_(g_threadinfo)[created].stack_min     = DRD_(g_threadinfo)[created].stack_max;
-  DRD_(g_threadinfo)[created].stack_min_min = DRD_(g_threadinfo)[created].stack_max;
-  DRD_(g_threadinfo)[created].stack_size    = VG_(thread_get_stack_size)(vg_created);
-  tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
+   DRD_(g_threadinfo)[created].stack_max
+      = VG_(thread_get_stack_max)(vg_created);
+   DRD_(g_threadinfo)[created].stack_startup
+      = DRD_(g_threadinfo)[created].stack_max;
+   DRD_(g_threadinfo)[created].stack_min
+      = DRD_(g_threadinfo)[created].stack_max;
+   DRD_(g_threadinfo)[created].stack_min_min
+      = DRD_(g_threadinfo)[created].stack_max;
+   DRD_(g_threadinfo)[created].stack_size
+      = VG_(thread_get_stack_size)(vg_created);
+   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
 
-  return created;
+   return created;
 }
 
 /**
@@ -287,43 +293,43 @@
  */
 void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
 {
-  tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
-  tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
-  DRD_(thread_new_segment)(drd_joinee);
-  DRD_(thread_combine_vc)(drd_joiner, drd_joinee);
-  DRD_(thread_new_segment)(drd_joiner);
+   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
+   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
+   DRD_(thread_new_segment)(drd_joinee);
+   DRD_(thread_combine_vc)(drd_joiner, drd_joinee);
+   DRD_(thread_new_segment)(drd_joiner);
 
-  if (s_trace_fork_join)
-  {
-    const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
-    const ThreadId joinee = DRD_(DrdThreadIdToVgThreadId)(drd_joinee);
-    const unsigned msg_size = 256;
-    char* msg;
+   if (s_trace_fork_join)
+   {
+      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
+      const ThreadId joinee = DRD_(DrdThreadIdToVgThreadId)(drd_joinee);
+      const unsigned msg_size = 256;
+      char* msg;
 
-    msg = VG_(malloc)("drd.main.dptj.1", msg_size);
-    tl_assert(msg);
-    VG_(snprintf)(msg, msg_size,
-                  "drd_post_thread_join joiner = %d/%d, joinee = %d/%d",
-                  joiner, drd_joiner, joinee, drd_joinee);
-    if (joiner)
-    {
-      VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
-                    ", new vc: ");
-      DRD_(vc_snprint)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
-                       DRD_(thread_get_vc)(drd_joiner));
-    }
-    VG_(message)(Vg_DebugMsg, "%s", msg);
-    VG_(free)(msg);
-  }
+      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
+      tl_assert(msg);
+      VG_(snprintf)(msg, msg_size,
+                    "drd_post_thread_join joiner = %d/%d, joinee = %d/%d",
+                    joiner, drd_joiner, joinee, drd_joinee);
+      if (joiner)
+      {
+         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
+                       ", new vc: ");
+         DRD_(vc_snprint)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
+                          DRD_(thread_get_vc)(drd_joiner));
+      }
+      VG_(message)(Vg_DebugMsg, "%s", msg);
+      VG_(free)(msg);
+   }
 
-  if (!  DRD_(get_check_stack_accesses)())
-  {
-    DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
-                             - DRD_(thread_get_stack_size)(drd_joinee),
-                             DRD_(thread_get_stack_max)(drd_joinee));
-  }
-  DRD_(clientobj_delete_thread)(drd_joinee);
-  DRD_(thread_delete)(drd_joinee);
+   if (!  DRD_(get_check_stack_accesses)())
+   {
+      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
+                               - DRD_(thread_get_stack_size)(drd_joinee),
+                               DRD_(thread_get_stack_max)(drd_joinee));
+   }
+   DRD_(clientobj_delete_thread)(drd_joinee);
+   DRD_(thread_delete)(drd_joinee);
 }
 
 /**
@@ -335,19 +341,19 @@
 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
                                     const Addr stack_startup)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
-  tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
-  DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
+   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
+   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
 }
 
 /** Return the stack pointer for the specified thread. */
 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  return DRD_(g_threadinfo)[tid].stack_min;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   return DRD_(g_threadinfo)[tid].stack_min;
 }
 
 /**
@@ -356,25 +362,25 @@
  */
 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  return DRD_(g_threadinfo)[tid].stack_min_min;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   return DRD_(g_threadinfo)[tid].stack_min_min;
 }
 
 /** Return the top address for the stack of the specified thread. */
 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  return DRD_(g_threadinfo)[tid].stack_max;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   return DRD_(g_threadinfo)[tid].stack_max;
 }
 
 /** Return the maximum stack size for the specified thread. */
 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  return DRD_(g_threadinfo)[tid].stack_size;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   return DRD_(g_threadinfo)[tid].stack_size;
 }
 
 /**
@@ -383,26 +389,26 @@
  */
 void DRD_(thread_delete)(const DrdThreadId tid)
 {
-  Segment* sg;
-  Segment* sg_prev;
+   Segment* sg;
+   Segment* sg_prev;
 
-  tl_assert(DRD_(IsValidDrdThreadId)(tid));
+   tl_assert(DRD_(IsValidDrdThreadId)(tid));
 
-  tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
-  for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
-  {
-    sg_prev = sg->prev;
-    sg->prev = 0;
-    sg->next = 0;
-    DRD_(sg_put)(sg);
-  }
-  DRD_(g_threadinfo)[tid].vg_thread_exists = False;
-  DRD_(g_threadinfo)[tid].posix_thread_exists = False;
-  tl_assert(DRD_(g_threadinfo)[tid].detached_posix_thread == False);
-  DRD_(g_threadinfo)[tid].first = 0;
-  DRD_(g_threadinfo)[tid].last = 0;
+   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
+   for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
+   {
+      sg_prev = sg->prev;
+      sg->prev = 0;
+      sg->next = 0;
+      DRD_(sg_put)(sg);
+   }
+   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
+   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
+   tl_assert(DRD_(g_threadinfo)[tid].detached_posix_thread == False);
+   DRD_(g_threadinfo)[tid].first = 0;
+   DRD_(g_threadinfo)[tid].last = 0;
 
-  tl_assert(! DRD_(IsValidDrdThreadId)(tid));
+   tl_assert(! DRD_(IsValidDrdThreadId)(tid));
 }
 
 /**
@@ -412,73 +418,73 @@
  */
 void DRD_(thread_finished)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
 
-  DRD_(g_threadinfo)[tid].vg_thread_exists = False;
+   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
 
-  if (DRD_(g_threadinfo)[tid].detached_posix_thread)
-  {
-    /*
-     * Once a detached thread has finished, its stack is deallocated and
-     * should no longer be taken into account when computing the conflict set.
-     */
-    DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
+   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
+   {
+      /*
+       * Once a detached thread has finished, its stack is deallocated and
+       * should no longer be taken into account when computing the conflict set.
+       */
+      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
 
-    /*
-     * For a detached thread, calling pthread_exit() invalidates the
-     * POSIX thread ID associated with the detached thread. For joinable
-     * POSIX threads however, the POSIX thread ID remains live after the
-     * pthread_exit() call until pthread_join() is called.
-     */
-    DRD_(g_threadinfo)[tid].posix_thread_exists = False;
-  }
+      /*
+       * For a detached thread, calling pthread_exit() invalidates the
+       * POSIX thread ID associated with the detached thread. For joinable
+       * POSIX threads however, the POSIX thread ID remains live after the
+       * pthread_exit() call until pthread_join() is called.
+       */
+      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
+   }
 }
 
 /** Called just before pthread_cancel(). */
 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
 
-  DRD_(g_threadinfo)[tid].synchr_nesting = 0;
+   DRD_(g_threadinfo)[tid].synchr_nesting = 0;
 }
 
 /** Store the POSIX thread ID for the specified thread. */
 void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID);
-  tl_assert(ptid != INVALID_POSIX_THREADID);
-  DRD_(g_threadinfo)[tid].posix_thread_exists = True;
-  DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID);
+   tl_assert(ptid != INVALID_POSIX_THREADID);
+   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
+   DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
 }
 
 /** Returns true for joinable threads and false for detached threads. */
 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
 }
 
 /** Store the thread mode: joinable or detached. */
 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(!! joinable == joinable);
-  tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(!! joinable == joinable);
+   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
 #if 0
-  VG_(message)(Vg_DebugMsg,
-               "thread_set_joinable(%d/%d, %s)",
-               tid,
-               DRD_(g_threadinfo)[tid].vg_threadid,
-               joinable ? "joinable" : "detached");
+   VG_(message)(Vg_DebugMsg,
+                "thread_set_joinable(%d/%d, %s)",
+                tid,
+                DRD_(g_threadinfo)[tid].vg_threadid,
+                joinable ? "joinable" : "detached");
 #endif
-  DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
+   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
 }
 
 /**
@@ -487,16 +493,16 @@
  */
 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
 {
-  tl_assert(vg_tid != VG_INVALID_THREADID);
+   tl_assert(vg_tid != VG_INVALID_THREADID);
 
-  if (vg_tid != s_vg_running_tid)
-  {
-    DRD_(thread_set_running_tid)(vg_tid,
-                                 DRD_(VgThreadIdToDrdThreadId)(vg_tid));
-  }
+   if (vg_tid != s_vg_running_tid)
+   {
+      DRD_(thread_set_running_tid)(vg_tid,
+                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
+   }
 
-  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
-  tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
+   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
+   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
 }
 
 /**
@@ -506,29 +512,29 @@
 void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                   const DrdThreadId drd_tid)
 {
-  tl_assert(vg_tid != VG_INVALID_THREADID);
-  tl_assert(drd_tid != DRD_INVALID_THREADID);
+   tl_assert(vg_tid != VG_INVALID_THREADID);
+   tl_assert(drd_tid != DRD_INVALID_THREADID);
    
-  if (vg_tid != s_vg_running_tid)
-  {
-    if (s_trace_context_switches
-        && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
-    {
-      VG_(message)(Vg_DebugMsg,
-                   "Context switch from thread %d/%d to thread %d/%d;"
-                   " segments: %llu",
-                   s_vg_running_tid, DRD_(g_drd_running_tid),
-                   DRD_(DrdThreadIdToVgThreadId)(drd_tid), drd_tid,
-                   DRD_(sg_get_segments_alive_count)());
-    }
-    s_vg_running_tid = vg_tid;
-    DRD_(g_drd_running_tid) = drd_tid;
-    thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
-    s_context_switch_count++;
-  }
+   if (vg_tid != s_vg_running_tid)
+   {
+      if (s_trace_context_switches
+          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
+      {
+         VG_(message)(Vg_DebugMsg,
+                      "Context switch from thread %d/%d to thread %d/%d;"
+                      " segments: %llu",
+                      s_vg_running_tid, DRD_(g_drd_running_tid),
+                      DRD_(DrdThreadIdToVgThreadId)(drd_tid), drd_tid,
+                      DRD_(sg_get_segments_alive_count)());
+      }
+      s_vg_running_tid = vg_tid;
+      DRD_(g_drd_running_tid) = drd_tid;
+      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
+      s_context_switch_count++;
+   }
 
-  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
-  tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
+   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
+   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
 }
 
 /**
@@ -537,8 +543,8 @@
  */
 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
 {
-  tl_assert(DRD_(IsValidDrdThreadId)(tid));
-  return DRD_(g_threadinfo)[tid].synchr_nesting++;
+   tl_assert(DRD_(IsValidDrdThreadId)(tid));
+   return DRD_(g_threadinfo)[tid].synchr_nesting++;
 }
 
 /**
@@ -547,33 +553,33 @@
  */
 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
 {
-  tl_assert(DRD_(IsValidDrdThreadId)(tid));
-  tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
-  return --DRD_(g_threadinfo)[tid].synchr_nesting;
+   tl_assert(DRD_(IsValidDrdThreadId)(tid));
+   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
+   return --DRD_(g_threadinfo)[tid].synchr_nesting;
 }
 
 /** Returns the synchronization nesting counter. */
 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
 {
-  tl_assert(DRD_(IsValidDrdThreadId)(tid));
-  return DRD_(g_threadinfo)[tid].synchr_nesting;
+   tl_assert(DRD_(IsValidDrdThreadId)(tid));
+   return DRD_(g_threadinfo)[tid].synchr_nesting;
 }
 
 /** Append a new segment at the end of the segment list. */
 static
 void thread_append_segment(const DrdThreadId tid, Segment* const sg)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
-  sg->prev = DRD_(g_threadinfo)[tid].last;
-  sg->next = 0;
-  if (DRD_(g_threadinfo)[tid].last)
-    DRD_(g_threadinfo)[tid].last->next = sg;
-  DRD_(g_threadinfo)[tid].last = sg;
-  if (DRD_(g_threadinfo)[tid].first == 0)
-    DRD_(g_threadinfo)[tid].first = sg;
-  // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+   sg->prev = DRD_(g_threadinfo)[tid].last;
+   sg->next = 0;
+   if (DRD_(g_threadinfo)[tid].last)
+      DRD_(g_threadinfo)[tid].last->next = sg;
+   DRD_(g_threadinfo)[tid].last = sg;
+   if (DRD_(g_threadinfo)[tid].first == 0)
+      DRD_(g_threadinfo)[tid].first = sg;
+   // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
 }
 
 /**
@@ -583,21 +589,21 @@
 static
 void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  //tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   //tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
 
-  if (sg->prev)
-    sg->prev->next = sg->next;
-  if (sg->next)
-    sg->next->prev = sg->prev;
-  if (sg == DRD_(g_threadinfo)[tid].first)
-    DRD_(g_threadinfo)[tid].first = sg->next;
-  if (sg == DRD_(g_threadinfo)[tid].last)
-    DRD_(g_threadinfo)[tid].last = sg->prev;
-  DRD_(sg_put)(sg);
+   if (sg->prev)
+      sg->prev->next = sg->next;
+   if (sg->next)
+      sg->next->prev = sg->prev;
+   if (sg == DRD_(g_threadinfo)[tid].first)
+      DRD_(g_threadinfo)[tid].first = sg->next;
+   if (sg == DRD_(g_threadinfo)[tid].last)
+      DRD_(g_threadinfo)[tid].last = sg->prev;
+   DRD_(sg_put)(sg);
 
-  //tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+   //tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
 }
 
 /**
@@ -606,10 +612,10 @@
  */
 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].last);
-  return &DRD_(g_threadinfo)[tid].last->vc;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].last);
+   return &DRD_(g_threadinfo)[tid].last->vc;
 }
 
 /**
@@ -617,13 +623,13 @@
  */
 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
 {
-  tl_assert(sg);
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].last);
+   tl_assert(sg);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].last);
 
-  DRD_(sg_put)(*sg);
-  *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
+   DRD_(sg_put)(*sg);
+   *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
 }
 
 /**
@@ -634,24 +640,24 @@
  */
 static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
 {
-  unsigned i;
-  Bool first;
-  Segment* latest_sg;
+   unsigned i;
+   Bool first;
+   Segment* latest_sg;
 
-  first = True;
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    latest_sg = DRD_(g_threadinfo)[i].last;
-    if (latest_sg)
-    {
-      if (first)
-        DRD_(vc_assign)(vc, &latest_sg->vc);
-      else
-        DRD_(vc_min)(vc, &latest_sg->vc);
-      first = False;
-    }
-  }
+   first = True;
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      latest_sg = DRD_(g_threadinfo)[i].last;
+      if (latest_sg)
+      {
+         if (first)
+            DRD_(vc_assign)(vc, &latest_sg->vc);
+         else
+            DRD_(vc_min)(vc, &latest_sg->vc);
+         first = False;
+      }
+   }
 }
 
 /**
@@ -661,24 +667,24 @@
  */
 static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
 {
-  unsigned i;
-  Bool first;
-  Segment* latest_sg;
+   unsigned i;
+   Bool first;
+   Segment* latest_sg;
 
-  first = True;
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    latest_sg = DRD_(g_threadinfo)[i].last;
-    if (latest_sg)
-    {
-      if (first)
-        DRD_(vc_assign)(vc, &latest_sg->vc);
-      else
-        DRD_(vc_combine)(vc, &latest_sg->vc);
-      first = False;
-    }
-  }
+   first = True;
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      latest_sg = DRD_(g_threadinfo)[i].last;
+      if (latest_sg)
+      {
+         if (first)
+            DRD_(vc_assign)(vc, &latest_sg->vc);
+         else
+            DRD_(vc_combine)(vc, &latest_sg->vc);
+         first = False;
+      }
+   }
 }
 
 /**
@@ -688,45 +694,45 @@
  */
 static void DRD_(thread_discard_ordered_segments)(void)
 {
-  unsigned i;
-  VectorClock thread_vc_min;
+   unsigned i;
+   VectorClock thread_vc_min;
 
-  s_discard_ordered_segments_count++;
+   s_discard_ordered_segments_count++;
 
-  DRD_(vc_init)(&thread_vc_min, 0, 0);
-  DRD_(thread_compute_minimum_vc)(&thread_vc_min);
-  if (DRD_(sg_get_trace)())
-  {
-    char msg[256];
-    VectorClock thread_vc_max;
+   DRD_(vc_init)(&thread_vc_min, 0, 0);
+   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
+   if (DRD_(sg_get_trace)())
+   {
+      char msg[256];
+      VectorClock thread_vc_max;
 
-    DRD_(vc_init)(&thread_vc_max, 0, 0);
-    DRD_(thread_compute_maximum_vc)(&thread_vc_max);
-    VG_(snprintf)(msg, sizeof(msg),
-                  "Discarding ordered segments -- min vc is ");
-    DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                     &thread_vc_min);
-    VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                  ", max vc is ");
-    DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
-                     &thread_vc_max);
-    VG_(message)(Vg_UserMsg, "%s", msg);
-    DRD_(vc_cleanup)(&thread_vc_max);
-  }
+      DRD_(vc_init)(&thread_vc_max, 0, 0);
+      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
+      VG_(snprintf)(msg, sizeof(msg),
+                    "Discarding ordered segments -- min vc is ");
+      DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                       &thread_vc_min);
+      VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                    ", max vc is ");
+      DRD_(vc_snprint)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+                       &thread_vc_max);
+      VG_(message)(Vg_UserMsg, "%s", msg);
+      DRD_(vc_cleanup)(&thread_vc_max);
+   }
 
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    Segment* sg;
-    Segment* sg_next;
-    for (sg = DRD_(g_threadinfo)[i].first;
-         sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
-         sg = sg_next)
-    {
-      thread_discard_segment(i, sg);
-    }
-  }
-  DRD_(vc_cleanup)(&thread_vc_min);
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      Segment* sg;
+      Segment* sg_next;
+      for (sg = DRD_(g_threadinfo)[i].first;
+           sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
+           sg = sg_next)
+      {
+         thread_discard_segment(i, sg);
+      }
+   }
+   DRD_(vc_cleanup)(&thread_vc_min);
 }
 
 /**
@@ -740,30 +746,30 @@
  */
 static void thread_merge_segments(void)
 {
-  unsigned i;
+   unsigned i;
 
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    Segment* sg;
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      Segment* sg;
 
-    // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
+      // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
 
-    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
-    {
-      if (DRD_(sg_get_refcnt)(sg) == 1
-          && sg->next
-          && DRD_(sg_get_refcnt)(sg->next) == 1
-          && sg->next->next)
+      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
       {
-        /* Merge sg and sg->next into sg. */
-        DRD_(sg_merge)(sg, sg->next);
-        thread_discard_segment(i, sg->next);
+         if (DRD_(sg_get_refcnt)(sg) == 1
+             && sg->next
+             && DRD_(sg_get_refcnt)(sg->next) == 1
+             && sg->next->next)
+         {
+            /* Merge sg and sg->next into sg. */
+            DRD_(sg_merge)(sg, sg->next);
+            thread_discard_segment(i, sg->next);
+         }
       }
-    }
 
-    // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
-  }
+      // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
+   }
 }
 
 /**
@@ -772,43 +778,43 @@
  */
 void DRD_(thread_new_segment)(const DrdThreadId tid)
 {
-  Segment* new_sg;
+   Segment* new_sg;
 
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
 
-  new_sg = DRD_(sg_new)(tid, tid);
-  thread_append_segment(tid, new_sg);
+   new_sg = DRD_(sg_new)(tid, tid);
+   thread_append_segment(tid, new_sg);
 
-  thread_compute_conflict_set(&DRD_(g_conflict_set), DRD_(g_drd_running_tid));
-  s_conflict_set_new_segment_count++;
+   thread_compute_conflict_set(&DRD_(g_conflict_set), DRD_(g_drd_running_tid));
+   s_conflict_set_new_segment_count++;
 
-  DRD_(thread_discard_ordered_segments)();
+   DRD_(thread_discard_ordered_segments)();
 
-  if (s_segment_merging)
-  {
-    thread_merge_segments();
-  }
+   if (s_segment_merging)
+   {
+      thread_merge_segments();
+   }
 }
 
 /** Call this function after thread 'joiner' joined thread 'joinee'. */
 void DRD_(thread_combine_vc)(DrdThreadId joiner, DrdThreadId joinee)
 {
-  tl_assert(joiner != joinee);
-  tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
-            && joiner != DRD_INVALID_THREADID);
-  tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
-            && joinee != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[joiner].last);
-  tl_assert(DRD_(g_threadinfo)[joinee].last);
-  DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
-                   &DRD_(g_threadinfo)[joinee].last->vc);
-  DRD_(thread_discard_ordered_segments)();
+   tl_assert(joiner != joinee);
+   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
+             && joiner != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
+             && joinee != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[joiner].last);
+   tl_assert(DRD_(g_threadinfo)[joinee].last);
+   DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
+                    &DRD_(g_threadinfo)[joinee].last->vc);
+   DRD_(thread_discard_ordered_segments)();
 
-  if (joiner == DRD_(g_drd_running_tid))
-  {
-    thread_compute_conflict_set(&DRD_(g_conflict_set), joiner);
-  }
+   if (joiner == DRD_(g_drd_running_tid))
+   {
+      thread_compute_conflict_set(&DRD_(g_conflict_set), joiner);
+   }
 }
 
 /**
@@ -818,14 +824,14 @@
  */
 void DRD_(thread_combine_vc2)(DrdThreadId tid, const VectorClock* const vc)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].last);
-  tl_assert(vc);
-  DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
-  thread_compute_conflict_set(&DRD_(g_conflict_set), tid);
-  DRD_(thread_discard_ordered_segments)();
-  s_conflict_set_combine_vc_count++;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].last);
+   tl_assert(vc);
+   DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
+   thread_compute_conflict_set(&DRD_(g_conflict_set), tid);
+   DRD_(thread_discard_ordered_segments)();
+   s_conflict_set_combine_vc_count++;
 }
 
 /**
@@ -835,58 +841,58 @@
  */
 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
 {
-  DrdThreadId other_user;
-  unsigned i;
+   DrdThreadId other_user;
+   unsigned i;
 
-  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
-  other_user = DRD_INVALID_THREADID;
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    Segment* p;
-    for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
-    {
-      if (other_user == DRD_INVALID_THREADID
-          && i != DRD_(g_drd_running_tid))
+   /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
+   other_user = DRD_INVALID_THREADID;
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      Segment* p;
+      for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
       {
-        if (UNLIKELY(DRD_(bm_test_and_clear)(p->bm, a1, a2)))
-        {
-          other_user = i;
-        }
-        continue;
+         if (other_user == DRD_INVALID_THREADID
+             && i != DRD_(g_drd_running_tid))
+         {
+            if (UNLIKELY(DRD_(bm_test_and_clear)(p->bm, a1, a2)))
+            {
+               other_user = i;
+            }
+            continue;
+         }
+         DRD_(bm_clear)(p->bm, a1, a2);
       }
-      DRD_(bm_clear)(p->bm, a1, a2);
-    }
-  }
+   }
 
-  /*
-   * If any other thread had accessed memory in [ a1, a2 [, update the
-   * conflict set.
-   */
-  if (other_user != DRD_INVALID_THREADID
-      && DRD_(bm_has_any_access)(DRD_(g_conflict_set), a1, a2))
-  {
-    thread_compute_conflict_set(&DRD_(g_conflict_set),
-                                      DRD_(thread_get_running_tid)());
-  }
+   /*
+    * If any other thread had accessed memory in [ a1, a2 [, update the
+    * conflict set.
+    */
+   if (other_user != DRD_INVALID_THREADID
+       && DRD_(bm_has_any_access)(DRD_(g_conflict_set), a1, a2))
+   {
+      thread_compute_conflict_set(&DRD_(g_conflict_set),
+                                  DRD_(thread_get_running_tid)());
+   }
 }
 
 /** Start recording memory access information. */
 void DRD_(thread_start_recording)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(! DRD_(g_threadinfo)[tid].is_recording);
-  DRD_(g_threadinfo)[tid].is_recording = True;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(! DRD_(g_threadinfo)[tid].is_recording);
+   DRD_(g_threadinfo)[tid].is_recording = True;
 }
 
 /** Stop recording memory access information. */
 void DRD_(thread_stop_recording)(const DrdThreadId tid)
 {
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].is_recording);
-  DRD_(g_threadinfo)[tid].is_recording = False;
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].is_recording);
+   DRD_(g_threadinfo)[tid].is_recording = False;
 }
 
 /**
@@ -896,29 +902,29 @@
  */
 void DRD_(thread_print_all)(void)
 {
-  unsigned i;
-  Segment* p;
+   unsigned i;
+   Segment* p;
 
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    if (DRD_(g_threadinfo)[i].first)
-    {
-      VG_(printf)("**************\n"
-                  "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
-                  "**************\n",
-                  i,
-                  DRD_(g_threadinfo)[i].vg_thread_exists,
-                  DRD_(g_threadinfo)[i].vg_threadid,
-                  DRD_(g_threadinfo)[i].posix_thread_exists,
-                  DRD_(g_threadinfo)[i].pt_threadid,
-                  DRD_(g_threadinfo)[i].detached_posix_thread);
-      for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      if (DRD_(g_threadinfo)[i].first)
       {
-        DRD_(sg_print)(p);
+         VG_(printf)("**************\n"
+                     "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
+                     "**************\n",
+                     i,
+                     DRD_(g_threadinfo)[i].vg_thread_exists,
+                     DRD_(g_threadinfo)[i].vg_threadid,
+                     DRD_(g_threadinfo)[i].posix_thread_exists,
+                     DRD_(g_threadinfo)[i].pt_threadid,
+                     DRD_(g_threadinfo)[i].detached_posix_thread);
+         for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
+         {
+            DRD_(sg_print)(p);
+         }
       }
-    }
-  }
+   }
 }
 
 /** Show a call stack involved in a data race. */
@@ -926,26 +932,26 @@
                             const Char* const msg,
                             ExeContext* const callstack)
 {
-  const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
+   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
 
-  VG_(message)(Vg_UserMsg, "%s (thread %d/%d)", msg, vg_tid, tid);
+   VG_(message)(Vg_UserMsg, "%s (thread %d/%d)", msg, vg_tid, tid);
 
-  if (vg_tid != VG_INVALID_THREADID)
-  {
-    if (callstack)
-    {
-      VG_(pp_ExeContext)(callstack);
-    }
-    else
-    {
-      VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
-    }
-  }
-  else
-  {
-    VG_(message)(Vg_UserMsg,
-                 "   (thread finished, call stack no longer available)");
-  }
+   if (vg_tid != VG_INVALID_THREADID)
+   {
+      if (callstack)
+      {
+         VG_(pp_ExeContext)(callstack);
+      }
+      else
+      {
+         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
+      }
+   }
+   else
+   {
+      VG_(message)(Vg_UserMsg,
+                   "   (thread finished, call stack no longer available)");
+   }
 }
 
 /** Print information about the segments involved in a data race. */
@@ -956,43 +962,43 @@
                                            const BmAccessTypeT access_type,
                                            const Segment* const p)
 {
-  unsigned i;
+   unsigned i;
 
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(p);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(p);
 
-  for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-       i++)
-  {
-    if (i != tid)
-    {
-      Segment* q;
-      for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
+   for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+        i++)
+   {
+      if (i != tid)
       {
-        /*
-         * Since q iterates over the segments of thread i in order of 
-         * decreasing vector clocks, if q->vc <= p->vc, then 
-         * q->next->vc <= p->vc will also hold. Hence, break out of the
-         * loop once this condition is met.
-         */
-        if (DRD_(vc_lte)(&q->vc, &p->vc))
-          break;
-        if (! DRD_(vc_lte)(&p->vc, &q->vc))
-        {
-          if (DRD_(bm_has_conflict_with)(q->bm, addr, addr + size,
-                                         access_type))
-          {
-            tl_assert(q->stacktrace);
-            show_call_stack(i,        "Other segment start",
-                            q->stacktrace);
-            show_call_stack(i,        "Other segment end",
-                            q->next ? q->next->stacktrace : 0);
-          }
-        }
+         Segment* q;
+         for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
+         {
+            /*
+             * Since q iterates over the segments of thread i in order of 
+             * decreasing vector clocks, if q->vc <= p->vc, then 
+             * q->next->vc <= p->vc will also hold. Hence, break out of the
+             * loop once this condition is met.
+             */
+            if (DRD_(vc_lte)(&q->vc, &p->vc))
+               break;
+            if (! DRD_(vc_lte)(&p->vc, &q->vc))
+            {
+               if (DRD_(bm_has_conflict_with)(q->bm, addr, addr + size,
+                                              access_type))
+               {
+                  tl_assert(q->stacktrace);
+                  show_call_stack(i,        "Other segment start",
+                                  q->stacktrace);
+                  show_call_stack(i,        "Other segment end",
+                                  q->next ? q->next->stacktrace : 0);
+               }
+            }
+         }
       }
-    }
-  }
+   }
 }
 
 /** Print information about all segments involved in a data race. */
@@ -1001,19 +1007,19 @@
                                               const SizeT size,
                                               const BmAccessTypeT access_type)
 {
-  Segment* p;
+   Segment* p;
 
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
 
-  for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
-  {
-    if (DRD_(bm_has)(p->bm, addr, addr + size, access_type))
-    {
-      thread_report_conflicting_segments_segment(tid, addr, size,
-                                                 access_type, p);
-    }
-  }
+   for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
+   {
+      if (DRD_(bm_has)(p->bm, addr, addr + size, access_type))
+      {
+         thread_report_conflicting_segments_segment(tid, addr, size,
+                                                    access_type, p);
+      }
+   }
 }
 
 /**
@@ -1023,123 +1029,129 @@
 static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                         const DrdThreadId tid)
 {
-  Segment* p;
+   Segment* p;
 
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(tid == DRD_(g_drd_running_tid));
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(tid == DRD_(g_drd_running_tid));
 
-  s_update_conflict_set_count++;
-  s_conflict_set_bitmap_creation_count  -= DRD_(bm_get_bitmap_creation_count)();
-  s_conflict_set_bitmap2_creation_count -= DRD_(bm_get_bitmap2_creation_count)();
+   s_update_conflict_set_count++;
+   s_conflict_set_bitmap_creation_count
+      -= DRD_(bm_get_bitmap_creation_count)();
+   s_conflict_set_bitmap2_creation_count
+      -= DRD_(bm_get_bitmap2_creation_count)();
 
-  if (*conflict_set)
-  {
-    DRD_(bm_delete)(*conflict_set);
-  }
-  *conflict_set = DRD_(bm_new)();
+   if (*conflict_set)
+   {
+      DRD_(bm_delete)(*conflict_set);
+   }
+   *conflict_set = DRD_(bm_new)();
 
-  if (s_trace_conflict_set)
-  {
-    char msg[256];
-
-    VG_(snprintf)(msg, sizeof(msg),
-                  "computing conflict set for thread %d/%d with vc ",
-                  DRD_(DrdThreadIdToVgThreadId)(tid), tid);
-    DRD_(vc_snprint)(msg + VG_(strlen)(msg),
-                     sizeof(msg) - VG_(strlen)(msg),
-                     &DRD_(g_threadinfo)[tid].last->vc);
-    VG_(message)(Vg_UserMsg, "%s", msg);
-  }
-
-  p = DRD_(g_threadinfo)[tid].last;
-  {
-    unsigned j;
-
-    if (s_trace_conflict_set)
-    {
+   if (s_trace_conflict_set)
+   {
       char msg[256];
 
       VG_(snprintf)(msg, sizeof(msg),
-                    "conflict set: thread [%d] at vc ",
-                    tid);
+                    "computing conflict set for thread %d/%d with vc ",
+                    DRD_(DrdThreadIdToVgThreadId)(tid), tid);
       DRD_(vc_snprint)(msg + VG_(strlen)(msg),
                        sizeof(msg) - VG_(strlen)(msg),
-                       &p->vc);
+                       &DRD_(g_threadinfo)[tid].last->vc);
       VG_(message)(Vg_UserMsg, "%s", msg);
-    }
+   }
 
-    for (j = 0; j < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
-         j++)
-    {
-      if (j != tid && DRD_(IsValidDrdThreadId)(j))
+   p = DRD_(g_threadinfo)[tid].last;
+   {
+      unsigned j;
+
+      if (s_trace_conflict_set)
       {
-        const Segment* q;
-        for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
-        {
-          if (! DRD_(vc_lte)(&q->vc, &p->vc) && ! DRD_(vc_lte)(&p->vc, &q->vc))
-          {
-            if (s_trace_conflict_set)
-            {
-              char msg[256];
-              VG_(snprintf)(msg, sizeof(msg),
-                            "conflict set: [%d] merging segment ", j);
-              DRD_(vc_snprint)(msg + VG_(strlen)(msg),
-                               sizeof(msg) - VG_(strlen)(msg),
-                               &q->vc);
-              VG_(message)(Vg_UserMsg, "%s", msg);
-            }
-            DRD_(bm_merge2)(*conflict_set, q->bm);
-          }
-          else
-          {
-            if (s_trace_conflict_set)
-            {
-              char msg[256];
-              VG_(snprintf)(msg, sizeof(msg),
-                            "conflict set: [%d] ignoring segment ", j);
-              DRD_(vc_snprint)(msg + VG_(strlen)(msg),
-                               sizeof(msg) - VG_(strlen)(msg),
-                               &q->vc);
-              VG_(message)(Vg_UserMsg, "%s", msg);
-            }
-          }
-        }
+         char msg[256];
+
+         VG_(snprintf)(msg, sizeof(msg),
+                       "conflict set: thread [%d] at vc ",
+                       tid);
+         DRD_(vc_snprint)(msg + VG_(strlen)(msg),
+                          sizeof(msg) - VG_(strlen)(msg),
+                          &p->vc);
+         VG_(message)(Vg_UserMsg, "%s", msg);
       }
-    }
-  }
 
-  s_conflict_set_bitmap_creation_count  += DRD_(bm_get_bitmap_creation_count)();
-  s_conflict_set_bitmap2_creation_count += DRD_(bm_get_bitmap2_creation_count)();
+      for (j = 0;
+           j < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
+           j++)
+      {
+         if (j != tid && DRD_(IsValidDrdThreadId)(j))
+         {
+            const Segment* q;
+            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
+            {
+               if (! DRD_(vc_lte)(&q->vc, &p->vc)
+                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
+               {
+                  if (s_trace_conflict_set)
+                  {
+                     char msg[256];
+                     VG_(snprintf)(msg, sizeof(msg),
+                                   "conflict set: [%d] merging segment ", j);
+                     DRD_(vc_snprint)(msg + VG_(strlen)(msg),
+                                      sizeof(msg) - VG_(strlen)(msg),
+                                      &q->vc);
+                     VG_(message)(Vg_UserMsg, "%s", msg);
+                  }
+                  DRD_(bm_merge2)(*conflict_set, q->bm);
+               }
+               else
+               {
+                  if (s_trace_conflict_set)
+                  {
+                     char msg[256];
+                     VG_(snprintf)(msg, sizeof(msg),
+                                   "conflict set: [%d] ignoring segment ", j);
+                     DRD_(vc_snprint)(msg + VG_(strlen)(msg),
+                                      sizeof(msg) - VG_(strlen)(msg),
+                                      &q->vc);
+                     VG_(message)(Vg_UserMsg, "%s", msg);
+                  }
+               }
+            }
+         }
+      }
+   }
 
-  if (0 && s_trace_conflict_set)
-  {
-    VG_(message)(Vg_UserMsg, "[%d] new conflict set:", tid);
-    DRD_(bm_print)(*conflict_set);
-    VG_(message)(Vg_UserMsg, "[%d] end of new conflict set.", tid);
-  }
+   s_conflict_set_bitmap_creation_count
+      += DRD_(bm_get_bitmap_creation_count)();
+   s_conflict_set_bitmap2_creation_count
+      += DRD_(bm_get_bitmap2_creation_count)();
+
+   if (0 && s_trace_conflict_set)
+   {
+      VG_(message)(Vg_UserMsg, "[%d] new conflict set:", tid);
+      DRD_(bm_print)(*conflict_set);
+      VG_(message)(Vg_UserMsg, "[%d] end of new conflict set.", tid);
+   }
 }
 
 /** Report the number of context switches performed. */
 ULong DRD_(thread_get_context_switch_count)(void)
 {
-  return s_context_switch_count;
+   return s_context_switch_count;
 }
 
 /** Report the number of ordered segments that have been discarded. */
 ULong DRD_(thread_get_discard_ordered_segments_count)(void)
 {
-  return s_discard_ordered_segments_count;
+   return s_discard_ordered_segments_count;
 }
 
 /** Return how many times the conflict set has been updated. */
 ULong DRD_(thread_get_update_conflict_set_count)(ULong* dsnsc, ULong* dscvc)
 {
-  tl_assert(dsnsc);
-  tl_assert(dscvc);
-  *dsnsc = s_conflict_set_new_segment_count;
-  *dscvc = s_conflict_set_combine_vc_count;
-  return s_update_conflict_set_count;
+   tl_assert(dsnsc);
+   tl_assert(dscvc);
+   *dsnsc = s_conflict_set_new_segment_count;
+   *dscvc = s_conflict_set_combine_vc_count;
+   return s_update_conflict_set_count;
 }
 
 /**
@@ -1148,7 +1160,7 @@
  */
 ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
 {
-  return s_conflict_set_bitmap_creation_count;
+   return s_conflict_set_bitmap_creation_count;
 }
 
 /**
@@ -1157,5 +1169,5 @@
  */
 ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
 {
-  return s_conflict_set_bitmap2_creation_count;
+   return s_conflict_set_bitmap2_creation_count;
 }
diff --git a/drd/drd_thread.h b/drd/drd_thread.h
index d861464..bf24fbc 100644
--- a/drd/drd_thread.h
+++ b/drd/drd_thread.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -66,28 +67,28 @@
 /** Per-thread information managed by DRD. */
 typedef struct
 {
-  Segment*  first;         /**< Pointer to first segment. */
-  Segment*  last;          /**< Pointer to last segment. */
-  ThreadId  vg_threadid;   /**< Valgrind thread ID. */
-  PThreadId pt_threadid;   /**< POSIX thread ID. */
-  Addr      stack_min_min; /**< Lowest value stack pointer ever had. */
-  Addr      stack_min;     /**< Current stack pointer. */
-  Addr      stack_startup; /**<Stack pointer after pthread_create() finished.*/
-  Addr      stack_max;     /**< Top of stack. */
-  SizeT     stack_size;    /**< Maximum size of stack. */
-  /** Indicates whether the Valgrind core knows about this thread. */
-  Bool      vg_thread_exists;
-  /** Indicates whether there is an associated POSIX thread ID. */
-  Bool      posix_thread_exists;
-  /**
-   * If true, indicates that there is a corresponding POSIX thread ID and
-   * a corresponding OS thread that is detached.
-   */
-  Bool      detached_posix_thread;
-  /** Wether recording of memory accesses is active. */
-  Bool      is_recording;
-  /** Nesting level of synchronization functions called by the client. */
-  Int       synchr_nesting;
+   Segment*  first;         /**< Pointer to first segment. */
+   Segment*  last;          /**< Pointer to last segment. */
+   ThreadId  vg_threadid;   /**< Valgrind thread ID. */
+   PThreadId pt_threadid;   /**< POSIX thread ID. */
+   Addr      stack_min_min; /**< Lowest value stack pointer ever had. */
+   Addr      stack_min;     /**< Current stack pointer. */
+   Addr      stack_startup; /**<Stack pointer after pthread_create() finished.*/
+   Addr      stack_max;     /**< Top of stack. */
+   SizeT     stack_size;    /**< Maximum size of stack. */
+   /** Indicates whether the Valgrind core knows about this thread. */
+   Bool      vg_thread_exists;
+   /** Indicates whether there is an associated POSIX thread ID. */
+   Bool      posix_thread_exists;
+   /**
+    * If true, indicates that there is a corresponding POSIX thread ID and
+    * a corresponding OS thread that is detached.
+    */
+   Bool      detached_posix_thread;
+   /** Wether recording of memory accesses is active. */
+   Bool      is_recording;
+   /** Nesting level of synchronization functions called by the client. */
+   Int       synchr_nesting;
 } ThreadInfo;
 
 
@@ -121,13 +122,14 @@
 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid);
 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid);
 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
-                              const ThreadId vg_created);
+                                    const ThreadId vg_created);
 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created);
 void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee);
 void DRD_(thread_delete)(const DrdThreadId tid);
 void DRD_(thread_finished)(const DrdThreadId tid);
 void DRD_(thread_pre_cancel)(const DrdThreadId tid);
-void DRD_(thread_set_stack_startup)(const DrdThreadId tid, const Addr stack_startup);
+void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
+                                    const Addr stack_startup);
 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid);
 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid);
 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid);
@@ -137,15 +139,17 @@
 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable);
 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid);
 void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
-                            const DrdThreadId drd_tid);
+                                  const DrdThreadId drd_tid);
 int DRD_(thread_enter_synchr)(const DrdThreadId tid);
 int DRD_(thread_leave_synchr)(const DrdThreadId tid);
 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid);
 void DRD_(thread_new_segment)(const DrdThreadId tid);
 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid);
 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid);
-void DRD_(thread_combine_vc)(const DrdThreadId joiner, const DrdThreadId joinee);
-void DRD_(thread_combine_vc2)(const DrdThreadId tid, const VectorClock* const vc);
+void DRD_(thread_combine_vc)(const DrdThreadId joiner,
+                             const DrdThreadId joinee);
+void DRD_(thread_combine_vc2)(const DrdThreadId tid,
+                              const VectorClock* const vc);
 
 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2);
 void DRD_(thread_start_recording)(const DrdThreadId tid);
@@ -153,12 +157,12 @@
 void DRD_(thread_print_all)(void);
 void DRD_(thread_report_races)(const DrdThreadId tid);
 void DRD_(thread_report_races_segment)(const DrdThreadId tid,
-                                 const Segment* const p);
+                                       const Segment* const p);
 void DRD_(thread_report_all_races)(void);
 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
-                                        const Addr addr,
-                                        const SizeT size,
-                                        const BmAccessTypeT access_type);
+                                              const Addr addr,
+                                              const SizeT size,
+                                              const BmAccessTypeT access_type);
 ULong DRD_(thread_get_context_switch_count)(void);
 ULong DRD_(thread_get_report_races_count)(void);
 ULong DRD_(thread_get_discard_ordered_segments_count)(void);
@@ -181,25 +185,25 @@
 static __inline__
 Bool DRD_(IsValidDrdThreadId)(const DrdThreadId tid)
 {
-  return (0 <= (int)tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
-          && ! (DRD_(g_threadinfo)[tid].vg_thread_exists == False
-                && DRD_(g_threadinfo)[tid].posix_thread_exists == False
-                && DRD_(g_threadinfo)[tid].detached_posix_thread == False));
+   return (0 <= (int)tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
+           && ! (DRD_(g_threadinfo)[tid].vg_thread_exists == False
+                 && DRD_(g_threadinfo)[tid].posix_thread_exists == False
+                 && DRD_(g_threadinfo)[tid].detached_posix_thread == False));
 }
 
 /** Returns the DRD thread ID of the currently running thread. */
 static __inline__
 DrdThreadId DRD_(thread_get_running_tid)(void)
 {
-  tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
-  return DRD_(g_drd_running_tid);
+   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
+   return DRD_(g_drd_running_tid);
 }
 
 /** Returns a pointer to the conflict set for the currently running thread. */
 static __inline__
 struct bitmap* DRD_(thread_get_conflict_set)(void)
 {
-  return DRD_(g_conflict_set);
+   return DRD_(g_conflict_set);
 }
 
 /**
@@ -210,11 +214,12 @@
 Bool DRD_(running_thread_is_recording)(void)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(0 <= (int)DRD_(g_drd_running_tid) && DRD_(g_drd_running_tid) < DRD_N_THREADS
-            && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)DRD_(g_drd_running_tid)
+             && DRD_(g_drd_running_tid) < DRD_N_THREADS
+             && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
 #endif
-  return (DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].synchr_nesting == 0
-          && DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].is_recording);
+   return (DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].synchr_nesting == 0
+           && DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].is_recording);
 }
 
 /**
@@ -225,21 +230,22 @@
 void DRD_(thread_set_stack_min)(const DrdThreadId tid, const Addr stack_min)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(0 <= (int)tid
-            && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
+   tl_assert(0 <= (int)tid
+             && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
 #endif
-  DRD_(g_threadinfo)[tid].stack_min = stack_min;
+   DRD_(g_threadinfo)[tid].stack_min = stack_min;
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  /* This function can be called after the thread has been created but */
-  /* before drd_post_thread_create() has filled in stack_max.          */
-  tl_assert(DRD_(g_threadinfo)[tid].stack_min < DRD_(g_threadinfo)[tid].stack_max
-            || DRD_(g_threadinfo)[tid].stack_max == 0);
+   /* This function can be called after the thread has been created but */
+   /* before drd_post_thread_create() has filled in stack_max.          */
+   tl_assert(DRD_(g_threadinfo)[tid].stack_min
+             < DRD_(g_threadinfo)[tid].stack_max
+             || DRD_(g_threadinfo)[tid].stack_max == 0);
 #endif
-  if (UNLIKELY(stack_min < DRD_(g_threadinfo)[tid].stack_min_min))
-  {
-    DRD_(g_threadinfo)[tid].stack_min_min = stack_min;
-  }
+   if (UNLIKELY(stack_min < DRD_(g_threadinfo)[tid].stack_min_min))
+   {
+      DRD_(g_threadinfo)[tid].stack_min_min = stack_min;
+   }
 }
 
 /**
@@ -249,8 +255,8 @@
 static __inline__
 Bool DRD_(thread_address_on_stack)(const Addr a)
 {
-  return (DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].stack_min <= a
-	  && a < DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].stack_max);
+   return (DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].stack_min <= a
+           && a < DRD_(g_threadinfo)[DRD_(g_drd_running_tid)].stack_max);
 }
 
 /**
@@ -260,18 +266,18 @@
 static __inline__
 Bool DRD_(thread_address_on_any_stack)(const Addr a)
 {
-  int i;
+   int i;
 
-  for (i = 1; i < DRD_N_THREADS; i++)
-  {
-    if (DRD_(g_threadinfo)[i].vg_thread_exists
-        && DRD_(g_threadinfo)[i].stack_min <= a
-	&& a < DRD_(g_threadinfo)[i].stack_max)
-    {
-      return True;
-    }
-  }
-  return False;
+   for (i = 1; i < DRD_N_THREADS; i++)
+   {
+      if (DRD_(g_threadinfo)[i].vg_thread_exists
+          && DRD_(g_threadinfo)[i].stack_min <= a
+          && a < DRD_(g_threadinfo)[i].stack_max)
+      {
+         return True;
+      }
+   }
+   return False;
 }
 
 /** Return a pointer to the latest segment for the specified thread. */
@@ -279,18 +285,18 @@
 Segment* DRD_(thread_get_segment)(const DrdThreadId tid)
 {
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
-            && tid != DRD_INVALID_THREADID);
-  tl_assert(DRD_(g_threadinfo)[tid].last);
+   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
+             && tid != DRD_INVALID_THREADID);
+   tl_assert(DRD_(g_threadinfo)[tid].last);
 #endif
-  return DRD_(g_threadinfo)[tid].last;
+   return DRD_(g_threadinfo)[tid].last;
 }
 
 /** Return a pointer to the latest segment for the running thread. */
 static __inline__
 Segment* DRD_(running_thread_get_segment)(void)
 {
-  return DRD_(thread_get_segment)(DRD_(g_drd_running_tid));
+   return DRD_(thread_get_segment)(DRD_(g_drd_running_tid));
 }
 
 #endif /* __THREAD_H */
diff --git a/drd/drd_thread_bitmap.h b/drd/drd_thread_bitmap.h
index 102fd98..0da6c60 100644
--- a/drd/drd_thread_bitmap.h
+++ b/drd/drd_thread_bitmap.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -34,131 +35,157 @@
 static __inline__
 Bool bm_access_load_1_triggers_conflict(const Addr a1)
 {
-  DRD_(bm_access_load_1)(DRD_(running_thread_get_segment)()->bm, a1);
-  return DRD_(bm_load_1_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1);
+   DRD_(bm_access_load_1)(DRD_(running_thread_get_segment)()->bm, a1);
+   return DRD_(bm_load_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                            a1);
 }
 
 static __inline__
 Bool bm_access_load_2_triggers_conflict(const Addr a1)
 {
-  if ((a1 & 1) == 0)
-  {
-    bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 2);
-    return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(), a1, 2);
-  }
-  else
-  {
-    DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm, a1, a1 + 2, eLoad);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 2, eLoad);
-  }
+   if ((a1 & 1) == 0)
+   {
+      bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 2);
+      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
+                                               a1, 2);
+   }
+   else
+   {
+      DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+                            a1, a1 + 2, eLoad);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 2, eLoad);
+   }
 }
 
 static __inline__
 Bool bm_access_load_4_triggers_conflict(const Addr a1)
 {
-  if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 4);
-    return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(), a1, 4);
-  }
-  else
-  {
-    DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm, a1, a1 + 4, eLoad);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 4, eLoad);
-  }
+   if ((a1 & 3) == 0)
+   {
+      bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 4);
+      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
+                                               a1, 4);
+   }
+   else
+   {
+      DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+                            a1, a1 + 4, eLoad);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 4, eLoad);
+   }
 }
 
 static __inline__
 Bool bm_access_load_8_triggers_conflict(const Addr a1)
 {
-  if ((a1 & 7) == 0)
-  {
-    bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 8);
-    return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(), a1, 8);
-  }
-  else if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1 + 0, 4);
-    bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1 + 4, 4);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 8, eLoad);
-  }
-  else
-  {
-    DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm, a1, a1 + 8, eLoad);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 8, eLoad);
-  }
+   if ((a1 & 7) == 0)
+   {
+      bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 8);
+      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
+                                               a1, 8);
+   }
+   else if ((a1 & 3) == 0)
+   {
+      bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1 + 0, 4);
+      bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1 + 4, 4);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 8, eLoad);
+   }
+   else
+   {
+      DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+                            a1, a1 + 8, eLoad);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 8, eLoad);
+   }
 }
 
 static __inline__
 Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
 {
-  DRD_(bm_access_range_load)(DRD_(running_thread_get_segment)()->bm, a1, a2);
-  return DRD_(bm_load_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a2);
+   DRD_(bm_access_range_load)(DRD_(running_thread_get_segment)()->bm, a1, a2);
+   return DRD_(bm_load_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                          a1, a2);
 }
 
 static __inline__
 Bool bm_access_store_1_triggers_conflict(const Addr a1)
 {
-  DRD_(bm_access_store_1)(DRD_(running_thread_get_segment)()->bm, a1);
-  return DRD_(bm_store_1_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1);
+   DRD_(bm_access_store_1)(DRD_(running_thread_get_segment)()->bm, a1);
+   return DRD_(bm_store_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                             a1);
 }
 
 static __inline__
 Bool bm_access_store_2_triggers_conflict(const Addr a1)
 {
-  if ((a1 & 1) == 0)
-  {
-    bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 2);
-    return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(), a1, 2);
-  }
-  else
-  {
-    DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm, a1, a1 + 2, eStore);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 2, eStore);
-  }
+   if ((a1 & 1) == 0)
+   {
+      bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 2);
+      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
+                                                a1, 2);
+   }
+   else
+   {
+      DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+                            a1, a1 + 2, eStore);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 2, eStore);
+   }
 }
 
 static __inline__
 Bool bm_access_store_4_triggers_conflict(const Addr a1)
 {
-  if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 4);
-    return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(), a1, 4);
-  }
-  else
-  {
-    DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm, a1, a1 + 4, eStore);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 4, eStore);
-  }
+   if ((a1 & 3) == 0)
+   {
+      bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 4);
+      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
+                                                a1, 4);
+   }
+   else
+   {
+      DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+                            a1, a1 + 4, eStore);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 4, eStore);
+   }
 }
 
 static __inline__
 Bool bm_access_store_8_triggers_conflict(const Addr a1)
 {
-  if ((a1 & 7) == 0)
-  {
-    bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 8);
-    return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(), a1, 8);
-  }
-  else if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1 + 0, 4);
-    bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1 + 4, 4);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 8, eStore);
-  }
-  else
-  {
-    DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm, a1, a1 + 8, eStore);
-    return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a1 + 8, eStore);
-  }
+   if ((a1 & 7) == 0)
+   {
+      bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 8);
+      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
+                                                a1, 8);
+   }
+   else if ((a1 & 3) == 0)
+   {
+      bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm,
+                              a1 + 0, 4);
+      bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm,
+                              a1 + 4, 4);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 8, eStore);
+   }
+   else
+   {
+      DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+                            a1, a1 + 8, eStore);
+      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                        a1, a1 + 8, eStore);
+   }
 }
 
 static __inline__
 Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
 {
-  DRD_(bm_access_range_store)(DRD_(running_thread_get_segment)()->bm, a1, a2);
-  return DRD_(bm_store_has_conflict_with)(DRD_(thread_get_conflict_set)(), a1, a2);
+   DRD_(bm_access_range_store)(DRD_(running_thread_get_segment)()->bm, a1, a2);
+   return DRD_(bm_store_has_conflict_with)(DRD_(thread_get_conflict_set)(),
+                                           a1, a2);
 }
 
 #endif // __DRD_THREAD_BITMAP_H
diff --git a/drd/drd_vc.c b/drd/drd_vc.c
index 7f14f1a..583c3e2 100644
--- a/drd/drd_vc.c
+++ b/drd/drd_vc.c
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -47,65 +48,65 @@
                    const VCElem* const vcelem,
                    const unsigned size)
 {
-  tl_assert(vc);
-  vc->size = 0;
-  vc->capacity = 0;
-  vc->vc = 0;
-  DRD_(vc_reserve)(vc, size);
-  tl_assert(size == 0 || vc->vc != 0);
-  if (vcelem)
-  {
-    VG_(memcpy)(vc->vc, vcelem, size * sizeof(vcelem[0]));
-    vc->size = size;
-  }
+   tl_assert(vc);
+   vc->size = 0;
+   vc->capacity = 0;
+   vc->vc = 0;
+   DRD_(vc_reserve)(vc, size);
+   tl_assert(size == 0 || vc->vc != 0);
+   if (vcelem)
+   {
+      VG_(memcpy)(vc->vc, vcelem, size * sizeof(vcelem[0]));
+      vc->size = size;
+   }
 }
 
 /** Reset vc to the empty vector clock. */
 void DRD_(vc_cleanup)(VectorClock* const vc)
 {
-  DRD_(vc_reserve)(vc, 0);
+   DRD_(vc_reserve)(vc, 0);
 }
 
 /** Copy constructor -- initializes *new. */
 void DRD_(vc_copy)(VectorClock* const new, const VectorClock* const rhs)
 {
-  DRD_(vc_init)(new, rhs->vc, rhs->size);
+   DRD_(vc_init)(new, rhs->vc, rhs->size);
 }
 
 /** Assignment operator -- *lhs is already a valid vector clock. */
 void DRD_(vc_assign)(VectorClock* const lhs, const VectorClock* const rhs)
 {
-  DRD_(vc_cleanup)(lhs);
-  DRD_(vc_copy)(lhs, rhs);
+   DRD_(vc_cleanup)(lhs);
+   DRD_(vc_copy)(lhs, rhs);
 }
 
 /** Increment the clock of thread 'tid' in vector clock 'vc'. */
 void DRD_(vc_increment)(VectorClock* const vc, DrdThreadId const tid)
 {
-  unsigned i;
-  for (i = 0; i < vc->size; i++)
-  {
-    if (vc->vc[i].threadid == tid)
-    {
-      typeof(vc->vc[i].count) const oldcount = vc->vc[i].count;
-      vc->vc[i].count++;
-      // Check for integer overflow.
-      tl_assert(oldcount < vc->vc[i].count);
-      return;
-    }
-  }
+   unsigned i;
+   for (i = 0; i < vc->size; i++)
+   {
+      if (vc->vc[i].threadid == tid)
+      {
+         typeof(vc->vc[i].count) const oldcount = vc->vc[i].count;
+         vc->vc[i].count++;
+         // Check for integer overflow.
+         tl_assert(oldcount < vc->vc[i].count);
+         return;
+      }
+   }
 
-  /*
-   * The specified thread ID does not yet exist in the vector clock
-   * -- insert it.
-   */
-  {
-    const VCElem vcelem = { tid, 1 };
-    VectorClock vc2;
-    DRD_(vc_init)(&vc2, &vcelem, 1);
-    DRD_(vc_combine)(vc, &vc2);
-    DRD_(vc_cleanup)(&vc2);
-  }
+   /*
+    * The specified thread ID does not yet exist in the vector clock
+    * -- insert it.
+    */
+   {
+      const VCElem vcelem = { tid, 1 };
+      VectorClock vc2;
+      DRD_(vc_init)(&vc2, &vcelem, 1);
+      DRD_(vc_combine)(vc, &vc2);
+      DRD_(vc_cleanup)(&vc2);
+   }
 }
 
 /**
@@ -115,46 +116,46 @@
 Bool DRD_(vc_ordered)(const VectorClock* const vc1,
                       const VectorClock* const vc2)
 {
-  return DRD_(vc_lte)(vc1, vc2) || DRD_(vc_lte)(vc2, vc1);
+   return DRD_(vc_lte)(vc1, vc2) || DRD_(vc_lte)(vc2, vc1);
 }
 
 /** Compute elementwise minimum. */
 void DRD_(vc_min)(VectorClock* const result, const VectorClock* const rhs)
 {
-  unsigned i;
-  unsigned j;
+   unsigned i;
+   unsigned j;
 
-  tl_assert(result);
-  tl_assert(rhs);
+   tl_assert(result);
+   tl_assert(rhs);
 
-  DRD_(vc_check)(result);
+   DRD_(vc_check)(result);
 
-  /* Next, combine both vector clocks into one. */
-  i = 0;
-  for (j = 0; j < rhs->size; j++)
-  {
-    while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
-    {
-      /* Thread ID is missing in second vector clock. Clear the count. */
-      result->vc[i].count = 0;
-      i++;
-    }
-    if (i >= result->size)
-    {
-      break;
-    }
-    if (result->vc[i].threadid <= rhs->vc[j].threadid)
-    {
-      /* The thread ID is present in both vector clocks. Compute the minimum */
-      /* of vc[i].count and vc[j].count. */
-      tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
-      if (rhs->vc[j].count < result->vc[i].count)
+   /* Next, combine both vector clocks into one. */
+   i = 0;
+   for (j = 0; j < rhs->size; j++)
+   {
+      while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
       {
-        result->vc[i].count = rhs->vc[j].count;
+         /* Thread ID is missing in second vector clock. Clear the count. */
+         result->vc[i].count = 0;
+         i++;
       }
-    }
-  }
-  DRD_(vc_check)(result);
+      if (i >= result->size)
+      {
+         break;
+      }
+      if (result->vc[i].threadid <= rhs->vc[j].threadid)
+      {
+         /* The thread ID is present in both vector clocks. Compute the */
+         /* minimum of vc[i].count and vc[j].count. */
+         tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
+         if (rhs->vc[j].count < result->vc[i].count)
+         {
+            result->vc[i].count = rhs->vc[j].count;
+         }
+      }
+   }
+   DRD_(vc_check)(result);
 }
 
 /**
@@ -162,7 +163,7 @@
  */
 void DRD_(vc_combine)(VectorClock* const result, const VectorClock* const rhs)
 {
-  DRD_(vc_combine2)(result, rhs, -1);
+   DRD_(vc_combine2)(result, rhs, -1);
 }
 
 /**
@@ -175,111 +176,111 @@
                        const VectorClock* const rhs,
                        const DrdThreadId tid)
 {
-  unsigned i;
-  unsigned j;
-  unsigned shared;
-  unsigned new_size;
-  Bool     almost_equal = True;
+   unsigned i;
+   unsigned j;
+   unsigned shared;
+   unsigned new_size;
+   Bool     almost_equal = True;
 
-  tl_assert(result);
-  tl_assert(rhs);
+   tl_assert(result);
+   tl_assert(rhs);
 
-  // First count the number of shared thread id's.
-  j = 0;
-  shared = 0;
-  for (i = 0; i < result->size; i++)
-  {
-    while (j < rhs->size && rhs->vc[j].threadid < result->vc[i].threadid)
-      j++;
-    if (j >= rhs->size)
-      break;
-    if (result->vc[i].threadid == rhs->vc[j].threadid)
-      shared++;
-  }
+   // First count the number of shared thread id's.
+   j = 0;
+   shared = 0;
+   for (i = 0; i < result->size; i++)
+   {
+      while (j < rhs->size && rhs->vc[j].threadid < result->vc[i].threadid)
+         j++;
+      if (j >= rhs->size)
+         break;
+      if (result->vc[i].threadid == rhs->vc[j].threadid)
+         shared++;
+   }
 
-  DRD_(vc_check)(result);
+   DRD_(vc_check)(result);
 
-  new_size = result->size + rhs->size - shared;
-  if (new_size > result->capacity)
-    DRD_(vc_reserve)(result, new_size);
+   new_size = result->size + rhs->size - shared;
+   if (new_size > result->capacity)
+      DRD_(vc_reserve)(result, new_size);
 
-  DRD_(vc_check)(result);
+   DRD_(vc_check)(result);
 
-  // Next, combine both vector clocks into one.
-  i = 0;
-  for (j = 0; j < rhs->size; j++)
-  {
-    /* First of all, skip those clocks in result->vc[] for which there */
-    /* is no corresponding clock in rhs->vc[].                         */
-    while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
-    {
-      if (result->vc[i].threadid != tid)
+   // Next, combine both vector clocks into one.
+   i = 0;
+   for (j = 0; j < rhs->size; j++)
+   {
+      /* First of all, skip those clocks in result->vc[] for which there */
+      /* is no corresponding clock in rhs->vc[].                         */
+      while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
       {
-        almost_equal = False;
+         if (result->vc[i].threadid != tid)
+         {
+            almost_equal = False;
+         }
+         i++;
       }
-      i++;
-    }
-    /* If the end of *result is met, append rhs->vc[j] to *result. */
-    if (i >= result->size)
-    {
-      result->size++;
-      result->vc[i] = rhs->vc[j];
-      if (result->vc[i].threadid != tid)
+      /* If the end of *result is met, append rhs->vc[j] to *result. */
+      if (i >= result->size)
       {
-        almost_equal = False;
+         result->size++;
+         result->vc[i] = rhs->vc[j];
+         if (result->vc[i].threadid != tid)
+         {
+            almost_equal = False;
+         }
       }
-    }
-    /* If clock rhs->vc[j] is not in *result, insert it. */
-    else if (result->vc[i].threadid > rhs->vc[j].threadid)
-    {
-      unsigned k;
-      for (k = result->size; k > i; k--)
+      /* If clock rhs->vc[j] is not in *result, insert it. */
+      else if (result->vc[i].threadid > rhs->vc[j].threadid)
       {
-        result->vc[k] = result->vc[k - 1];
+         unsigned k;
+         for (k = result->size; k > i; k--)
+         {
+            result->vc[k] = result->vc[k - 1];
+         }
+         result->size++;
+         result->vc[i] = rhs->vc[j];
+         if (result->vc[i].threadid != tid)
+         {
+            almost_equal = False;
+         }
       }
-      result->size++;
-      result->vc[i] = rhs->vc[j];
-      if (result->vc[i].threadid != tid)
+      /* Otherwise, both *result and *rhs have a clock for thread            */
+      /* result->vc[i].threadid == rhs->vc[j].threadid. Compute the maximum. */
+      else
       {
-        almost_equal = False;
+         tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
+         if (result->vc[i].threadid != tid
+             && rhs->vc[j].count != result->vc[i].count)
+         {
+            almost_equal = False;
+         }
+         if (rhs->vc[j].count > result->vc[i].count)
+         {
+            result->vc[i].count = rhs->vc[j].count;
+         }
       }
-    }
-    /* Otherwise, both *result and *rhs have a clock for thread            */
-    /* result->vc[i].threadid == rhs->vc[j].threadid. Compute the maximum. */
-    else
-    {
-      tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
-      if (result->vc[i].threadid != tid
-          && rhs->vc[j].count != result->vc[i].count)
-      {
-        almost_equal = False;
-      }
-      if (rhs->vc[j].count > result->vc[i].count)
-      {
-        result->vc[i].count = rhs->vc[j].count;
-      }
-    }
-  }
-  DRD_(vc_check)(result);
-  tl_assert(result->size == new_size);
+   }
+   DRD_(vc_check)(result);
+   tl_assert(result->size == new_size);
 
-  return almost_equal;
+   return almost_equal;
 }
 
 /** Print the contents of vector clock 'vc'. */
 void DRD_(vc_print)(const VectorClock* const vc)
 {
-  unsigned i;
+   unsigned i;
 
-  tl_assert(vc);
-  VG_(printf)("[");
-  for (i = 0; i < vc->size; i++)
-  {
-    tl_assert(vc->vc);
-    VG_(printf)("%s %d: %d", i > 0 ? "," : "",
-                vc->vc[i].threadid, vc->vc[i].count);
-  }
-  VG_(printf)(" ]");
+   tl_assert(vc);
+   VG_(printf)("[");
+   for (i = 0; i < vc->size; i++)
+   {
+      tl_assert(vc->vc);
+      VG_(printf)("%s %d: %d", i > 0 ? "," : "",
+                  vc->vc[i].threadid, vc->vc[i].count);
+   }
+   VG_(printf)(" ]");
 }
 
 /**
@@ -289,23 +290,23 @@
 void DRD_(vc_snprint)(Char* const str, const Int size,
                       const VectorClock* const vc)
 {
-  unsigned i;
-  unsigned j = 1;
+   unsigned i;
+   unsigned j = 1;
 
-  tl_assert(vc);
-  VG_(snprintf)(str, size, "[");
-  for (i = 0; i < vc->size; i++)
-  {
-    tl_assert(vc->vc);
-    for ( ; j <= vc->vc[i].threadid; j++)
-    {
-      VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str),
-                    "%s %d",
-                    i > 0 ? "," : "",
-                    (j == vc->vc[i].threadid) ? vc->vc[i].count : 0);
-    }
-  }
-  VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str), " ]");
+   tl_assert(vc);
+   VG_(snprintf)(str, size, "[");
+   for (i = 0; i < vc->size; i++)
+   {
+      tl_assert(vc->vc);
+      for ( ; j <= vc->vc[i].threadid; j++)
+      {
+         VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str),
+                       "%s %d",
+                       i > 0 ? "," : "",
+                       (j == vc->vc[i].threadid) ? vc->vc[i].count : 0);
+      }
+   }
+   VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str), " ]");
 }
 
 /**
@@ -320,12 +321,12 @@
  */
 void DRD_(vc_check)(const VectorClock* const vc)
 {
-  unsigned i;
-  tl_assert(vc->size <= vc->capacity);
-  for (i = 1; i < vc->size; i++)
-  {
-    tl_assert(vc->vc[i-1].threadid < vc->vc[i].threadid);
-  }
+   unsigned i;
+   tl_assert(vc->size <= vc->capacity);
+   for (i = 1; i < vc->size; i++)
+   {
+      tl_assert(vc->vc[i-1].threadid < vc->vc[i].threadid);
+   }
 }
 
 /**
@@ -336,26 +337,26 @@
 static
 void DRD_(vc_reserve)(VectorClock* const vc, const unsigned new_capacity)
 {
-  tl_assert(vc);
-  if (new_capacity > vc->capacity)
-  {
-    if (vc->vc)
-    {
-      vc->vc = VG_(realloc)("drd.vc.vr.1",
-                            vc->vc, new_capacity * sizeof(vc->vc[0]));
-    }
-    else if (new_capacity > 0)
-    {
-      vc->vc = VG_(malloc)("drd.vc.vr.2",
-                           new_capacity * sizeof(vc->vc[0]));
-    }
-    else
-    {
-      tl_assert(vc->vc == 0 && new_capacity == 0);
-    }
-    vc->capacity = new_capacity;
-  }
-  tl_assert(new_capacity == 0 || vc->vc != 0);
+   tl_assert(vc);
+   if (new_capacity > vc->capacity)
+   {
+      if (vc->vc)
+      {
+         vc->vc = VG_(realloc)("drd.vc.vr.1",
+                               vc->vc, new_capacity * sizeof(vc->vc[0]));
+      }
+      else if (new_capacity > 0)
+      {
+         vc->vc = VG_(malloc)("drd.vc.vr.2",
+                              new_capacity * sizeof(vc->vc[0]));
+      }
+      else
+      {
+         tl_assert(vc->vc == 0 && new_capacity == 0);
+      }
+      vc->capacity = new_capacity;
+   }
+   tl_assert(new_capacity == 0 || vc->vc != 0);
 }
 
 #if 0
@@ -364,40 +365,42 @@
  */
 void DRD_(vc_test)(void)
 {
-  VectorClock vc1;
-  VCElem vc1elem[] = { { 3, 7 }, { 5, 8 }, };
-  VectorClock vc2;
-  VCElem vc2elem[] = { { 1, 4 }, { 3, 9 }, };
-  VectorClock vc3;
-  VCElem vc4elem[] = { { 1, 3 }, { 2, 1 }, };
-  VectorClock vc4;
-  VCElem vc5elem[] = { { 1, 4 }, };
-  VectorClock vc5;
+   VectorClock vc1;
+   VCElem vc1elem[] = { { 3, 7 }, { 5, 8 }, };
+   VectorClock vc2;
+   VCElem vc2elem[] = { { 1, 4 }, { 3, 9 }, };
+   VectorClock vc3;
+   VCElem vc4elem[] = { { 1, 3 }, { 2, 1 }, };
+   VectorClock vc4;
+   VCElem vc5elem[] = { { 1, 4 }, };
+   VectorClock vc5;
 
-  vc_init(&vc1, vc1elem, sizeof(vc1elem)/sizeof(vc1elem[0]));
-  vc_init(&vc2, vc2elem, sizeof(vc2elem)/sizeof(vc2elem[0]));
-  vc_init(&vc3, 0, 0);
-  vc_init(&vc4, vc4elem, sizeof(vc4elem)/sizeof(vc4elem[0]));
-  vc_init(&vc5, vc5elem, sizeof(vc5elem)/sizeof(vc5elem[0]));
+   vc_init(&vc1, vc1elem, sizeof(vc1elem)/sizeof(vc1elem[0]));
+   vc_init(&vc2, vc2elem, sizeof(vc2elem)/sizeof(vc2elem[0]));
+   vc_init(&vc3, 0, 0);
+   vc_init(&vc4, vc4elem, sizeof(vc4elem)/sizeof(vc4elem[0]));
+   vc_init(&vc5, vc5elem, sizeof(vc5elem)/sizeof(vc5elem[0]));
 
-  vc_combine(&vc3, &vc1);
-  vc_combine(&vc3, &vc2);
+   vc_combine(&vc3, &vc1);
+   vc_combine(&vc3, &vc2);
 
-  VG_(printf)("vc1: ");
-  vc_print(&vc1);
-  VG_(printf)("\nvc2: ");
-  vc_print(&vc2);
-  VG_(printf)("\nvc3: ");
-  vc_print(&vc3);
-  VG_(printf)("\n");
-  VG_(printf)("vc_lte(vc1, vc2) = %d, vc_lte(vc1, vc3) = %d, vc_lte(vc2, vc3) = %d, vc_lte(", vc_lte(&vc1, &vc2), vc_lte(&vc1, &vc3), vc_lte(&vc2, &vc3));
-  vc_print(&vc4);
-  VG_(printf)(", ");
-  vc_print(&vc5);
-  VG_(printf)(") = %d sw %d\n", vc_lte(&vc4, &vc5), vc_lte(&vc5, &vc4));
+   VG_(printf)("vc1: ");
+   vc_print(&vc1);
+   VG_(printf)("\nvc2: ");
+   vc_print(&vc2);
+   VG_(printf)("\nvc3: ");
+   vc_print(&vc3);
+   VG_(printf)("\n");
+   VG_(printf)("vc_lte(vc1, vc2) = %d, vc_lte(vc1, vc3) = %d,"
+               " vc_lte(vc2, vc3) = %d, vc_lte(",
+               vc_lte(&vc1, &vc2), vc_lte(&vc1, &vc3), vc_lte(&vc2, &vc3));
+   vc_print(&vc4);
+   VG_(printf)(", ");
+   vc_print(&vc5);
+   VG_(printf)(") = %d sw %d\n", vc_lte(&vc4, &vc5), vc_lte(&vc5, &vc4));
               
-  vc_cleanup(&vc1);
-  vc_cleanup(&vc2);
-  vc_cleanup(&vc3);
+   vc_cleanup(&vc1);
+   vc_cleanup(&vc2);
+   vc_cleanup(&vc3);
 }
 #endif
diff --git a/drd/drd_vc.h b/drd/drd_vc.h
index a97c531..c7f1fb1 100644
--- a/drd/drd_vc.h
+++ b/drd/drd_vc.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -52,15 +53,15 @@
 /** Vector clock element. */
 typedef struct
 {
-  DrdThreadId threadid;
-  UInt        count;
+   DrdThreadId threadid;
+   UInt        count;
 } VCElem;
 
 typedef struct
 {
-  unsigned capacity; /**< number of elements allocated for array vc. */
-  unsigned size;     /**< number of elements used of array vc. */
-  VCElem*  vc;       /**< vector clock elements. */
+   unsigned capacity; /**< number of elements allocated for array vc. */
+   unsigned size;     /**< number of elements used of array vc. */
+   VCElem*  vc;       /**< vector clock elements. */
 } VectorClock;
 
 
@@ -99,26 +100,26 @@
 static __inline__
 Bool DRD_(vc_lte)(const VectorClock* const vc1, const VectorClock* const vc2)
 {
-  unsigned i;
-  unsigned j = 0;
+   unsigned i;
+   unsigned j = 0;
 
-  for (i = 0; i < vc1->size; i++)
-  {
-    while (j < vc2->size && vc2->vc[j].threadid < vc1->vc[i].threadid)
-    {
-      j++;
-    }
-    if (j >= vc2->size || vc2->vc[j].threadid > vc1->vc[i].threadid)
-      return False;
+   for (i = 0; i < vc1->size; i++)
+   {
+      while (j < vc2->size && vc2->vc[j].threadid < vc1->vc[i].threadid)
+      {
+         j++;
+      }
+      if (j >= vc2->size || vc2->vc[j].threadid > vc1->vc[i].threadid)
+         return False;
 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
-    /* This assert statement has been commented out because of performance */
-    /* reasons.*/
-    tl_assert(j < vc2->size && vc2->vc[j].threadid == vc1->vc[i].threadid);
+      /* This assert statement has been commented out because of performance */
+      /* reasons.*/
+      tl_assert(j < vc2->size && vc2->vc[j].threadid == vc1->vc[i].threadid);
 #endif
-    if (vc1->vc[i].count > vc2->vc[j].count)
-      return False;
-  }
-  return True;
+      if (vc1->vc[i].count > vc2->vc[j].count)
+         return False;
+   }
+   return True;
 }
 
 
diff --git a/drd/pub_drd_bitmap.h b/drd/pub_drd_bitmap.h
index c0d9323..e6f2304 100644
--- a/drd/pub_drd_bitmap.h
+++ b/drd/pub_drd_bitmap.h
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
 /*
   This file is part of drd, a thread error detector.
 
@@ -42,8 +43,8 @@
 #define LHS_W (1<<1)
 #define RHS_R (1<<2)
 #define RHS_W (1<<3)
-#define HAS_RACE(a) ((((a) & RHS_W) && ((a) & (LHS_R | LHS_W))) \
-                  || (((a) & LHS_W) && ((a) & (RHS_R | RHS_W))))
+#define HAS_RACE(a) ((((a) & RHS_W) && ((a) & (LHS_R | LHS_W)))         \
+                     || (((a) & LHS_W) && ((a) & (RHS_R | RHS_W))))
 
 
 /* Forward declarations. */