Declared more inline functions static so that the compiler can omit the out-of-line code. Moved the aligned-access helpers from drd_bitmap.c into drd_bitmap.h and the bm_access_*_triggers_conflict() helpers from drd_thread.c into the new header drd_thread_bitmap.h, defining them all as static __inline__.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@8201 a5019735-40e9-0310-863c-91ae7b9d1cf9
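
For context, a minimal sketch of why the static qualifier matters (not part of this commit; the function names below are purely illustrative). Under GCC's traditional GNU89 inline semantics, a non-static __inline__ definition is always compiled out of line as well, as an externally visible symbol, whereas a static __inline__ definition has internal linkage and its body can be dropped entirely once every call site has been inlined:

    /* Illustration only; compile with e.g. "gcc -std=gnu89 -O2 -c" and
     * inspect the resulting object file with nm. */

    /* Non-static __inline__: an externally visible out-of-line copy of
     * twice() is emitted even when every call to it has been inlined. */
    __inline__ int twice(int x)
    {
      return 2 * x;
    }

    /* static __inline__: internal linkage; if every call is inlined, no
     * out-of-line code for twice_static() remains in the object file. */
    static __inline__ int twice_static(int x)
    {
      return 2 * x;
    }

    int use_both(int x)
    {
      return twice(x) + twice_static(x);
    }

Moving the helpers into header files serves the same goal: the definitions become visible to every caller, each translation unit can inline them, and the internal linkage lets the compiler discard the redundant bodies.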
diff --git a/exp-drd/drd_bitmap.c b/exp-drd/drd_bitmap.c
index 6671f00..301d0e9 100644
--- a/exp-drd/drd_bitmap.c
+++ b/exp-drd/drd_bitmap.c
@@ -106,7 +106,6 @@
  * Record an access of type access_type at addresses a1 .. a2 - 1 in
  * bitmap bm.
  */
-static
 void bm_access_range(struct bitmap* const bm,
                      const Addr a1, const Addr a2,
                      const BmAccessTypeT access_type)
@@ -171,26 +170,6 @@
   }
 }
 
-static inline
-void bm_access_aligned_load(struct bitmap* const bm,
-                            const Addr a1, const SizeT size)
-{
-  struct bitmap2* bm2;
-
-  bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
-  bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size);
-}
-
-static inline
-void bm_access_aligned_store(struct bitmap* const bm,
-                             const Addr a1, const SizeT size)
-{
-  struct bitmap2* bm2;
-
-  bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
-  bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size);
-}
-
 void bm_access_range_load(struct bitmap* const bm,
                           const Addr a1, const Addr a2)
 {
@@ -644,36 +623,6 @@
   return False;
 }
 
-static inline
-Bool bm_aligned_load_has_conflict_with(const struct bitmap* const bm,
-                                       const Addr a1, const SizeT size)
-{
-  const struct bitmap2* bm2;
-
-  bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
-
-  return (bm2 && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size));
-}
-
-static inline
-Bool bm_aligned_store_has_conflict_with(const struct bitmap* const bm,
-                                        const Addr a1, const SizeT size)
-{
-  const struct bitmap2* bm2;
-
-  bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
-
-  if (bm2)
-  {
-    if (bm0_is_any_set(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size)
-        | bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size))
-    {
-      return True;
-    }
-  }
-  return False;
-}
-
 Bool bm_load_has_conflict_with(const struct bitmap* const bm,
                                const Addr a1, const Addr a2)
 {
diff --git a/exp-drd/drd_bitmap.h b/exp-drd/drd_bitmap.h
index a860cf4..4b8afa1 100644
--- a/exp-drd/drd_bitmap.h
+++ b/exp-drd/drd_bitmap.h
@@ -28,6 +28,7 @@
 
 
 #include "pub_tool_oset.h"
+#include "pub_tool_libcbase.h"
 
 
 /*
@@ -485,5 +486,54 @@
   return bm2;
 }
 
+static __inline__
+void bm_access_aligned_load(struct bitmap* const bm,
+                            const Addr a1, const SizeT size)
+{
+  struct bitmap2* bm2;
+
+  bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
+  bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size);
+}
+
+static __inline__
+void bm_access_aligned_store(struct bitmap* const bm,
+                             const Addr a1, const SizeT size)
+{
+  struct bitmap2* bm2;
+
+  bm2 = bm2_lookup_or_insert_exclusive(bm, a1 >> ADDR0_BITS);
+  bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size);
+}
+
+static __inline__
+Bool bm_aligned_load_has_conflict_with(const struct bitmap* const bm,
+                                       const Addr a1, const SizeT size)
+{
+  const struct bitmap2* bm2;
+
+  bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
+
+  return (bm2 && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size));
+}
+
+static __inline__
+Bool bm_aligned_store_has_conflict_with(const struct bitmap* const bm,
+                                        const Addr a1, const SizeT size)
+{
+  const struct bitmap2* bm2;
+
+  bm2 = bm2_lookup(bm, a1 >> ADDR0_BITS);
+
+  if (bm2)
+  {
+    if (bm0_is_any_set(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size)
+        | bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size))
+    {
+      return True;
+    }
+  }
+  return False;
+}
 
 #endif /* __DRD_BITMAP_H */
diff --git a/exp-drd/drd_main.c b/exp-drd/drd_main.c
index d6f21dc..3261ed2 100644
--- a/exp-drd/drd_main.c
+++ b/exp-drd/drd_main.c
@@ -35,6 +35,7 @@
 #include "drd_semaphore.h"
 #include "drd_suppression.h"
 #include "drd_thread.h"
+#include "drd_thread_bitmap.h"
 #include "drd_track.h"
 #include "drd_vc.h"
 #include "priv_drd_clientreq.h"
diff --git a/exp-drd/drd_thread.c b/exp-drd/drd_thread.c
index 6c7cb68..f07636d 100644
--- a/exp-drd/drd_thread.c
+++ b/exp-drd/drd_thread.c
@@ -733,136 +733,6 @@
   s_danger_set_combine_vc_count++;
 }
 
-__inline__
-Bool bm_access_load_1_triggers_conflict(const Addr a1)
-{
-  bm_access_load_1(running_thread_get_segment()->bm, a1);
-  return bm_load_1_has_conflict_with(thread_get_danger_set(), a1);
-}
-
-__inline__
-Bool bm_access_load_2_triggers_conflict(const Addr a1)
-{
-  if ((a1 & 1) == 0)
-  {
-    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 2);
-    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 2);
-  }
-  else
-  {
-    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 2, eLoad);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 2, eLoad);
-  }
-}
-
-__inline__
-Bool bm_access_load_4_triggers_conflict(const Addr a1)
-{
-  if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 4);
-    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 4);
-  }
-  else
-  {
-    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 4, eLoad);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 4, eLoad);
-  }
-}
-
-__inline__
-Bool bm_access_load_8_triggers_conflict(const Addr a1)
-{
-  if ((a1 & 7) == 0)
-  {
-    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 8);
-    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 8);
-  }
-  else if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_load(running_thread_get_segment()->bm, a1 + 0, 4);
-    bm_access_aligned_load(running_thread_get_segment()->bm, a1 + 4, 4);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eLoad);
-  }
-  else
-  {
-    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 8, eLoad);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eLoad);
-  }
-}
-
-__inline__
-Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
-{
-  bm_access_range_load(running_thread_get_segment()->bm, a1, a2);
-  return bm_load_has_conflict_with(thread_get_danger_set(), a1, a2);
-}
-
-__inline__
-Bool bm_access_store_1_triggers_conflict(const Addr a1)
-{
-  bm_access_store_1(running_thread_get_segment()->bm, a1);
-  return bm_store_1_has_conflict_with(thread_get_danger_set(), a1);
-}
-
-__inline__
-Bool bm_access_store_2_triggers_conflict(const Addr a1)
-{
-  if ((a1 & 1) == 0)
-  {
-    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 2);
-    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 2);
-  }
-  else
-  {
-    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 2, eStore);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 2, eStore);
-  }
-}
-
-__inline__
-Bool bm_access_store_4_triggers_conflict(const Addr a1)
-{
-  if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 4);
-    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 4);
-  }
-  else
-  {
-    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 4, eStore);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 4, eStore);
-  }
-}
-
-__inline__
-Bool bm_access_store_8_triggers_conflict(const Addr a1)
-{
-  if ((a1 & 7) == 0)
-  {
-    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 8);
-    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 8);
-  }
-  else if ((a1 & 3) == 0)
-  {
-    bm_access_aligned_store(running_thread_get_segment()->bm, a1 + 0, 4);
-    bm_access_aligned_store(running_thread_get_segment()->bm, a1 + 4, 4);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eStore);
-  }
-  else
-  {
-    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 8, eStore);
-    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eStore);
-  }
-}
-
-__inline__
-Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
-{
-  bm_access_range_store(running_thread_get_segment()->bm, a1, a2);
-  return bm_store_has_conflict_with(thread_get_danger_set(), a1, a2);
-}
-
 /** Call this function whenever a thread is no longer using the memory
  *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
  *  increase.
diff --git a/exp-drd/drd_thread.h b/exp-drd/drd_thread.h
index ddbd086..d122978 100644
--- a/exp-drd/drd_thread.h
+++ b/exp-drd/drd_thread.h
@@ -30,6 +30,7 @@
 // Includes.
 
 #include "drd_segment.h"
+#include "pub_drd_bitmap.h"
 #include "pub_tool_libcassert.h"  // tl_assert()
 #include "pub_tool_stacktrace.h"  // StackTrace
 #include "pub_tool_threadstate.h" // VG_N_THREADS
@@ -122,17 +123,6 @@
 void thread_combine_vc(const DrdThreadId joiner, const DrdThreadId joinee);
 void thread_combine_vc2(const DrdThreadId tid, const VectorClock* const vc);
 
-Bool bm_access_load_1_triggers_conflict(const Addr a1);
-Bool bm_access_load_2_triggers_conflict(const Addr a1);
-Bool bm_access_load_4_triggers_conflict(const Addr a1);
-Bool bm_access_load_8_triggers_conflict(const Addr a1);
-Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2);
-Bool bm_access_store_1_triggers_conflict(const Addr a1);
-Bool bm_access_store_2_triggers_conflict(const Addr a1);
-Bool bm_access_store_4_triggers_conflict(const Addr a1);
-Bool bm_access_store_8_triggers_conflict(const Addr a1);
-Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2);
-
 void thread_stop_using_mem(const Addr a1, const Addr a2);
 void thread_start_recording(const DrdThreadId tid);
 void thread_stop_recording(const DrdThreadId tid);
diff --git a/exp-drd/drd_thread_bitmap.h b/exp-drd/drd_thread_bitmap.h
new file mode 100644
index 0000000..e310398
--- /dev/null
+++ b/exp-drd/drd_thread_bitmap.h
@@ -0,0 +1,161 @@
+/*
+  This file is part of drd, a data race detector.
+
+  Copyright (C) 2006-2008 Bart Van Assche
+  bart.vanassche@gmail.com
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+
+#ifndef __DRD_THREAD_BITMAP_H
+#define __DRD_THREAD_BITMAP_H
+
+#include "drd_bitmap.h"
+
+static __inline__
+Bool bm_access_load_1_triggers_conflict(const Addr a1)
+{
+  bm_access_load_1(running_thread_get_segment()->bm, a1);
+  return bm_load_1_has_conflict_with(thread_get_danger_set(), a1);
+}
+
+static __inline__
+Bool bm_access_load_2_triggers_conflict(const Addr a1)
+{
+  if ((a1 & 1) == 0)
+  {
+    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 2);
+    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 2);
+  }
+  else
+  {
+    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 2, eLoad);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 2, eLoad);
+  }
+}
+
+static __inline__
+Bool bm_access_load_4_triggers_conflict(const Addr a1)
+{
+  if ((a1 & 3) == 0)
+  {
+    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 4);
+    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 4);
+  }
+  else
+  {
+    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 4, eLoad);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 4, eLoad);
+  }
+}
+
+static __inline__
+Bool bm_access_load_8_triggers_conflict(const Addr a1)
+{
+  if ((a1 & 7) == 0)
+  {
+    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 8);
+    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 8);
+  }
+  else if ((a1 & 3) == 0)
+  {
+    bm_access_aligned_load(running_thread_get_segment()->bm, a1 + 0, 4);
+    bm_access_aligned_load(running_thread_get_segment()->bm, a1 + 4, 4);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eLoad);
+  }
+  else
+  {
+    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 8, eLoad);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eLoad);
+  }
+}
+
+static __inline__
+Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
+{
+  bm_access_range_load(running_thread_get_segment()->bm, a1, a2);
+  return bm_load_has_conflict_with(thread_get_danger_set(), a1, a2);
+}
+
+static __inline__
+Bool bm_access_store_1_triggers_conflict(const Addr a1)
+{
+  bm_access_store_1(running_thread_get_segment()->bm, a1);
+  return bm_store_1_has_conflict_with(thread_get_danger_set(), a1);
+}
+
+static __inline__
+Bool bm_access_store_2_triggers_conflict(const Addr a1)
+{
+  if ((a1 & 1) == 0)
+  {
+    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 2);
+    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 2);
+  }
+  else
+  {
+    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 2, eStore);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 2, eStore);
+  }
+}
+
+static __inline__
+Bool bm_access_store_4_triggers_conflict(const Addr a1)
+{
+  if ((a1 & 3) == 0)
+  {
+    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 4);
+    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 4);
+  }
+  else
+  {
+    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 4, eStore);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 4, eStore);
+  }
+}
+
+static __inline__
+Bool bm_access_store_8_triggers_conflict(const Addr a1)
+{
+  if ((a1 & 7) == 0)
+  {
+    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 8);
+    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 8);
+  }
+  else if ((a1 & 3) == 0)
+  {
+    bm_access_aligned_store(running_thread_get_segment()->bm, a1 + 0, 4);
+    bm_access_aligned_store(running_thread_get_segment()->bm, a1 + 4, 4);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eStore);
+  }
+  else
+  {
+    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 8, eStore);
+    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eStore);
+  }
+}
+
+static __inline__
+Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
+{
+  bm_access_range_store(running_thread_get_segment()->bm, a1, a2);
+  return bm_store_has_conflict_with(thread_get_danger_set(), a1, a2);
+}
+
+#endif // __DRD_THREAD_BITMAP_H
diff --git a/exp-drd/pub_drd_bitmap.h b/exp-drd/pub_drd_bitmap.h
index baa9934..981e74e 100644
--- a/exp-drd/pub_drd_bitmap.h
+++ b/exp-drd/pub_drd_bitmap.h
@@ -56,6 +56,9 @@
 // Function declarations.
 struct bitmap* bm_new(void);
 void bm_delete(struct bitmap* const bm);
+void bm_access_range(struct bitmap* const bm,
+                     const Addr a1, const Addr a2,
+                     const BmAccessTypeT access_type);
 void bm_access_range_load(struct bitmap* const bm,
                           const Addr a1, const Addr a2);
 void bm_access_load_1(struct bitmap* const bm, const Addr a1);