Wrapped the DRD_() macro around even more function and static variable names.
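
The DRD_() macro gives DRD's functions and file-scope variables a tool-specific
prefix so that exported names cannot collide with the Valgrind core or with
other tools, and so that every DRD symbol is easy to recognize. A minimal
sketch of the idea, assuming the usual Valgrind VGAPPEND() convention (the
real definition lives in the DRD headers, not in this patch):

    /* Sketch only -- token-pasting helper and the DRD name-mangling macro. */
    #define VGAPPEND(str1, str2) str1##str2
    #define DRD_(str)            VGAPPEND(vgDrd_, str)

    /* With these definitions, a declaration such as
     *   static Bool DRD_(s_trace_mutex);
     * expands to
     *   static Bool vgDrd_s_trace_mutex;
     * so the functions and statics renamed below all land in the
     * vgDrd_ namespace. */
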
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@9170 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/drd/drd_clientobj.c b/drd/drd_clientobj.c
index 6506182..2281909 100644
--- a/drd/drd_clientobj.c
+++ b/drd/drd_clientobj.c
@@ -38,24 +38,26 @@
/* Local variables. */
-static OSet* s_clientobj;
-static Bool s_trace_clientobj;
+static OSet* DRD_(s_clientobj_set);
+static Bool DRD_(s_trace_clientobj);
/* Function definitions. */
void DRD_(clientobj_set_trace)(const Bool trace)
{
- s_trace_clientobj = trace;
+ DRD_(s_trace_clientobj) = trace;
}
/** Initialize the client object set. */
void DRD_(clientobj_init)(void)
{
- tl_assert(s_clientobj == 0);
- s_clientobj = VG_(OSetGen_Create)(0, 0, VG_(malloc), "drd.clientobj.ci.1",
- VG_(free));
- tl_assert(s_clientobj);
+ tl_assert(DRD_(s_clientobj_set) == 0);
+ DRD_(s_clientobj_set) = VG_(OSetGen_Create)(0, 0,
+ VG_(malloc),
+ "drd.clientobj.ci.1",
+ VG_(free));
+ tl_assert(DRD_(s_clientobj_set));
}
/**
@@ -65,10 +67,10 @@
*/
void DRD_(clientobj_cleanup)(void)
{
- tl_assert(s_clientobj);
- tl_assert(VG_(OSetGen_Size)(s_clientobj) == 0);
- VG_(OSetGen_Destroy)(s_clientobj);
- s_clientobj = 0;
+ tl_assert(DRD_(s_clientobj_set));
+ tl_assert(VG_(OSetGen_Size)(DRD_(s_clientobj_set)) == 0);
+ VG_(OSetGen_Destroy)(DRD_(s_clientobj_set));
+ DRD_(s_clientobj_set) = 0;
}
/** Return the data associated with the client object at client address addr.
@@ -77,7 +79,7 @@
*/
DrdClientobj* DRD_(clientobj_get_any)(const Addr addr)
{
- return VG_(OSetGen_Lookup)(s_clientobj, &addr);
+ return VG_(OSetGen_Lookup)(DRD_(s_clientobj_set), &addr);
}
/** Return the data associated with the client object at client address addr
@@ -87,7 +89,7 @@
DrdClientobj* DRD_(clientobj_get)(const Addr addr, const ObjType t)
{
DrdClientobj* p;
- p = VG_(OSetGen_Lookup)(s_clientobj, &addr);
+ p = VG_(OSetGen_Lookup)(DRD_(s_clientobj_set), &addr);
if (p && p->any.type == t)
return p;
return 0;
@@ -101,8 +103,8 @@
DrdClientobj *p;
tl_assert(a1 < a2);
- VG_(OSetGen_ResetIter)(s_clientobj);
- for ( ; (p = VG_(OSetGen_Next)(s_clientobj)) != 0; )
+ VG_(OSetGen_ResetIter)(DRD_(s_clientobj_set));
+ for ( ; (p = VG_(OSetGen_Next)(DRD_(s_clientobj_set))) != 0; )
{
if (a1 <= p->any.a1 && p->any.a1 < a2)
{
@@ -121,20 +123,20 @@
DrdClientobj* p;
tl_assert(! DRD_(clientobj_present)(a1, a1 + 1));
- tl_assert(VG_(OSetGen_Lookup)(s_clientobj, &a1) == 0);
+ tl_assert(VG_(OSetGen_Lookup)(DRD_(s_clientobj_set), &a1) == 0);
- if (s_trace_clientobj)
+ if (DRD_(s_trace_clientobj))
{
VG_(message)(Vg_UserMsg, "Adding client object 0x%lx of type %d", a1, t);
}
- p = VG_(OSetGen_AllocNode)(s_clientobj, sizeof(*p));
+ p = VG_(OSetGen_AllocNode)(DRD_(s_clientobj_set), sizeof(*p));
VG_(memset)(p, 0, sizeof(*p));
p->any.a1 = a1;
p->any.type = t;
p->any.first_observed_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
- VG_(OSetGen_Insert)(s_clientobj, p);
- tl_assert(VG_(OSetGen_Lookup)(s_clientobj, &a1) == p);
+ VG_(OSetGen_Insert)(DRD_(s_clientobj_set), p);
+ tl_assert(VG_(OSetGen_Lookup)(DRD_(s_clientobj_set), &a1) == p);
DRD_(start_suppression)(a1, a1 + 1, "clientobj");
return p;
}
@@ -143,7 +145,7 @@
{
DrdClientobj* p;
- if (s_trace_clientobj)
+ if (DRD_(s_trace_clientobj))
{
VG_(message)(Vg_UserMsg, "Removing client object 0x%lx of type %d",
addr, t);
@@ -153,15 +155,15 @@
#endif
}
- p = VG_(OSetGen_Lookup)(s_clientobj, &addr);
+ p = VG_(OSetGen_Lookup)(DRD_(s_clientobj_set), &addr);
tl_assert(p->any.type == t);
- p = VG_(OSetGen_Remove)(s_clientobj, &addr);
+ p = VG_(OSetGen_Remove)(DRD_(s_clientobj_set), &addr);
if (p)
{
- tl_assert(VG_(OSetGen_Lookup)(s_clientobj, &addr) == 0);
+ tl_assert(VG_(OSetGen_Lookup)(DRD_(s_clientobj_set), &addr) == 0);
tl_assert(p->any.cleanup);
(*p->any.cleanup)(p);
- VG_(OSetGen_FreeNode)(s_clientobj, p);
+ VG_(OSetGen_FreeNode)(DRD_(s_clientobj_set), p);
return True;
}
return False;
@@ -172,13 +174,13 @@
Addr removed_at;
DrdClientobj* p;
- tl_assert(s_clientobj);
+ tl_assert(DRD_(s_clientobj_set));
if (! DRD_(is_any_suppressed)(a1, a2))
return;
- VG_(OSetGen_ResetIter)(s_clientobj);
- p = VG_(OSetGen_Next)(s_clientobj);
+ VG_(OSetGen_ResetIter)(DRD_(s_clientobj_set));
+ p = VG_(OSetGen_Next)(DRD_(s_clientobj_set));
for ( ; p != 0; )
{
if (a1 <= p->any.a1 && p->any.a1 < a2)
@@ -187,27 +189,28 @@
DRD_(clientobj_remove)(p->any.a1, p->any.type);
/* The above call removes an element from the oset and hence */
/* invalidates the iterator. Set the iterator back. */
- VG_(OSetGen_ResetIter)(s_clientobj);
- while ((p = VG_(OSetGen_Next)(s_clientobj)) != 0
+ VG_(OSetGen_ResetIter)(DRD_(s_clientobj_set));
+ while ((p = VG_(OSetGen_Next)(DRD_(s_clientobj_set))) != 0
&& p->any.a1 <= removed_at)
{ }
}
else
{
- p = VG_(OSetGen_Next)(s_clientobj);
+ p = VG_(OSetGen_Next)(DRD_(s_clientobj_set));
}
}
}
void DRD_(clientobj_resetiter)(void)
{
- VG_(OSetGen_ResetIter)(s_clientobj);
+ VG_(OSetGen_ResetIter)(DRD_(s_clientobj_set));
}
DrdClientobj* DRD_(clientobj_next)(const ObjType t)
{
DrdClientobj* p;
- while ((p = VG_(OSetGen_Next)(s_clientobj)) != 0 && p->any.type != t)
+ while ((p = VG_(OSetGen_Next)(DRD_(s_clientobj_set))) != 0
+ && p->any.type != t)
;
return p;
}
diff --git a/drd/drd_clientreq.c b/drd/drd_clientreq.c
index f79973e..8ce1334 100644
--- a/drd/drd_clientreq.c
+++ b/drd/drd_clientreq.c
@@ -162,7 +162,7 @@
case VG_USERREQ__PRE_MUTEX_INIT:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- mutex_init(arg[1], arg[2]);
+ DRD_(mutex_init)(arg[1], arg[2]);
break;
case VG_USERREQ__POST_MUTEX_INIT:
@@ -175,22 +175,22 @@
case VG_USERREQ__POST_MUTEX_DESTROY:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- mutex_post_destroy(arg[1]);
+ DRD_(mutex_post_destroy)(arg[1]);
break;
case VG_USERREQ__PRE_MUTEX_LOCK:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- mutex_pre_lock(arg[1], arg[2], arg[3]);
+ DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
break;
case VG_USERREQ__POST_MUTEX_LOCK:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- mutex_post_lock(arg[1], arg[2], False/*post_cond_wait*/);
+ DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
break;
case VG_USERREQ__PRE_MUTEX_UNLOCK:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- mutex_unlock(arg[1], arg[2]);
+ DRD_(mutex_unlock)(arg[1], arg[2]);
break;
case VG_USERREQ__POST_MUTEX_UNLOCK:
@@ -208,7 +208,7 @@
case VG_USERREQ__PRE_COND_INIT:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- cond_pre_init(arg[1]);
+ DRD_(cond_pre_init)(arg[1]);
break;
case VG_USERREQ__POST_COND_INIT:
@@ -221,7 +221,7 @@
case VG_USERREQ__POST_COND_DESTROY:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- cond_post_destroy(arg[1]);
+ DRD_(cond_post_destroy)(arg[1]);
break;
case VG_USERREQ__PRE_COND_WAIT:
@@ -230,8 +230,8 @@
const Addr cond = arg[1];
const Addr mutex = arg[2];
const MutexT mutex_type = arg[3];
- mutex_unlock(mutex, mutex_type);
- cond_pre_wait(cond, mutex);
+ DRD_(mutex_unlock)(mutex, mutex_type);
+ DRD_(cond_pre_wait)(cond, mutex);
}
break;
@@ -241,14 +241,14 @@
const Addr cond = arg[1];
const Addr mutex = arg[2];
const Bool took_lock = arg[3];
- cond_post_wait(cond);
- mutex_post_lock(mutex, took_lock, True);
+ DRD_(cond_post_wait)(cond);
+ DRD_(mutex_post_lock)(mutex, took_lock, True);
}
break;
case VG_USERREQ__PRE_COND_SIGNAL:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- cond_pre_signal(arg[1]);
+ DRD_(cond_pre_signal)(arg[1]);
break;
case VG_USERREQ__POST_COND_SIGNAL:
@@ -257,7 +257,7 @@
case VG_USERREQ__PRE_COND_BROADCAST:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- cond_pre_broadcast(arg[1]);
+ DRD_(cond_pre_broadcast)(arg[1]);
break;
case VG_USERREQ__POST_COND_BROADCAST:
@@ -266,7 +266,7 @@
case VG_USERREQ__PRE_SEM_INIT:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- semaphore_init(arg[1], arg[2], arg[3]);
+ DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
break;
case VG_USERREQ__POST_SEM_INIT:
@@ -279,27 +279,27 @@
case VG_USERREQ__POST_SEM_DESTROY:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- semaphore_destroy(arg[1]);
+ DRD_(semaphore_destroy)(arg[1]);
break;
case VG_USERREQ__PRE_SEM_WAIT:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- semaphore_pre_wait(arg[1]);
+ DRD_(semaphore_pre_wait)(arg[1]);
break;
case VG_USERREQ__POST_SEM_WAIT:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- semaphore_post_wait(drd_tid, arg[1], arg[2]);
+ DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
break;
case VG_USERREQ__PRE_SEM_POST:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- semaphore_pre_post(drd_tid, arg[1]);
+ DRD_(semaphore_pre_post)(drd_tid, arg[1]);
break;
case VG_USERREQ__POST_SEM_POST:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- semaphore_post_post(drd_tid, arg[1], arg[2]);
+ DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
break;
case VG_USERREQ__PRE_BARRIER_INIT:
@@ -331,36 +331,36 @@
break;
case VG_USERREQ__PRE_RWLOCK_INIT:
- rwlock_pre_init(arg[1]);
+ DRD_(rwlock_pre_init)(arg[1]);
break;
case VG_USERREQ__POST_RWLOCK_DESTROY:
- rwlock_post_destroy(arg[1]);
+ DRD_(rwlock_post_destroy)(arg[1]);
break;
case VG_USERREQ__PRE_RWLOCK_RDLOCK:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- rwlock_pre_rdlock(arg[1]);
+ DRD_(rwlock_pre_rdlock)(arg[1]);
break;
case VG_USERREQ__POST_RWLOCK_RDLOCK:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- rwlock_post_rdlock(arg[1], arg[2]);
+ DRD_(rwlock_post_rdlock)(arg[1], arg[2]);
break;
case VG_USERREQ__PRE_RWLOCK_WRLOCK:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- rwlock_pre_wrlock(arg[1]);
+ DRD_(rwlock_pre_wrlock)(arg[1]);
break;
case VG_USERREQ__POST_RWLOCK_WRLOCK:
if (DRD_(thread_leave_synchr)(drd_tid) == 0)
- rwlock_post_wrlock(arg[1], arg[2]);
+ DRD_(rwlock_post_wrlock)(arg[1], arg[2]);
break;
case VG_USERREQ__PRE_RWLOCK_UNLOCK:
if (DRD_(thread_enter_synchr)(drd_tid) == 0)
- rwlock_pre_unlock(arg[1]);
+ DRD_(rwlock_pre_unlock)(arg[1]);
break;
case VG_USERREQ__POST_RWLOCK_UNLOCK:
diff --git a/drd/drd_cond.c b/drd/drd_cond.c
index acb7ee6..b71f676 100644
--- a/drd/drd_cond.c
+++ b/drd/drd_cond.c
@@ -38,35 +38,35 @@
/* Local functions. */
-static void cond_cleanup(struct cond_info* p);
+static void DRD_(cond_cleanup)(struct cond_info* p);
/* Local variables. */
-static Bool s_drd_report_signal_unlocked = True;
-static Bool s_trace_cond;
+static Bool DRD_(s_report_signal_unlocked) = True;
+static Bool DRD_(s_trace_cond);
/* Function definitions. */
-void cond_set_report_signal_unlocked(const Bool r)
+void DRD_(cond_set_report_signal_unlocked)(const Bool r)
{
- s_drd_report_signal_unlocked = r;
+ DRD_(s_report_signal_unlocked) = r;
}
-void cond_set_trace(const Bool trace_cond)
+void DRD_(cond_set_trace)(const Bool trace_cond)
{
- s_trace_cond = trace_cond;
+ DRD_(s_trace_cond) = trace_cond;
}
static
-void cond_initialize(struct cond_info* const p, const Addr cond)
+void DRD_(cond_initialize)(struct cond_info* const p, const Addr cond)
{
tl_assert(cond != 0);
tl_assert(p->a1 == cond);
tl_assert(p->type == ClientCondvar);
- p->cleanup = (void(*)(DrdClientobj*))cond_cleanup;
+ p->cleanup = (void(*)(DrdClientobj*))(DRD_(cond_cleanup));
p->waiter_count = 0;
p->mutex = 0;
}
@@ -75,7 +75,7 @@
* Free the memory that was allocated by cond_initialize(). Called by
* DRD_(clientobj_remove)().
*/
-static void cond_cleanup(struct cond_info* p)
+static void DRD_(cond_cleanup)(struct cond_info* p)
{
tl_assert(p);
if (p->mutex)
@@ -95,7 +95,7 @@
}
}
-static struct cond_info* cond_get_or_allocate(const Addr cond)
+static struct cond_info* DRD_(cond_get_or_allocate)(const Addr cond)
{
struct cond_info *p;
@@ -104,23 +104,23 @@
if (p == 0)
{
p = &(DRD_(clientobj_add)(cond, ClientCondvar)->cond);
- cond_initialize(p, cond);
+ DRD_(cond_initialize)(p, cond);
}
return p;
}
-static struct cond_info* cond_get(const Addr cond)
+static struct cond_info* DRD_(cond_get)(const Addr cond)
{
tl_assert(offsetof(DrdClientobj, cond) == 0);
return &(DRD_(clientobj_get)(cond, ClientCondvar)->cond);
}
/** Called before pthread_cond_init(). */
-void cond_pre_init(const Addr cond)
+void DRD_(cond_pre_init)(const Addr cond)
{
struct cond_info* p;
- if (s_trace_cond)
+ if (DRD_(s_trace_cond))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] cond_init cond 0x%lx",
@@ -129,7 +129,7 @@
cond);
}
- p = cond_get(cond);
+ p = DRD_(cond_get)(cond);
if (p)
{
@@ -141,15 +141,15 @@
&cei);
}
- p = cond_get_or_allocate(cond);
+ p = DRD_(cond_get_or_allocate)(cond);
}
/** Called after pthread_cond_destroy(). */
-void cond_post_destroy(const Addr cond)
+void DRD_(cond_post_destroy)(const Addr cond)
{
struct cond_info* p;
- if (s_trace_cond)
+ if (DRD_(s_trace_cond))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] cond_destroy cond 0x%lx",
@@ -158,7 +158,7 @@
cond);
}
- p = cond_get(cond);
+ p = DRD_(cond_get)(cond);
if (p == 0)
{
CondErrInfo cei = { .cond = cond };
@@ -187,12 +187,12 @@
/** Called before pthread_cond_wait(). Note: before this function is called,
* mutex_unlock() has already been called from drd_clientreq.c.
*/
-int cond_pre_wait(const Addr cond, const Addr mutex)
+int DRD_(cond_pre_wait)(const Addr cond, const Addr mutex)
{
struct cond_info* p;
struct mutex_info* q;
- if (s_trace_cond)
+ if (DRD_(s_trace_cond))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] cond_pre_wait cond 0x%lx",
@@ -201,7 +201,7 @@
cond);
}
- p = cond_get_or_allocate(cond);
+ p = DRD_(cond_get_or_allocate)(cond);
tl_assert(p);
if (p->waiter_count == 0)
@@ -220,8 +220,9 @@
&cwei);
}
tl_assert(p->mutex);
- q = mutex_get(p->mutex);
- if (q && q->owner == DRD_(thread_get_running_tid)() && q->recursion_count > 0)
+ q = DRD_(mutex_get)(p->mutex);
+ if (q
+ && q->owner == DRD_(thread_get_running_tid)() && q->recursion_count > 0)
{
const ThreadId vg_tid = VG_(get_running_tid)();
MutexErrInfo MEI = { q->a1, q->recursion_count, q->owner };
@@ -233,18 +234,18 @@
}
else if (q == 0)
{
- not_a_mutex(p->mutex);
+ DRD_(not_a_mutex)(p->mutex);
}
return ++p->waiter_count;
}
/** Called after pthread_cond_wait(). */
-int cond_post_wait(const Addr cond)
+int DRD_(cond_post_wait)(const Addr cond)
{
struct cond_info* p;
- if (s_trace_cond)
+ if (DRD_(s_trace_cond))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] cond_post_wait cond 0x%lx",
@@ -253,7 +254,7 @@
cond);
}
- p = cond_get(cond);
+ p = DRD_(cond_get)(cond);
if (p)
{
if (p->waiter_count > 0)
@@ -269,16 +270,16 @@
return 0;
}
-static void cond_signal(Addr const cond)
+static void DRD_(cond_signal)(Addr const cond)
{
const ThreadId vg_tid = VG_(get_running_tid)();
const DrdThreadId drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
- struct cond_info* const cond_p = cond_get(cond);
+ struct cond_info* const cond_p = DRD_(cond_get)(cond);
if (cond_p && cond_p->waiter_count > 0)
{
- if (s_drd_report_signal_unlocked
- && ! mutex_is_locked_by(cond_p->mutex, drd_tid))
+ if (DRD_(s_report_signal_unlocked)
+ && ! DRD_(mutex_is_locked_by)(cond_p->mutex, drd_tid))
{
/* A signal is sent while the associated mutex has not been locked. */
/* This can indicate but is not necessarily a race condition. */
@@ -300,9 +301,9 @@
}
/** Called before pthread_cond_signal(). */
-void cond_pre_signal(Addr const cond)
+void DRD_(cond_pre_signal)(Addr const cond)
{
- if (s_trace_cond)
+ if (DRD_(s_trace_cond))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] cond_signal cond 0x%lx",
@@ -311,13 +312,13 @@
cond);
}
- cond_signal(cond);
+ DRD_(cond_signal)(cond);
}
/** Called before pthread_cond_broadcast(). */
-void cond_pre_broadcast(Addr const cond)
+void DRD_(cond_pre_broadcast)(Addr const cond)
{
- if (s_trace_cond)
+ if (DRD_(s_trace_cond))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] cond_broadcast cond 0x%lx",
@@ -326,9 +327,9 @@
cond);
}
- cond_signal(cond);
+ DRD_(cond_signal)(cond);
}
/** Called after pthread_cond_destroy(). */
-void cond_thread_delete(const DrdThreadId tid)
+void DRD_(cond_thread_delete)(const DrdThreadId tid)
{ }
diff --git a/drd/drd_cond.h b/drd/drd_cond.h
index 2d27327..d2e7457 100644
--- a/drd/drd_cond.h
+++ b/drd/drd_cond.h
@@ -38,15 +38,15 @@
/* Function declarations. */
-void cond_set_report_signal_unlocked(const Bool r);
-void cond_set_trace(const Bool trace_cond);
-void cond_pre_init(const Addr cond);
-void cond_post_destroy(const Addr cond);
-int cond_pre_wait(const Addr cond, const Addr mutex);
-int cond_post_wait(const Addr cond);
-void cond_pre_signal(const Addr cond);
-void cond_pre_broadcast(const Addr cond);
-void cond_thread_delete(const DrdThreadId tid);
+void DRD_(cond_set_report_signal_unlocked)(const Bool r);
+void DRD_(cond_set_trace)(const Bool trace_cond);
+void DRD_(cond_pre_init)(const Addr cond);
+void DRD_(cond_post_destroy)(const Addr cond);
+int DRD_(cond_pre_wait)(const Addr cond, const Addr mutex);
+int DRD_(cond_post_wait)(const Addr cond);
+void DRD_(cond_pre_signal)(const Addr cond);
+void DRD_(cond_pre_broadcast)(const Addr cond);
+void DRD_(cond_thread_delete)(const DrdThreadId tid);
#endif /* __DRD_COND_H */
diff --git a/drd/drd_main.c b/drd/drd_main.c
index 34c4779..44f64a3 100644
--- a/drd/drd_main.c
+++ b/drd/drd_main.c
@@ -111,16 +111,16 @@
DRD_(set_check_stack_accesses)(check_stack_accesses);
if (exclusive_threshold_ms != -1)
{
- mutex_set_lock_threshold(exclusive_threshold_ms);
- rwlock_set_exclusive_threshold(exclusive_threshold_ms);
+ DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
+ DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
}
if (report_signal_unlocked != -1)
{
- cond_set_report_signal_unlocked(report_signal_unlocked);
+ DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
}
if (shared_threshold_ms != -1)
{
- rwlock_set_shared_threshold(shared_threshold_ms);
+ DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
}
if (segment_merging != -1)
DRD_(thread_set_segment_merging)(segment_merging);
@@ -136,7 +136,7 @@
if (trace_clientobj != -1)
DRD_(clientobj_set_trace)(trace_clientobj);
if (trace_cond != -1)
- cond_set_trace(trace_cond);
+ DRD_(cond_set_trace)(trace_cond);
if (trace_csw != -1)
DRD_(thread_trace_context_switches)(trace_csw);
if (trace_fork_join != -1)
@@ -144,13 +144,13 @@
if (trace_conflict_set != -1)
DRD_(thread_trace_conflict_set)(trace_conflict_set);
if (trace_mutex != -1)
- mutex_set_trace(trace_mutex);
+ DRD_(mutex_set_trace)(trace_mutex);
if (trace_rwlock != -1)
- rwlock_set_trace(trace_rwlock);
+ DRD_(rwlock_set_trace)(trace_rwlock);
if (trace_segment != -1)
DRD_(sg_set_trace)(trace_segment);
if (trace_semaphore != -1)
- semaphore_set_trace(trace_semaphore);
+ DRD_(semaphore_set_trace)(trace_semaphore);
if (trace_suppression != -1)
DRD_(suppression_set_trace)(trace_suppression);
@@ -554,9 +554,9 @@
DRD_(thread_get_discard_ordered_segments_count)());
VG_(message)(Vg_UserMsg,
" (%lld m, %lld rw, %lld s, %lld b)",
- get_mutex_segment_creation_count(),
- get_rwlock_segment_creation_count(),
- get_semaphore_segment_creation_count(),
+ DRD_(get_mutex_segment_creation_count)(),
+ DRD_(get_rwlock_segment_creation_count)(),
+ DRD_(get_semaphore_segment_creation_count)(),
DRD_(get_barrier_segment_creation_count)());
VG_(message)(Vg_UserMsg,
" bitmaps: %lld level 1 / %lld level 2 bitmap refs",
@@ -567,7 +567,7 @@
bm_get_bitmap2_creation_count());
VG_(message)(Vg_UserMsg,
" mutex: %lld non-recursive lock/unlock events.",
- get_mutex_lock_count());
+ DRD_(get_mutex_lock_count)());
drd_print_malloc_stats();
}
}
diff --git a/drd/drd_mutex.c b/drd/drd_mutex.c
index 3f26508..4ca6e84 100644
--- a/drd/drd_mutex.c
+++ b/drd/drd_mutex.c
@@ -36,42 +36,42 @@
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
-// Local functions.
+/* Local functions. */
-static void mutex_cleanup(struct mutex_info* p);
-static Bool mutex_is_locked(struct mutex_info* const p);
+static void DRD_(mutex_cleanup)(struct mutex_info* p);
+static Bool DRD_(mutex_is_locked)(struct mutex_info* const p);
-// Local variables.
+/* Local variables. */
-static Bool s_trace_mutex;
-static ULong s_mutex_lock_count;
-static ULong s_mutex_segment_creation_count;
-static UInt s_mutex_lock_threshold_ms = 1000 * 1000;
+static Bool DRD_(s_trace_mutex);
+static ULong DRD_(s_mutex_lock_count);
+static ULong DRD_(s_mutex_segment_creation_count);
+static UInt DRD_(s_mutex_lock_threshold_ms) = 1000 * 1000;
-// Function definitions.
+/* Function definitions. */
-void mutex_set_trace(const Bool trace_mutex)
+void DRD_(mutex_set_trace)(const Bool trace_mutex)
{
tl_assert(!! trace_mutex == trace_mutex);
- s_trace_mutex = trace_mutex;
+ DRD_(s_trace_mutex) = trace_mutex;
}
-void mutex_set_lock_threshold(const UInt lock_threshold_ms)
+void DRD_(mutex_set_lock_threshold)(const UInt lock_threshold_ms)
{
- s_mutex_lock_threshold_ms = lock_threshold_ms;
+ DRD_(s_mutex_lock_threshold_ms) = lock_threshold_ms;
}
static
-void mutex_initialize(struct mutex_info* const p,
- const Addr mutex, const MutexT mutex_type)
+void DRD_(mutex_initialize)(struct mutex_info* const p,
+ const Addr mutex, const MutexT mutex_type)
{
tl_assert(mutex);
tl_assert(mutex_type != mutex_type_unknown);
tl_assert(p->a1 == mutex);
- p->cleanup = (void(*)(DrdClientobj*))&mutex_cleanup;
+ p->cleanup = (void(*)(DrdClientobj*))&(DRD_(mutex_cleanup));
p->mutex_type = mutex_type;
p->recursion_count = 0;
p->owner = DRD_INVALID_THREADID;
@@ -81,23 +81,23 @@
}
/** Deallocate the memory that was allocated by mutex_initialize(). */
-static void mutex_cleanup(struct mutex_info* p)
+static void DRD_(mutex_cleanup)(struct mutex_info* p)
{
tl_assert(p);
- if (s_trace_mutex)
+ if (DRD_(s_trace_mutex))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] mutex_destroy %s 0x%lx rc %d owner %d",
VG_(get_running_tid)(),
DRD_(thread_get_running_tid)(),
- mutex_get_typename(p),
+ DRD_(mutex_get_typename)(p),
p->a1,
p ? p->recursion_count : -1,
p ? p->owner : DRD_INVALID_THREADID);
}
- if (mutex_is_locked(p))
+ if (DRD_(mutex_is_locked)(p))
{
MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
VG_(maybe_record_error)(VG_(get_running_tid)(),
@@ -112,7 +112,7 @@
}
/** Let Valgrind report that there is no mutex object at address 'mutex'. */
-void not_a_mutex(const Addr mutex)
+void DRD_(not_a_mutex)(const Addr mutex)
{
MutexErrInfo MEI = { mutex, -1, DRD_INVALID_THREADID };
VG_(maybe_record_error)(VG_(get_running_tid)(),
@@ -124,7 +124,7 @@
static
struct mutex_info*
-mutex_get_or_allocate(const Addr mutex, const MutexT mutex_type)
+DRD_(mutex_get_or_allocate)(const Addr mutex, const MutexT mutex_type)
{
struct mutex_info* p;
@@ -137,18 +137,18 @@
if (DRD_(clientobj_present)(mutex, mutex + 1))
{
- not_a_mutex(mutex);
+ DRD_(not_a_mutex)(mutex);
return 0;
}
tl_assert(mutex_type != mutex_type_unknown);
p = &(DRD_(clientobj_add)(mutex, ClientMutex)->mutex);
- mutex_initialize(p, mutex, mutex_type);
+ DRD_(mutex_initialize)(p, mutex, mutex_type);
return p;
}
-struct mutex_info* mutex_get(const Addr mutex)
+struct mutex_info* DRD_(mutex_get)(const Addr mutex)
{
tl_assert(offsetof(DrdClientobj, mutex) == 0);
return &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
@@ -156,29 +156,29 @@
/** Called before pthread_mutex_init(). */
struct mutex_info*
-mutex_init(const Addr mutex, const MutexT mutex_type)
+DRD_(mutex_init)(const Addr mutex, const MutexT mutex_type)
{
struct mutex_info* p;
tl_assert(mutex_type != mutex_type_unknown);
- if (s_trace_mutex)
+ if (DRD_(s_trace_mutex))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] mutex_init %s 0x%lx",
VG_(get_running_tid)(),
DRD_(thread_get_running_tid)(),
- mutex_type_name(mutex_type),
+ DRD_(mutex_type_name)(mutex_type),
mutex);
}
if (mutex_type == mutex_type_invalid_mutex)
{
- not_a_mutex(mutex);
+ DRD_(not_a_mutex)(mutex);
return 0;
}
- p = mutex_get(mutex);
+ p = DRD_(mutex_get)(mutex);
if (p)
{
const ThreadId vg_tid = VG_(get_running_tid)();
@@ -191,20 +191,20 @@
&MEI);
return p;
}
- p = mutex_get_or_allocate(mutex, mutex_type);
+ p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
return p;
}
/** Called after pthread_mutex_destroy(). */
-void mutex_post_destroy(const Addr mutex)
+void DRD_(mutex_post_destroy)(const Addr mutex)
{
struct mutex_info* p;
- p = mutex_get(mutex);
+ p = DRD_(mutex_get)(mutex);
if (p == 0)
{
- not_a_mutex(mutex);
+ DRD_(not_a_mutex)(mutex);
return;
}
@@ -216,23 +216,23 @@
* an attempt is made to lock recursively a synchronization object that must
* not be locked recursively.
*/
-void mutex_pre_lock(const Addr mutex, MutexT mutex_type,
- const Bool trylock)
+void DRD_(mutex_pre_lock)(const Addr mutex, MutexT mutex_type,
+ const Bool trylock)
{
struct mutex_info* p;
- p = mutex_get_or_allocate(mutex, mutex_type);
+ p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
if (mutex_type == mutex_type_unknown)
mutex_type = p->mutex_type;
- if (s_trace_mutex)
+ if (DRD_(s_trace_mutex))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] %s %s 0x%lx rc %d owner %d",
VG_(get_running_tid)(),
DRD_(thread_get_running_tid)(),
trylock ? "pre_mutex_lock " : "mutex_trylock ",
- p ? mutex_get_typename(p) : "(?)",
+ p ? DRD_(mutex_get_typename)(p) : "(?)",
mutex,
p ? p->recursion_count : -1,
p ? p->owner : DRD_INVALID_THREADID);
@@ -240,7 +240,7 @@
if (p == 0)
{
- not_a_mutex(mutex);
+ DRD_(not_a_mutex)(mutex);
return;
}
@@ -248,7 +248,7 @@
if (mutex_type == mutex_type_invalid_mutex)
{
- not_a_mutex(mutex);
+ DRD_(not_a_mutex)(mutex);
return;
}
@@ -271,22 +271,22 @@
* Note: this function must be called after pthread_mutex_lock() has been
* called, or a race condition is triggered !
*/
-void mutex_post_lock(const Addr mutex, const Bool took_lock,
- const Bool post_cond_wait)
+void DRD_(mutex_post_lock)(const Addr mutex, const Bool took_lock,
+ const Bool post_cond_wait)
{
const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
struct mutex_info* p;
- p = mutex_get(mutex);
+ p = DRD_(mutex_get)(mutex);
- if (s_trace_mutex)
+ if (DRD_(s_trace_mutex))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] %s %s 0x%lx rc %d owner %d%s",
VG_(get_running_tid)(),
drd_tid,
post_cond_wait ? "cond_post_wait " : "post_mutex_lock",
- p ? mutex_get_typename(p) : "(?)",
+ p ? DRD_(mutex_get_typename)(p) : "(?)",
mutex,
p ? p->recursion_count : 0,
p ? p->owner : VG_INVALID_THREADID,
@@ -306,12 +306,12 @@
DRD_(thread_combine_vc2)(drd_tid, &p->last_locked_segment->vc);
}
DRD_(thread_new_segment)(drd_tid);
- s_mutex_segment_creation_count++;
+ DRD_(s_mutex_segment_creation_count)++;
p->owner = drd_tid;
p->acquiry_time_ms = VG_(read_millisecond_timer)();
p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
- s_mutex_lock_count++;
+ DRD_(s_mutex_lock_count)++;
}
else if (p->owner != drd_tid)
{
@@ -325,41 +325,42 @@
p->recursion_count++;
}
-/** Update mutex_info state when unlocking the pthread_mutex_t mutex.
+/**
+ * Update mutex_info state when unlocking the pthread_mutex_t mutex.
*
- * @param mutex Pointer to pthread_mutex_t data structure in the client space.
- * @param tid ThreadId of the thread calling pthread_mutex_unlock().
- * @param vc Pointer to the current vector clock of thread tid.
+ * @param mutex Pointer to pthread_mutex_t data structure in the client space.
+ * @param tid ThreadId of the thread calling pthread_mutex_unlock().
+ * @param vc Pointer to the current vector clock of thread tid.
*
- * @return New value of the mutex recursion count.
+ * @return New value of the mutex recursion count.
*
- * @note This function must be called before pthread_mutex_unlock() is called,
- * or a race condition is triggered !
+ * @note This function must be called before pthread_mutex_unlock() is called,
+ * or a race condition is triggered !
*/
-void mutex_unlock(const Addr mutex, MutexT mutex_type)
+void DRD_(mutex_unlock)(const Addr mutex, MutexT mutex_type)
{
const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
const ThreadId vg_tid = VG_(get_running_tid)();
struct mutex_info* p;
- p = mutex_get(mutex);
+ p = DRD_(mutex_get)(mutex);
if (mutex_type == mutex_type_unknown)
mutex_type = p->mutex_type;
- if (s_trace_mutex)
+ if (DRD_(s_trace_mutex))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] mutex_unlock %s 0x%lx rc %d",
vg_tid,
drd_tid,
- p ? mutex_get_typename(p) : "(?)",
+ p ? DRD_(mutex_get_typename)(p) : "(?)",
mutex,
p ? p->recursion_count : 0);
}
if (p == 0 || mutex_type == mutex_type_invalid_mutex)
{
- not_a_mutex(mutex);
+ DRD_(not_a_mutex)(mutex);
return;
}
@@ -399,13 +400,13 @@
if (p->recursion_count == 0)
{
- if (s_mutex_lock_threshold_ms > 0)
+ if (DRD_(s_mutex_lock_threshold_ms) > 0)
{
ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
- if (held > s_mutex_lock_threshold_ms)
+ if (held > DRD_(s_mutex_lock_threshold_ms))
{
HoldtimeErrInfo HEI
- = { mutex, p->acquired_at, held, s_mutex_lock_threshold_ms };
+ = { mutex, p->acquired_at, held, DRD_(s_mutex_lock_threshold_ms) };
VG_(maybe_record_error)(vg_tid,
HoldtimeErr,
VG_(get_IP)(vg_tid),
@@ -421,31 +422,31 @@
DRD_(thread_get_latest_segment)(&p->last_locked_segment, drd_tid);
DRD_(thread_new_segment)(drd_tid);
p->acquired_at = 0;
- s_mutex_segment_creation_count++;
+ DRD_(s_mutex_segment_creation_count)++;
}
}
void DRD_(spinlock_init_or_unlock)(const Addr spinlock)
{
- struct mutex_info* mutex_p = mutex_get(spinlock);
+ struct mutex_info* mutex_p = DRD_(mutex_get)(spinlock);
if (mutex_p)
{
- mutex_unlock(spinlock, mutex_type_spinlock);
+ DRD_(mutex_unlock)(spinlock, mutex_type_spinlock);
}
else
{
- mutex_init(spinlock, mutex_type_spinlock);
+ DRD_(mutex_init)(spinlock, mutex_type_spinlock);
}
}
-const char* mutex_get_typename(struct mutex_info* const p)
+const char* DRD_(mutex_get_typename)(struct mutex_info* const p)
{
tl_assert(p);
- return mutex_type_name(p->mutex_type);
+ return DRD_(mutex_type_name)(p->mutex_type);
}
-const char* mutex_type_name(const MutexT mt)
+const char* DRD_(mutex_type_name)(const MutexT mt)
{
switch (mt)
{
@@ -466,15 +467,15 @@
}
/** Return true if the specified mutex is locked by any thread. */
-static Bool mutex_is_locked(struct mutex_info* const p)
+static Bool DRD_(mutex_is_locked)(struct mutex_info* const p)
{
tl_assert(p);
return (p->recursion_count > 0);
}
-Bool mutex_is_locked_by(const Addr mutex, const DrdThreadId tid)
+Bool DRD_(mutex_is_locked_by)(const Addr mutex, const DrdThreadId tid)
{
- struct mutex_info* const p = mutex_get(mutex);
+ struct mutex_info* const p = DRD_(mutex_get)(mutex);
if (p)
{
return (p->recursion_count > 0 && p->owner == tid);
@@ -482,9 +483,9 @@
return False;
}
-int mutex_get_recursion_count(const Addr mutex)
+int DRD_(mutex_get_recursion_count)(const Addr mutex)
{
- struct mutex_info* const p = mutex_get(mutex);
+ struct mutex_info* const p = DRD_(mutex_get)(mutex);
tl_assert(p);
return p->recursion_count;
}
@@ -493,7 +494,7 @@
* Call this function when thread tid stops to exist, such that the
* "last owner" field can be cleared if it still refers to that thread.
*/
-void mutex_thread_delete(const DrdThreadId tid)
+void DRD_(mutex_thread_delete)(const DrdThreadId tid)
{
struct mutex_info* p;
@@ -514,12 +515,12 @@
}
}
-ULong get_mutex_lock_count(void)
+ULong DRD_(get_mutex_lock_count)(void)
{
- return s_mutex_lock_count;
+ return DRD_(s_mutex_lock_count);
}
-ULong get_mutex_segment_creation_count(void)
+ULong DRD_(get_mutex_segment_creation_count)(void)
{
- return s_mutex_segment_creation_count;
+ return DRD_(s_mutex_segment_creation_count);
}
diff --git a/drd/drd_mutex.h b/drd/drd_mutex.h
index d95064b..3c1b3b8 100644
--- a/drd/drd_mutex.h
+++ b/drd/drd_mutex.h
@@ -35,26 +35,25 @@
struct mutex_info;
-void mutex_set_trace(const Bool trace_mutex);
-void mutex_set_lock_threshold(const UInt lock_threshold_ms);
-struct mutex_info* mutex_init(const Addr mutex,
- const MutexT mutex_type);
-void mutex_post_destroy(const Addr mutex);
-void not_a_mutex(const Addr mutex);
-struct mutex_info* mutex_get(const Addr mutex);
-void mutex_pre_lock(const Addr mutex, const MutexT mutex_type,
- const Bool trylock);
-void mutex_post_lock(const Addr mutex, const Bool took_lock,
- const Bool post_cond_wait);
-void mutex_unlock(const Addr mutex, const MutexT mutex_type);
+void DRD_(mutex_set_trace)(const Bool trace_mutex);
+void DRD_(mutex_set_lock_threshold)(const UInt lock_threshold_ms);
+struct mutex_info* DRD_(mutex_init)(const Addr mutex, const MutexT mutex_type);
+void DRD_(mutex_post_destroy)(const Addr mutex);
+void DRD_(not_a_mutex)(const Addr mutex);
+struct mutex_info* DRD_(mutex_get)(const Addr mutex);
+void DRD_(mutex_pre_lock)(const Addr mutex, const MutexT mutex_type,
+ const Bool trylock);
+void DRD_(mutex_post_lock)(const Addr mutex, const Bool took_lock,
+ const Bool post_cond_wait);
+void DRD_(mutex_unlock)(const Addr mutex, const MutexT mutex_type);
void DRD_(spinlock_init_or_unlock)(const Addr spinlock);
-const char* mutex_get_typename(struct mutex_info* const p);
-const char* mutex_type_name(const MutexT mt);
-Bool mutex_is_locked_by(const Addr mutex, const DrdThreadId tid);
-int mutex_get_recursion_count(const Addr mutex);
-void mutex_thread_delete(const DrdThreadId tid);
-ULong get_mutex_lock_count(void);
-ULong get_mutex_segment_creation_count(void);
+const char* DRD_(mutex_get_typename)(struct mutex_info* const p);
+const char* DRD_(mutex_type_name)(const MutexT mt);
+Bool DRD_(mutex_is_locked_by)(const Addr mutex, const DrdThreadId tid);
+int DRD_(mutex_get_recursion_count)(const Addr mutex);
+void DRD_(mutex_thread_delete)(const DrdThreadId tid);
+ULong DRD_(get_mutex_lock_count)(void);
+ULong DRD_(get_mutex_segment_creation_count)(void);
#endif /* __DRD_MUTEX_H */
diff --git a/drd/drd_rwlock.c b/drd/drd_rwlock.c
index 0cda736..067937b 100644
--- a/drd/drd_rwlock.c
+++ b/drd/drd_rwlock.c
@@ -36,7 +36,7 @@
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
-// Type definitions.
+/* Local type definitions. */
struct rwlock_thread_info
{
@@ -48,38 +48,38 @@
};
-// Local functions.
+/* Local functions. */
-static void rwlock_cleanup(struct rwlock_info* p);
-static ULong s_rwlock_segment_creation_count;
+static void DRD_(rwlock_cleanup)(struct rwlock_info* p);
-// Local variables.
+/* Local variables. */
-static Bool s_trace_rwlock;
-static UInt s_exclusive_threshold_ms;
-static UInt s_shared_threshold_ms;
+static Bool DRD_(s_trace_rwlock);
+static UInt DRD_(s_exclusive_threshold_ms);
+static UInt DRD_(s_shared_threshold_ms);
+static ULong DRD_(s_rwlock_segment_creation_count);
-// Function definitions.
+/* Function definitions. */
-void rwlock_set_trace(const Bool trace_rwlock)
+void DRD_(rwlock_set_trace)(const Bool trace_rwlock)
{
- tl_assert(!! trace_rwlock == trace_rwlock);
- s_trace_rwlock = trace_rwlock;
+ tl_assert(trace_rwlock == False || trace_rwlock == True);
+ DRD_(s_trace_rwlock) = trace_rwlock;
}
-void rwlock_set_exclusive_threshold(const UInt exclusive_threshold_ms)
+void DRD_(rwlock_set_exclusive_threshold)(const UInt exclusive_threshold_ms)
{
- s_exclusive_threshold_ms = exclusive_threshold_ms;
+ DRD_(s_exclusive_threshold_ms) = exclusive_threshold_ms;
}
-void rwlock_set_shared_threshold(const UInt shared_threshold_ms)
+void DRD_(rwlock_set_shared_threshold)(const UInt shared_threshold_ms)
{
- s_shared_threshold_ms = shared_threshold_ms;
+ DRD_(s_shared_threshold_ms) = shared_threshold_ms;
}
-static Bool rwlock_is_rdlocked(struct rwlock_info* p)
+static Bool DRD_(rwlock_is_rdlocked)(struct rwlock_info* p)
{
struct rwlock_thread_info* q;
@@ -91,7 +91,7 @@
return False;
}
-static Bool rwlock_is_wrlocked(struct rwlock_info* p)
+static Bool DRD_(rwlock_is_wrlocked)(struct rwlock_info* p)
{
struct rwlock_thread_info* q;
@@ -103,12 +103,13 @@
return False;
}
-static Bool rwlock_is_locked(struct rwlock_info* p)
+static Bool DRD_(rwlock_is_locked)(struct rwlock_info* p)
{
- return rwlock_is_rdlocked(p) || rwlock_is_wrlocked(p);
+ return DRD_(rwlock_is_rdlocked)(p) || DRD_(rwlock_is_wrlocked)(p);
}
-static Bool rwlock_is_rdlocked_by(struct rwlock_info* p, const DrdThreadId tid)
+static Bool DRD_(rwlock_is_rdlocked_by)(struct rwlock_info* p,
+ const DrdThreadId tid)
{
const UWord uword_tid = tid;
struct rwlock_thread_info* q;
@@ -117,7 +118,8 @@
return q && q->reader_nesting_count > 0;
}
-static Bool rwlock_is_wrlocked_by(struct rwlock_info* p, const DrdThreadId tid)
+static Bool DRD_(rwlock_is_wrlocked_by)(struct rwlock_info* p,
+ const DrdThreadId tid)
{
const UWord uword_tid = tid;
struct rwlock_thread_info* q;
@@ -126,14 +128,17 @@
return q && q->writer_nesting_count > 0;
}
-static Bool rwlock_is_locked_by(struct rwlock_info* p, const DrdThreadId tid)
+static Bool DRD_(rwlock_is_locked_by)(struct rwlock_info* p,
+ const DrdThreadId tid)
{
- return rwlock_is_rdlocked_by(p, tid) || rwlock_is_wrlocked_by(p, tid);
+ return (DRD_(rwlock_is_rdlocked_by)(p, tid)
+ || DRD_(rwlock_is_wrlocked_by)(p, tid));
}
/** Either look up or insert a node corresponding to DRD thread id 'tid'. */
static
-struct rwlock_thread_info* lookup_or_insert_node(OSet* oset, const UWord tid)
+struct rwlock_thread_info*
+DRD_(lookup_or_insert_node)(OSet* oset, const UWord tid)
{
struct rwlock_thread_info* q;
@@ -152,12 +157,13 @@
return q;
}
-/** Combine the vector clock corresponding to the last unlock operation of
- * reader-writer lock p into the vector clock of thread 'tid'.
+/**
+ * Combine the vector clock corresponding to the last unlock operation of
+ * reader-writer lock p into the vector clock of thread 'tid'.
*/
-static void rwlock_combine_other_vc(struct rwlock_info* const p,
- const DrdThreadId tid,
- const Bool readers_too)
+static void DRD_(rwlock_combine_other_vc)(struct rwlock_info* const p,
+ const DrdThreadId tid,
+ const Bool readers_too)
{
struct rwlock_thread_info* q;
@@ -173,13 +179,13 @@
/** Initialize the rwlock_info data structure *p. */
static
-void rwlock_initialize(struct rwlock_info* const p, const Addr rwlock)
+void DRD_(rwlock_initialize)(struct rwlock_info* const p, const Addr rwlock)
{
tl_assert(rwlock != 0);
tl_assert(p->a1 == rwlock);
tl_assert(p->type == ClientRwlock);
- p->cleanup = (void(*)(DrdClientobj*))&rwlock_cleanup;
+ p->cleanup = (void(*)(DrdClientobj*))&(DRD_(rwlock_cleanup));
p->thread_info = VG_(OSetGen_Create)(
0, 0, VG_(malloc), "drd.rwlock.ri.1", VG_(free));
p->acquiry_time_ms = 0;
@@ -187,13 +193,13 @@
}
/** Deallocate the memory that was allocated by rwlock_initialize(). */
-static void rwlock_cleanup(struct rwlock_info* p)
+static void DRD_(rwlock_cleanup)(struct rwlock_info* p)
{
struct rwlock_thread_info* q;
tl_assert(p);
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] rwlock_destroy 0x%lx",
@@ -202,7 +208,7 @@
p->a1);
}
- if (rwlock_is_locked(p))
+ if (DRD_(rwlock_is_locked)(p))
{
RwlockErrInfo REI = { p->a1 };
VG_(maybe_record_error)(VG_(get_running_tid)(),
@@ -222,7 +228,7 @@
static
struct rwlock_info*
-rwlock_get_or_allocate(const Addr rwlock)
+DRD_(rwlock_get_or_allocate)(const Addr rwlock)
{
struct rwlock_info* p;
@@ -245,22 +251,22 @@
}
p = &(DRD_(clientobj_add)(rwlock, ClientRwlock)->rwlock);
- rwlock_initialize(p, rwlock);
+ DRD_(rwlock_initialize)(p, rwlock);
return p;
}
-static struct rwlock_info* rwlock_get(const Addr rwlock)
+static struct rwlock_info* DRD_(rwlock_get)(const Addr rwlock)
{
tl_assert(offsetof(DrdClientobj, rwlock) == 0);
return &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
}
/** Called before pthread_rwlock_init(). */
-struct rwlock_info* rwlock_pre_init(const Addr rwlock)
+struct rwlock_info* DRD_(rwlock_pre_init)(const Addr rwlock)
{
struct rwlock_info* p;
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] rwlock_init 0x%lx",
@@ -269,7 +275,7 @@
rwlock);
}
- p = rwlock_get(rwlock);
+ p = DRD_(rwlock_get)(rwlock);
if (p)
{
@@ -284,17 +290,17 @@
return p;
}
- p = rwlock_get_or_allocate(rwlock);
+ p = DRD_(rwlock_get_or_allocate)(rwlock);
return p;
}
/** Called after pthread_rwlock_destroy(). */
-void rwlock_post_destroy(const Addr rwlock)
+void DRD_(rwlock_post_destroy)(const Addr rwlock)
{
struct rwlock_info* p;
- p = rwlock_get(rwlock);
+ p = DRD_(rwlock_get)(rwlock);
if (p == 0)
{
GenericErrInfo GEI;
@@ -309,16 +315,17 @@
DRD_(clientobj_remove)(rwlock, ClientRwlock);
}
-/** Called before pthread_rwlock_rdlock() is invoked. If a data structure for
- * the client-side object was not yet created, do this now. Also check whether
- * an attempt is made to lock recursively a synchronization object that must
- * not be locked recursively.
+/**
+ * Called before pthread_rwlock_rdlock() is invoked. If a data structure for
+ * the client-side object was not yet created, do this now. Also check whether
+ * an attempt is made to lock recursively a synchronization object that must
+ * not be locked recursively.
*/
-void rwlock_pre_rdlock(const Addr rwlock)
+void DRD_(rwlock_pre_rdlock)(const Addr rwlock)
{
struct rwlock_info* p;
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] pre_rwlock_rdlock 0x%lx",
@@ -327,10 +334,10 @@
rwlock);
}
- p = rwlock_get_or_allocate(rwlock);
+ p = DRD_(rwlock_get_or_allocate)(rwlock);
tl_assert(p);
- if (rwlock_is_wrlocked_by(p, DRD_(thread_get_running_tid)()))
+ if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
{
VG_(message)(Vg_UserMsg,
"reader-writer lock 0x%lx is already locked for"
@@ -339,17 +346,18 @@
}
}
-/** Update rwlock_info state when locking the pthread_rwlock_t mutex.
- * Note: this function must be called after pthread_rwlock_rdlock() has been
- * called, or a race condition is triggered !
+/**
+ * Update rwlock_info state when locking the pthread_rwlock_t mutex.
+ * Note: this function must be called after pthread_rwlock_rdlock() has been
+ * called, or a race condition is triggered !
*/
-void rwlock_post_rdlock(const Addr rwlock, const Bool took_lock)
+void DRD_(rwlock_post_rdlock)(const Addr rwlock, const Bool took_lock)
{
const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
struct rwlock_info* p;
struct rwlock_thread_info* q;
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] post_rwlock_rdlock 0x%lx",
@@ -358,38 +366,39 @@
rwlock);
}
- p = rwlock_get(rwlock);
+ p = DRD_(rwlock_get)(rwlock);
if (! p || ! took_lock)
return;
- tl_assert(! rwlock_is_wrlocked(p));
+ tl_assert(! DRD_(rwlock_is_wrlocked)(p));
- q = lookup_or_insert_node(p->thread_info, drd_tid);
+ q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
if (++q->reader_nesting_count == 1)
{
- rwlock_combine_other_vc(p, drd_tid, False);
+ DRD_(rwlock_combine_other_vc)(p, drd_tid, False);
q->last_lock_was_writer_lock = False;
DRD_(thread_new_segment)(drd_tid);
- s_rwlock_segment_creation_count++;
+ DRD_(s_rwlock_segment_creation_count)++;
p->acquiry_time_ms = VG_(read_millisecond_timer)();
p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
}
}
-/** Called before pthread_rwlock_wrlock() is invoked. If a data structure for
- * the client-side object was not yet created, do this now. Also check whether
- * an attempt is made to lock recursively a synchronization object that must
- * not be locked recursively.
+/**
+ * Called before pthread_rwlock_wrlock() is invoked. If a data structure for
+ * the client-side object was not yet created, do this now. Also check whether
+ * an attempt is made to lock recursively a synchronization object that must
+ * not be locked recursively.
*/
-void rwlock_pre_wrlock(const Addr rwlock)
+void DRD_(rwlock_pre_wrlock)(const Addr rwlock)
{
struct rwlock_info* p;
- p = rwlock_get(rwlock);
+ p = DRD_(rwlock_get)(rwlock);
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] pre_rwlock_wrlock 0x%lx",
@@ -400,12 +409,12 @@
if (p == 0)
{
- p = rwlock_get_or_allocate(rwlock);
+ p = DRD_(rwlock_get_or_allocate)(rwlock);
}
tl_assert(p);
- if (rwlock_is_wrlocked_by(p, DRD_(thread_get_running_tid)()))
+ if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
{
RwlockErrInfo REI = { p->a1 };
VG_(maybe_record_error)(VG_(get_running_tid)(),
@@ -421,15 +430,15 @@
* Note: this function must be called after pthread_rwlock_wrlock() has
* finished, or a race condition is triggered !
*/
-void rwlock_post_wrlock(const Addr rwlock, const Bool took_lock)
+void DRD_(rwlock_post_wrlock)(const Addr rwlock, const Bool took_lock)
{
const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
struct rwlock_info* p;
struct rwlock_thread_info* q;
- p = rwlock_get(rwlock);
+ p = DRD_(rwlock_get)(rwlock);
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] post_rwlock_wrlock 0x%lx",
@@ -441,14 +450,15 @@
if (! p || ! took_lock)
return;
- q = lookup_or_insert_node(p->thread_info, DRD_(thread_get_running_tid)());
+ q = DRD_(lookup_or_insert_node)(p->thread_info,
+ DRD_(thread_get_running_tid)());
tl_assert(q->writer_nesting_count == 0);
q->writer_nesting_count++;
q->last_lock_was_writer_lock = True;
tl_assert(q->writer_nesting_count == 1);
- rwlock_combine_other_vc(p, drd_tid, True);
+ DRD_(rwlock_combine_other_vc)(p, drd_tid, True);
DRD_(thread_new_segment)(drd_tid);
- s_rwlock_segment_creation_count++;
+ DRD_(s_rwlock_segment_creation_count)++;
p->acquiry_time_ms = VG_(read_millisecond_timer)();
p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
}
@@ -462,14 +472,14 @@
* @param tid ThreadId of the thread calling pthread_rwlock_unlock().
* @param vc Pointer to the current vector clock of thread tid.
*/
-void rwlock_pre_unlock(const Addr rwlock)
+void DRD_(rwlock_pre_unlock)(const Addr rwlock)
{
const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
const ThreadId vg_tid = VG_(get_running_tid)();
struct rwlock_info* p;
struct rwlock_thread_info* q;
- if (s_trace_rwlock)
+ if (DRD_(s_trace_rwlock))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] rwlock_unlock 0x%lx",
@@ -478,7 +488,7 @@
rwlock);
}
- p = rwlock_get(rwlock);
+ p = DRD_(rwlock_get)(rwlock);
if (p == 0)
{
GenericErrInfo GEI;
@@ -489,7 +499,7 @@
&GEI);
return;
}
- if (! rwlock_is_locked_by(p, drd_tid))
+ if (! DRD_(rwlock_is_locked_by)(p, drd_tid))
{
RwlockErrInfo REI = { p->a1 };
VG_(maybe_record_error)(vg_tid,
@@ -499,18 +509,18 @@
&REI);
return;
}
- q = lookup_or_insert_node(p->thread_info, drd_tid);
+ q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
tl_assert(q);
if (q->reader_nesting_count > 0)
{
q->reader_nesting_count--;
- if (q->reader_nesting_count == 0 && s_shared_threshold_ms > 0)
+ if (q->reader_nesting_count == 0 && DRD_(s_shared_threshold_ms) > 0)
{
ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
- if (held > s_shared_threshold_ms)
+ if (held > DRD_(s_shared_threshold_ms))
{
HoldtimeErrInfo HEI
- = { rwlock, p->acquired_at, held, s_shared_threshold_ms };
+ = { rwlock, p->acquired_at, held, DRD_(s_shared_threshold_ms) };
VG_(maybe_record_error)(vg_tid,
HoldtimeErr,
VG_(get_IP)(vg_tid),
@@ -522,13 +532,13 @@
else if (q->writer_nesting_count > 0)
{
q->writer_nesting_count--;
- if (q->writer_nesting_count == 0 && s_exclusive_threshold_ms > 0)
+ if (q->writer_nesting_count == 0 && DRD_(s_exclusive_threshold_ms) > 0)
{
ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
- if (held > s_exclusive_threshold_ms)
+ if (held > DRD_(s_exclusive_threshold_ms))
{
HoldtimeErrInfo HEI
- = { rwlock, p->acquired_at, held, s_exclusive_threshold_ms };
+ = { rwlock, p->acquired_at, held, DRD_(s_exclusive_threshold_ms) };
VG_(maybe_record_error)(vg_tid,
HoldtimeErr,
VG_(get_IP)(vg_tid),
@@ -550,7 +560,7 @@
DRD_(thread_get_latest_segment)(&q->last_unlock_segment, drd_tid);
DRD_(thread_new_segment)(drd_tid);
- s_rwlock_segment_creation_count++;
+ DRD_(s_rwlock_segment_creation_count)++;
}
}
@@ -558,7 +568,7 @@
* Call this function when thread tid stops to exist, such that the
* "last owner" field can be cleared if it still refers to that thread.
*/
-void rwlock_thread_delete(const DrdThreadId tid)
+void DRD_(rwlock_thread_delete)(const DrdThreadId tid)
{
struct rwlock_info* p;
@@ -566,7 +576,7 @@
for ( ; (p = &(DRD_(clientobj_next)(ClientRwlock)->rwlock)) != 0; )
{
struct rwlock_thread_info* q;
- if (rwlock_is_locked_by(p, tid))
+ if (DRD_(rwlock_is_locked_by)(p, tid))
{
RwlockErrInfo REI = { p->a1 };
VG_(maybe_record_error)(VG_(get_running_tid)(),
@@ -574,14 +584,14 @@
VG_(get_IP)(VG_(get_running_tid)()),
"Reader-writer lock still locked at thread exit",
&REI);
- q = lookup_or_insert_node(p->thread_info, tid);
+ q = DRD_(lookup_or_insert_node)(p->thread_info, tid);
q->reader_nesting_count = 0;
q->writer_nesting_count = 0;
}
}
}
-ULong get_rwlock_segment_creation_count(void)
+ULong DRD_(get_rwlock_segment_creation_count)(void)
{
- return s_rwlock_segment_creation_count;
+ return DRD_(s_rwlock_segment_creation_count);
}
diff --git a/drd/drd_rwlock.h b/drd/drd_rwlock.h
index 3a79d7f..861e8eb 100644
--- a/drd/drd_rwlock.h
+++ b/drd/drd_rwlock.h
@@ -38,18 +38,18 @@
struct rwlock_info;
-void rwlock_set_trace(const Bool trace_rwlock);
-void rwlock_set_exclusive_threshold(const UInt exclusive_threshold_ms);
-void rwlock_set_shared_threshold(const UInt shared_threshold_ms);
-struct rwlock_info* rwlock_pre_init(const Addr rwlock);
-void rwlock_post_destroy(const Addr rwlock);
-void rwlock_pre_rdlock(const Addr rwlock);
-void rwlock_post_rdlock(const Addr rwlock, const Bool took_lock);
-void rwlock_pre_wrlock(const Addr rwlock);
-void rwlock_post_wrlock(const Addr rwlock, const Bool took_lock);
-void rwlock_pre_unlock(const Addr rwlock);
-void rwlock_thread_delete(const DrdThreadId tid);
-ULong get_rwlock_segment_creation_count(void);
+void DRD_(rwlock_set_trace)(const Bool trace_rwlock);
+void DRD_(rwlock_set_exclusive_threshold)(const UInt exclusive_threshold_ms);
+void DRD_(rwlock_set_shared_threshold)(const UInt shared_threshold_ms);
+struct rwlock_info* DRD_(rwlock_pre_init)(const Addr rwlock);
+void DRD_(rwlock_post_destroy)(const Addr rwlock);
+void DRD_(rwlock_pre_rdlock)(const Addr rwlock);
+void DRD_(rwlock_post_rdlock)(const Addr rwlock, const Bool took_lock);
+void DRD_(rwlock_pre_wrlock)(const Addr rwlock);
+void DRD_(rwlock_post_wrlock)(const Addr rwlock, const Bool took_lock);
+void DRD_(rwlock_pre_unlock)(const Addr rwlock);
+void DRD_(rwlock_thread_delete)(const DrdThreadId tid);
+ULong DRD_(get_rwlock_segment_creation_count)(void);
#endif /* __DRD_RWLOCK_H */
diff --git a/drd/drd_semaphore.c b/drd/drd_semaphore.c
index 5d6143f..03dfbde 100644
--- a/drd/drd_semaphore.c
+++ b/drd/drd_semaphore.c
@@ -35,20 +35,21 @@
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
-// Local functions.
+/* Local functions. */
-static void semaphore_cleanup(struct semaphore_info* p);
+static void DRD_(semaphore_cleanup)(struct semaphore_info* p);
-// Local variables.
+/* Local variables. */
-static Bool s_trace_semaphore;
-static ULong s_semaphore_segment_creation_count;
+static Bool DRD_(s_trace_semaphore);
+static ULong DRD_(s_semaphore_segment_creation_count);
-// Function definitions.
+/* Function definitions. */
-static void segment_push(struct semaphore_info* p, Segment* sg)
+/** Push a segment onto the end of the queue 'p->last_sem_post_seg'. */
+static void DRD_(segment_push)(struct semaphore_info* p, Segment* sg)
{
Word n;
@@ -61,7 +62,8 @@
tl_assert(*(Segment**)VG_(indexXA)(p->last_sem_post_seg, n) == sg);
}
-static Segment* segment_pop(struct semaphore_info* p)
+/** Pop a segment from the beginning of the queue 'p->last_sem_post_seg'. */
+static Segment* DRD_(segment_pop)(struct semaphore_info* p)
{
Word sz;
Segment* sg;
@@ -81,19 +83,25 @@
return sg;
}
-void semaphore_set_trace(const Bool trace_semaphore)
+/** Enable or disable tracing of semaphore actions. */
+void DRD_(semaphore_set_trace)(const Bool trace_semaphore)
{
- s_trace_semaphore = trace_semaphore;
+ DRD_(s_trace_semaphore) = trace_semaphore;
}
+/**
+ * Initialize the memory 'p' points at as a semaphore_info structure for the
+ * client semaphore at client address 'semaphore'.
+ */
static
-void semaphore_initialize(struct semaphore_info* const p, const Addr semaphore)
+void DRD_(semaphore_initialize)(struct semaphore_info* const p,
+ const Addr semaphore)
{
tl_assert(semaphore != 0);
tl_assert(p->a1 == semaphore);
tl_assert(p->type == ClientSemaphore);
- p->cleanup = (void(*)(DrdClientobj*))semaphore_cleanup;
+ p->cleanup = (void(*)(DrdClientobj*))(DRD_(semaphore_cleanup));
p->waits_to_skip = 0;
p->value = 0;
p->waiters = 0;
@@ -106,7 +114,7 @@
* Free the memory that was allocated by semaphore_initialize(). Called by
* DRD_(clientobj_remove)().
*/
-static void semaphore_cleanup(struct semaphore_info* p)
+static void DRD_(semaphore_cleanup)(struct semaphore_info* p)
{
Segment* sg;
@@ -120,14 +128,19 @@
" upon",
&sei);
}
- while ((sg = segment_pop(p)))
+ while ((sg = DRD_(segment_pop)(p)))
DRD_(sg_put)(sg);
VG_(deleteXA)(p->last_sem_post_seg);
}
+/**
+ * Return a pointer to the structure with information about the specified
+ * client semaphore. Allocate a new structure if such a structure does not
+ * yet exist.
+ */
static
struct semaphore_info*
-semaphore_get_or_allocate(const Addr semaphore)
+DRD_(semaphore_get_or_allocate)(const Addr semaphore)
{
struct semaphore_info *p;
@@ -137,25 +150,30 @@
{
tl_assert(offsetof(DrdClientobj, semaphore) == 0);
p = &(DRD_(clientobj_add)(semaphore, ClientSemaphore)->semaphore);
- semaphore_initialize(p, semaphore);
+ DRD_(semaphore_initialize)(p, semaphore);
}
return p;
}
-static struct semaphore_info* semaphore_get(const Addr semaphore)
+/**
+ * Return a pointer to the structure with information about the specified
+ * client semaphore, or null if no such structure was found.
+ */
+static struct semaphore_info* DRD_(semaphore_get)(const Addr semaphore)
{
tl_assert(offsetof(DrdClientobj, semaphore) == 0);
return &(DRD_(clientobj_get)(semaphore, ClientSemaphore)->semaphore);
}
/** Called before sem_init(). */
-struct semaphore_info* semaphore_init(const Addr semaphore,
- const Word pshared, const UInt value)
+struct semaphore_info* DRD_(semaphore_init)(const Addr semaphore,
+ const Word pshared,
+ const UInt value)
{
struct semaphore_info* p;
Segment* sg;
- if (s_trace_semaphore)
+ if (DRD_(s_trace_semaphore))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] semaphore_init 0x%lx value %u",
@@ -164,7 +182,7 @@
semaphore,
value);
}
- p = semaphore_get(semaphore);
+ p = DRD_(semaphore_get)(semaphore);
if (p)
{
const ThreadId vg_tid = VG_(get_running_tid)();
@@ -175,14 +193,14 @@
"Semaphore reinitialization",
&SEI);
// Remove all segments from the segment stack.
- while ((sg = segment_pop(p)))
+ while ((sg = DRD_(segment_pop)(p)))
{
DRD_(sg_put)(sg);
}
}
else
{
- p = semaphore_get_or_allocate(semaphore);
+ p = DRD_(semaphore_get_or_allocate)(semaphore);
}
tl_assert(p);
p->waits_to_skip = value;
@@ -191,13 +209,13 @@
}
/** Called after sem_destroy(). */
-void semaphore_destroy(const Addr semaphore)
+void DRD_(semaphore_destroy)(const Addr semaphore)
{
struct semaphore_info* p;
- p = semaphore_get(semaphore);
+ p = DRD_(semaphore_get)(semaphore);
- if (s_trace_semaphore)
+ if (DRD_(s_trace_semaphore))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] semaphore_destroy 0x%lx value %u",
@@ -222,29 +240,30 @@
}
/** Called before sem_wait(). */
-void semaphore_pre_wait(const Addr semaphore)
+void DRD_(semaphore_pre_wait)(const Addr semaphore)
{
struct semaphore_info* p;
- p = semaphore_get_or_allocate(semaphore);
+ p = DRD_(semaphore_get_or_allocate)(semaphore);
tl_assert(p);
tl_assert((int)p->waiters >= 0);
p->waiters++;
tl_assert(p->waiters > 0);
}
-/** Called after sem_wait() finished.
- * @note Do not rely on the value of 'waited' -- some glibc versions do
- * not set it correctly.
+/**
+ * Called after sem_wait() finished.
+ * @note Do not rely on the value of 'waited' -- some glibc versions do
+ * not set it correctly.
*/
-void semaphore_post_wait(const DrdThreadId tid, const Addr semaphore,
- const Bool waited)
+void DRD_(semaphore_post_wait)(const DrdThreadId tid, const Addr semaphore,
+ const Bool waited)
{
struct semaphore_info* p;
Segment* sg;
- p = semaphore_get(semaphore);
- if (s_trace_semaphore)
+ p = DRD_(semaphore_get)(semaphore);
+ if (DRD_(s_trace_semaphore))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] semaphore_wait 0x%lx value %u -> %u",
@@ -275,7 +294,7 @@
p->waits_to_skip--;
else
{
- sg = segment_pop(p);
+ sg = DRD_(segment_pop)(p);
tl_assert(sg);
if (sg)
{
@@ -286,21 +305,21 @@
}
DRD_(sg_put)(sg);
DRD_(thread_new_segment)(tid);
- s_semaphore_segment_creation_count++;
+ DRD_(s_semaphore_segment_creation_count)++;
}
}
}
/** Called before sem_post(). */
-void semaphore_pre_post(const DrdThreadId tid, const Addr semaphore)
+void DRD_(semaphore_pre_post)(const DrdThreadId tid, const Addr semaphore)
{
struct semaphore_info* p;
Segment* sg;
- p = semaphore_get_or_allocate(semaphore);
+ p = DRD_(semaphore_get_or_allocate)(semaphore);
p->value++;
- if (s_trace_semaphore)
+ if (DRD_(s_trace_semaphore))
{
VG_(message)(Vg_UserMsg,
"[%d/%d] semaphore_post 0x%lx value %u -> %u",
@@ -315,13 +334,13 @@
sg = 0;
DRD_(thread_get_latest_segment)(&sg, tid);
tl_assert(sg);
- segment_push(p, sg);
- s_semaphore_segment_creation_count++;
+ DRD_(segment_push)(p, sg);
+ DRD_(s_semaphore_segment_creation_count)++;
}
/** Called after sem_post() finished successfully. */
-void semaphore_post_post(const DrdThreadId tid, const Addr semaphore,
- const Bool waited)
+void DRD_(semaphore_post_post)(const DrdThreadId tid, const Addr semaphore,
+ const Bool waited)
{
/* Note: it is hard to implement the sem_post() wrapper correctly in */
/* case sem_post() returns an error code. This is because handling this */
@@ -334,10 +353,10 @@
/* redirected functions. */
}
-void semaphore_thread_delete(const DrdThreadId threadid)
+void DRD_(semaphore_thread_delete)(const DrdThreadId threadid)
{ }
-ULong get_semaphore_segment_creation_count(void)
+ULong DRD_(get_semaphore_segment_creation_count)(void)
{
- return s_semaphore_segment_creation_count;
+ return DRD_(s_semaphore_segment_creation_count);
}
diff --git a/drd/drd_semaphore.h b/drd/drd_semaphore.h
index ecca2ac..1d714c6 100644
--- a/drd/drd_semaphore.h
+++ b/drd/drd_semaphore.h
@@ -37,18 +37,19 @@
struct semaphore_info;
-void semaphore_set_trace(const Bool trace_semaphore);
-struct semaphore_info* semaphore_init(const Addr semaphore,
- const Word pshared, const UInt value);
-void semaphore_destroy(const Addr semaphore);
-void semaphore_pre_wait(const Addr semaphore);
-void semaphore_post_wait(const DrdThreadId tid, const Addr semaphore,
- const Bool waited);
-void semaphore_pre_post(const DrdThreadId tid, const Addr semaphore);
-void semaphore_post_post(const DrdThreadId tid, const Addr semaphore,
- const Bool waited);
-void semaphore_thread_delete(const DrdThreadId tid);
-ULong get_semaphore_segment_creation_count(void);
+void DRD_(semaphore_set_trace)(const Bool trace_semaphore);
+struct semaphore_info* DRD_(semaphore_init)(const Addr semaphore,
+ const Word pshared,
+ const UInt value);
+void DRD_(semaphore_destroy)(const Addr semaphore);
+void DRD_(semaphore_pre_wait)(const Addr semaphore);
+void DRD_(semaphore_post_wait)(const DrdThreadId tid, const Addr semaphore,
+ const Bool waited);
+void DRD_(semaphore_pre_post)(const DrdThreadId tid, const Addr semaphore);
+void DRD_(semaphore_post_post)(const DrdThreadId tid, const Addr semaphore,
+ const Bool waited);
+void DRD_(semaphore_thread_delete)(const DrdThreadId tid);
+ULong DRD_(get_semaphore_segment_creation_count)(void);
#endif /* __DRD_SEMAPHORE_H */
diff --git a/drd/drd_thread.c b/drd/drd_thread.c
index 3f213e2..7a8f335 100644
--- a/drd/drd_thread.c
+++ b/drd/drd_thread.c
@@ -304,9 +304,9 @@
DRD_(thread_get_stack_max)(drd_joinee));
}
DRD_(thread_delete)(drd_joinee);
- mutex_thread_delete(drd_joinee);
- cond_thread_delete(drd_joinee);
- semaphore_thread_delete(drd_joinee);
+ DRD_(mutex_thread_delete)(drd_joinee);
+ DRD_(cond_thread_delete)(drd_joinee);
+ DRD_(semaphore_thread_delete)(drd_joinee);
DRD_(barrier_thread_delete)(drd_joinee);
}