Add initial support for POSIX barriers (pthread_barrier_{init,wait,destroy}).
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@8766 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/helgrind/helgrind.h b/helgrind/helgrind.h
index 46359a9..e736186 100644
--- a/helgrind/helgrind.h
+++ b/helgrind/helgrind.h
@@ -93,7 +93,9 @@
_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE, /* sem_t* */
_VG_USERREQ__HG_POSIX_SEM_POST_PRE, /* sem_t* */
_VG_USERREQ__HG_POSIX_SEM_WAIT_POST, /* sem_t* */
- _VG_USERREQ__HG_GET_MY_SEGMENT /* -> Segment* */
+ _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE, /* pth_bar_t*, ulong */
+ _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE, /* pth_bar_t* */
+ _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE /* pth_bar_t* */
} Vg_TCheckClientRequest;
/* Clean memory state. This makes Helgrind forget everything it knew
diff --git a/helgrind/hg_intercepts.c b/helgrind/hg_intercepts.c
index 172edfe..440c4be 100644
--- a/helgrind/hg_intercepts.c
+++ b/helgrind/hg_intercepts.c
@@ -755,28 +755,74 @@
/*--- pthread_barrier_t functions ---*/
/*----------------------------------------------------------------*/
-PTH_FUNC(int, pthreadZubarrierZuwait, // pthread_barrier_wait.
- pthread_barrier_t* b)
+/* Handled: pthread_barrier_init
+ pthread_barrier_wait
+ pthread_barrier_destroy
+
+ Unhandled: pthread_barrierattr_destroy
+ pthread_barrierattr_getpshared
+ pthread_barrierattr_init
+ pthread_barrierattr_setpshared
+ -- are these important?
+*/
+
+PTH_FUNC(int, pthreadZubarrierZuinit, // pthread_barrier_init
+ pthread_barrier_t* bar,
+ pthread_barrierattr_t* attr, unsigned long count)
{
int ret;
OrigFn fn;
VALGRIND_GET_ORIG_FN(fn);
if (TRACE_PTH_FNS) {
- fprintf(stderr, "<< pthread_barrier_wait %p", b);
+ fprintf(stderr, "<< pthread_barrier_init %p %p %lu",
+ bar, attr, count);
fflush(stderr);
}
- // We blocked, signal.
- DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE,
- void*,b);
- CALL_FN_W_W(ret, fn, b);
+ DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,
+ pthread_barrier_t*,bar,
+ unsigned long,count);
- // FIXME: handle ret
+ CALL_FN_W_WWW(ret, fn, bar,attr,count);
- // We unblocked, finish wait.
- DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,
- void *, b, void *, b);
+ if (ret != 0) {
+ DO_PthAPIerror( "pthread_barrier_init", ret );
+ }
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, " pthread_barrier_init -> %d >>\n", ret);
+ }
+
+ return ret;
+}
+
+
+PTH_FUNC(int, pthreadZubarrierZuwait, // pthread_barrier_wait
+ pthread_barrier_t* bar)
+{
+ int ret;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN(fn);
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, "<< pthread_barrier_wait %p", bar);
+ fflush(stderr);
+ }
+
+  /* The fact that this works correctly — and doesn't misbehave when
+     a thread leaving the barrier races round to the front and
+     re-enters while other threads are still leaving it — is quite
+     subtle.  See the comments on the PTHREAD_BARRIER_WAIT_PRE
+     handler in hg_main.c. */
+ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,
+ pthread_barrier_t*,bar);
+
+ CALL_FN_W_W(ret, fn, bar);
+
+ if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD) {
+ DO_PthAPIerror( "pthread_barrier_wait", ret );
+ }
if (TRACE_PTH_FNS) {
fprintf(stderr, " pthread_barrier_wait -> %d >>\n", ret);
@@ -786,6 +832,33 @@
}
+PTH_FUNC(int, pthreadZubarrierZudestroy, // pthread_barrier_destroy
+ pthread_barrier_t* bar)
+{
+ int ret;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN(fn);
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, "<< pthread_barrier_destroy %p", bar);
+ fflush(stderr);
+ }
+
+ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,
+ pthread_barrier_t*,bar);
+
+ CALL_FN_W_W(ret, fn, bar);
+
+ if (ret != 0) {
+ DO_PthAPIerror( "pthread_barrier_destroy", ret );
+ }
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, " pthread_barrier_destroy -> %d >>\n", ret);
+ }
+
+ return ret;
+}
/*----------------------------------------------------------------*/
/*--- pthread_rwlock_t functions ---*/
diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c
index 6d1f9b7..ba3b2df 100644
--- a/helgrind/hg_main.c
+++ b/helgrind/hg_main.c
@@ -1397,7 +1397,9 @@
}
-/*--------- Event handlers proper (evh__* functions) ---------*/
+/* ---------------------------------------------------------- */
+/* -------- Event handlers proper (evh__* functions) -------- */
+/* ---------------------------------------------------------- */
/* What is the Thread* for the currently running thread? This is
absolutely performance critical. We receive notifications from the
@@ -1814,8 +1816,9 @@
// evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
//}
-
+/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
+/* ------------------------------------------------------- */
/* EXPOSITION only: by intercepting lock init events we can show the
user where the lock was initialised, rather than only being able to
@@ -1964,7 +1967,9 @@
}
+/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
+/* ----------------------------------------------------- */
/* A mapping from CV to the SO associated with it. When the CV is
signalled/broadcasted upon, we do a 'send' into the SO, and when a
@@ -1978,7 +1983,8 @@
static void map_cond_to_SO_INIT ( void ) {
if (UNLIKELY(map_cond_to_SO == NULL)) {
- map_cond_to_SO = VG_(newFM)( HG_(zalloc), "hg.mctSI.1", HG_(free), NULL );
+ map_cond_to_SO = VG_(newFM)( HG_(zalloc),
+ "hg.mctSI.1", HG_(free), NULL );
tl_assert(map_cond_to_SO != NULL);
}
}
@@ -2137,7 +2143,9 @@
}
+/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
+/* ------------------------------------------------------- */
/* EXPOSITION only */
static
@@ -2274,7 +2282,9 @@
}
-/* --------------- events to do with semaphores --------------- */
+/* ---------------------------------------------------------- */
+/* -------------- events to do with semaphores -------------- */
+/* ---------------------------------------------------------- */
/* This is similar to but not identical to the handling for condition
variables. */
@@ -2500,6 +2510,213 @@
}
+/* -------------------------------------------------------- */
+/* -------------- events to do with barriers -------------- */
+/* -------------------------------------------------------- */
+
+typedef
+ struct {
+ Bool initted; /* has it yet been initted by guest? */
+ UWord size; /* declared size */
+      XArray* waiting; /* XA of Thread*; number present ranges from 0 to .size */
+ }
+ Bar;
+
+static Bar* new_Bar ( void ) {
+ Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
+ tl_assert(bar);
+ /* all fields are zero */
+ tl_assert(bar->initted == False);
+ return bar;
+}
+
+static void delete_Bar ( Bar* bar ) {
+ tl_assert(bar);
+ if (bar->waiting)
+ VG_(deleteXA)(bar->waiting);
+ HG_(free)(bar);
+}
+
+/* A mapping which stores auxiliary data for barriers. */
+
+/* pthread_barrier_t* -> Bar* */
+static WordFM* map_barrier_to_Bar = NULL;
+
+static void map_barrier_to_Bar_INIT ( void ) {
+ if (UNLIKELY(map_barrier_to_Bar == NULL)) {
+ map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
+ "hg.mbtBI.1", HG_(free), NULL );
+ tl_assert(map_barrier_to_Bar != NULL);
+ }
+}
+
+static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
+ UWord key, val;
+ map_barrier_to_Bar_INIT();
+ if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
+ tl_assert(key == (UWord)barrier);
+ return (Bar*)val;
+ } else {
+ Bar* bar = new_Bar();
+ VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
+ return bar;
+ }
+}
+
+static void map_barrier_to_Bar_delete ( void* barrier ) {
+ UWord keyW, valW;
+ map_barrier_to_Bar_INIT();
+ if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
+ Bar* bar = (Bar*)valW;
+ tl_assert(keyW == (UWord)barrier);
+ delete_Bar(bar);
+ }
+}
+
+
+static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
+ void* barrier,
+ UWord count )
+{
+ Thread* thr;
+ Bar* bar;
+
+ if (SHOW_EVENTS >= 1)
+ VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
+ "(tid=%d, barrier=%p, count=%lu)\n",
+ (Int)tid, (void*)barrier, count );
+
+ thr = map_threads_maybe_lookup( tid );
+ tl_assert(thr); /* cannot fail - Thread* must already exist */
+
+ if (count == 0) {
+ HG_(record_error_Misc)(
+ thr, "pthread_barrier_init: 'count' argument is zero"
+ );
+ }
+
+ bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
+ tl_assert(bar);
+
+ if (bar->initted) {
+ HG_(record_error_Misc)(
+ thr, "pthread_barrier_init: barrier is already initialised"
+ );
+ }
+
+ if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
+ tl_assert(bar->initted);
+ HG_(record_error_Misc)(
+ thr, "pthread_barrier_init: barrier still has waiting threads"
+ );
+ VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
+ }
+ if (!bar->waiting) {
+ bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
+ sizeof(Thread*) );
+ }
+
+ tl_assert(bar->waiting);
+ tl_assert(VG_(sizeXA)(bar->waiting) == 0);
+ bar->initted = True;
+ bar->size = count;
+}
+
+
+static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
+ void* barrier )
+{
+ /* Deal with destroy events. The only purpose is to free storage
+ associated with the barrier, so as to avoid any possible
+ resource leaks. */
+ if (SHOW_EVENTS >= 1)
+ VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
+ "(tid=%d, barrier=%p)\n",
+ (Int)tid, (void*)barrier );
+
+ /* Maybe we shouldn't do this; just let it persist, so that when it
+ is reinitialised we don't need to do any dynamic memory
+ allocation? The downside is a potentially unlimited space leak,
+ if the client creates (in turn) a large number of barriers all
+ at different locations. Note that if we do later move to the
+ don't-delete-it scheme, we need to mark the barrier as
+ uninitialised again since otherwise a later _init call will
+ elicit a duplicate-init error. */
+ map_barrier_to_Bar_delete( barrier );
+}
+
+
+static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
+ void* barrier )
+{
+ Thread* thr;
+ Bar* bar;
+ SO* so;
+ UWord present, i;
+
+ if (SHOW_EVENTS >= 1)
+ VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
+ "(tid=%d, barrier=%p)\n",
+ (Int)tid, (void*)barrier );
+
+ thr = map_threads_maybe_lookup( tid );
+ tl_assert(thr); /* cannot fail - Thread* must already exist */
+
+ bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
+ tl_assert(bar);
+
+ if (!bar->initted) {
+ HG_(record_error_Misc)(
+ thr, "pthread_barrier_wait: barrier is uninitialised"
+ );
+ return; /* client is broken .. avoid assertions below */
+ }
+
+ /* guaranteed by _INIT_PRE above */
+ tl_assert(bar->size > 0);
+ tl_assert(bar->waiting);
+
+ VG_(addToXA)( bar->waiting, &thr );
+
+ /* guaranteed by this function */
+ present = VG_(sizeXA)(bar->waiting);
+ tl_assert(present > 0 && present <= bar->size);
+
+ if (present < bar->size)
+ return;
+
+   /* All the threads have arrived.  Now do the interesting bit.  Get
+ a new synchronisation object and do a weak send to it from all
+ the participating threads. This makes its vector clocks be the
+ join of all the individual thread's vector clocks. Then do a
+ strong receive from it back to all threads, so that their VCs
+ are a copy of it (hence are all equal to the join of their
+ original VCs.) */
+ so = libhb_so_alloc();
+
+ /* XXX check ->waiting has no duplicates */
+
+ tl_assert(bar->waiting);
+ tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
+
+ /* compute the join ... */
+ for (i = 0; i < bar->size; i++) {
+ Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
+ Thr* hbthr = t->hbthr;
+ libhb_so_send( hbthr, so, False/*weak send*/ );
+ }
+ /* ... and distribute to all threads */
+ for (i = 0; i < bar->size; i++) {
+ Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
+ Thr* hbthr = t->hbthr;
+ libhb_so_recv( hbthr, so, True/*strong recv*/ );
+ }
+
+ /* finally, we must empty out the waiting vector */
+ VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
+}
+
+
/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring ---*/
/*--------------------------------------------------------------*/
@@ -3658,19 +3875,20 @@
evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
break;
-//zz case _VG_USERREQ__HG_GET_MY_SEGMENT: { // -> Segment*
-//zz Thread* thr;
-//zz SegmentID segid;
-//zz Segment* seg;
-//zz thr = map_threads_maybe_lookup( tid );
-//zz tl_assert(thr); /* cannot fail */
-//zz segid = thr->csegid;
-//zz tl_assert(is_sane_SegmentID(segid));
-//zz seg = map_segments_lookup( segid );
-//zz tl_assert(seg);
-//zz *ret = (UWord)seg;
-//zz break;
-//zz }
+ case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
+ /* pth_bar_t*, ulong */
+ evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1], args[2] );
+ break;
+
+ case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
+ /* pth_bar_t* */
+ evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
+ break;
+
+ case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
+ /* pth_bar_t* */
+ evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
+ break;
default:
/* Unhandled Helgrind client request! */