Implement thread cleanup stacks.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@329 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/arch/x86-linux/vg_libpthread.c b/coregrind/arch/x86-linux/vg_libpthread.c
index b541d8b..7fba3b0 100644
--- a/coregrind/arch/x86-linux/vg_libpthread.c
+++ b/coregrind/arch/x86-linux/vg_libpthread.c
@@ -279,8 +279,19 @@
__attribute__((noreturn))
void thread_exit_wrapper ( void* ret_val )
{
- int detached, res;
+ int detached, res;
+ CleanupEntry cu;
/* Run this thread's cleanup handlers. */
+ while (1) {
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_POP,
+ &cu, 0, 0, 0);
+ if (res == -1) break; /* stack empty */
+ assert(res == 0);
+ if (0) printf("running exit cleanup handler\n");
+ cu.fn ( cu.arg );
+ }
+
/* Run this thread's key finalizers. */
/* Decide on my final disposition. */
@@ -471,6 +482,94 @@
/* ---------------------------------------------------
+ CLEANUP STACKS
+ ------------------------------------------------ */
+
+void _pthread_cleanup_push (struct _pthread_cleanup_buffer *__buffer,
+ void (*__routine) (void *),
+ void *__arg)
+{
+ int res;
+ CleanupEntry cu;
+ ensure_valgrind("_pthread_cleanup_push");
+ cu.fn = __routine;
+ cu.arg = __arg;
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_PUSH,
+ &cu, 0, 0, 0);
+ assert(res == 0);
+}
+
+
+void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *__buffer,
+ void (*__routine) (void *),
+ void *__arg)
+{
+ /* As _pthread_cleanup_push, but first save the thread's original
+ cancellation type in __buffer and set it to Deferred. */
+ int orig_ctype;
+ ensure_valgrind("_pthread_cleanup_push_defer");
+ /* Set to Deferred, and put the old cancellation type in orig_ctype. */
+ assert(-1 != PTHREAD_CANCEL_DEFERRED);
+ assert(-1 != PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(sizeof(struct _pthread_cleanup_buffer) >= sizeof(int));
+ VALGRIND_MAGIC_SEQUENCE(orig_ctype, (-1) /* default */,
+ VG_USERREQ__SET_CANCELTYPE,
+ PTHREAD_CANCEL_DEFERRED, 0, 0, 0);
+ assert(orig_ctype != -1);
+ *((int*)(__buffer)) = orig_ctype;
+ /* Now push the cleanup. */
+ _pthread_cleanup_push(NULL, __routine, __arg);
+}
+
+
+void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *__buffer,
+ int __execute)
+{
+ /* Pop the top cleanup handler; run it iff __execute is nonzero. */
+ int res;
+ CleanupEntry cu;
+ ensure_valgrind("_pthread_cleanup_pop");
+ cu.fn = NULL; cu.arg = NULL; /* paranoia */
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_POP,
+ &cu, 0, 0, 0);
+ if (res == 0) {
+ /* pop succeeded */
+ if (__execute) {
+ cu.fn ( cu.arg );
+ }
+ return;
+ }
+ if (res == -1) {
+ /* stack underflow */
+ return;
+ }
+ barf("_pthread_cleanup_pop");
+
+
+void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *__buffer,
+ int __execute)
+{
+ int orig_ctype, fake_ctype;
+ /* As _pthread_cleanup_pop, but after popping/running the handler,
+ restore the thread's original cancellation type from the first
+ word of __buffer. */
+ _pthread_cleanup_pop(NULL, __execute);
+ orig_ctype = *((int*)(__buffer));
+ assert(orig_ctype == PTHREAD_CANCEL_DEFERRED
+ || orig_ctype == PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(-1 != PTHREAD_CANCEL_DEFERRED);
+ assert(-1 != PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(sizeof(struct _pthread_cleanup_buffer) >= sizeof(int));
+ VALGRIND_MAGIC_SEQUENCE(fake_ctype, (-1) /* default */,
+ VG_USERREQ__SET_CANCELTYPE,
+ orig_ctype, 0, 0, 0);
+ assert(fake_ctype == PTHREAD_CANCEL_DEFERRED);
+}
+
+
+/* ---------------------------------------------------
MUTEX ATTRIBUTES
------------------------------------------------ */
@@ -2446,43 +2545,6 @@
weak_alias(_IO_funlockfile, funlockfile);
-void _pthread_cleanup_push_defer ( void )
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_push_defer");
-}
-
-void _pthread_cleanup_pop_restore ( void )
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_pop_restore");
-}
-
-/*--------*/
-void _pthread_cleanup_push (struct _pthread_cleanup_buffer *__buffer,
- void (*__routine) (void *),
- void *__arg)
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_push");
-}
-
-void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *__buffer,
- int __execute)
-{
- static int moans = N_MOANS;
- if (moans-- > 0) {
- if (__execute)
- ignored("_pthread_cleanup_pop-EXECUTE");
- else
- ignored("_pthread_cleanup_pop-NO-EXECUTE");
- }
-}
-
-
/* This doesn't seem to be needed to simulate libpthread.so's external
interface, but many people complain about its absence. */
diff --git a/coregrind/vg_include.h b/coregrind/vg_include.h
index c437f6a..6a078f6 100644
--- a/coregrind/vg_include.h
+++ b/coregrind/vg_include.h
@@ -152,6 +152,9 @@
/* Number of entries in the rwlock-remapping table. */
#define VG_N_RWLOCKS 50
+/* Number of entries in each thread's cleanup stack. */
+#define VG_N_CLEANUPSTACK 8
+
/* ---------------------------------------------------------------------
Basic types
@@ -471,6 +474,9 @@
#define VG_USERREQ__PTHREAD_KILL 0x301A
#define VG_USERREQ__PTHREAD_YIELD 0x301B
+#define VG_USERREQ__CLEANUP_PUSH 0x3020
+#define VG_USERREQ__CLEANUP_POP 0x3021
+
/* Cosmetic ... */
#define VG_USERREQ__GET_PTHREAD_TRACE_LEVEL 0x3101
@@ -518,6 +524,14 @@
VgTs_Sleeping /* sleeping for a while */
}
ThreadStatus;
+
+/* An entry in a thread's cleanup stack. */
+typedef
+ struct {
+ void (*fn)(void*);
+ void* arg;
+ }
+ CleanupEntry;
typedef
struct {
@@ -565,6 +579,9 @@
void** joiner_thread_return;
ThreadId joiner_jee_tid;
+ /* Whether or not detached. */
+ Bool detached;
+
/* Cancelability state and type. */
Bool cancel_st; /* False==PTH_CANCEL_DISABLE; True==.._ENABLE */
Bool cancel_ty; /* False==PTH_CANC_ASYNCH; True==..._DEFERRED */
@@ -575,8 +592,9 @@
cancallation is pending. */
void (*cancel_pend)(void*);
- /* Whether or not detached. */
- Bool detached;
+ /* The cleanup stack. */
+ Int custack_used;
+ CleanupEntry custack[VG_N_CLEANUPSTACK];
/* thread-specific data */
void* specifics[VG_N_THREAD_KEYS];
diff --git a/coregrind/vg_libpthread.c b/coregrind/vg_libpthread.c
index b541d8b..7fba3b0 100644
--- a/coregrind/vg_libpthread.c
+++ b/coregrind/vg_libpthread.c
@@ -279,8 +279,19 @@
__attribute__((noreturn))
void thread_exit_wrapper ( void* ret_val )
{
- int detached, res;
+ int detached, res;
+ CleanupEntry cu;
/* Run this thread's cleanup handlers. */
+ while (1) {
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_POP,
+ &cu, 0, 0, 0);
+ if (res == -1) break; /* stack empty */
+ assert(res == 0);
+ if (0) printf("running exit cleanup handler\n");
+ cu.fn ( cu.arg );
+ }
+
/* Run this thread's key finalizers. */
/* Decide on my final disposition. */
@@ -471,6 +482,94 @@
/* ---------------------------------------------------
+ CLEANUP STACKS
+ ------------------------------------------------ */
+
+void _pthread_cleanup_push (struct _pthread_cleanup_buffer *__buffer,
+ void (*__routine) (void *),
+ void *__arg)
+{
+ int res;
+ CleanupEntry cu;
+ ensure_valgrind("_pthread_cleanup_push");
+ cu.fn = __routine;
+ cu.arg = __arg;
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_PUSH,
+ &cu, 0, 0, 0);
+ assert(res == 0);
+}
+
+
+void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *__buffer,
+ void (*__routine) (void *),
+ void *__arg)
+{
+ /* As _pthread_cleanup_push, but first save the thread's original
+ cancellation type in __buffer and set it to Deferred. */
+ int orig_ctype;
+ ensure_valgrind("_pthread_cleanup_push_defer");
+ /* Set to Deferred, and put the old cancellation type in orig_ctype. */
+ assert(-1 != PTHREAD_CANCEL_DEFERRED);
+ assert(-1 != PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(sizeof(struct _pthread_cleanup_buffer) >= sizeof(int));
+ VALGRIND_MAGIC_SEQUENCE(orig_ctype, (-1) /* default */,
+ VG_USERREQ__SET_CANCELTYPE,
+ PTHREAD_CANCEL_DEFERRED, 0, 0, 0);
+ assert(orig_ctype != -1);
+ *((int*)(__buffer)) = orig_ctype;
+ /* Now push the cleanup. */
+ _pthread_cleanup_push(NULL, __routine, __arg);
+}
+
+
+void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *__buffer,
+ int __execute)
+{
+ /* Pop the top cleanup handler; run it iff __execute is nonzero. */
+ int res;
+ CleanupEntry cu;
+ ensure_valgrind("_pthread_cleanup_pop");
+ cu.fn = NULL; cu.arg = NULL; /* paranoia */
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_POP,
+ &cu, 0, 0, 0);
+ if (res == 0) {
+ /* pop succeeded */
+ if (__execute) {
+ cu.fn ( cu.arg );
+ }
+ return;
+ }
+ if (res == -1) {
+ /* stack underflow */
+ return;
+ }
+ barf("_pthread_cleanup_pop");
+
+
+void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *__buffer,
+ int __execute)
+{
+ int orig_ctype, fake_ctype;
+ /* As _pthread_cleanup_pop, but after popping/running the handler,
+ restore the thread's original cancellation type from the first
+ word of __buffer. */
+ _pthread_cleanup_pop(NULL, __execute);
+ orig_ctype = *((int*)(__buffer));
+ assert(orig_ctype == PTHREAD_CANCEL_DEFERRED
+ || orig_ctype == PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(-1 != PTHREAD_CANCEL_DEFERRED);
+ assert(-1 != PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(sizeof(struct _pthread_cleanup_buffer) >= sizeof(int));
+ VALGRIND_MAGIC_SEQUENCE(fake_ctype, (-1) /* default */,
+ VG_USERREQ__SET_CANCELTYPE,
+ orig_ctype, 0, 0, 0);
+ assert(fake_ctype == PTHREAD_CANCEL_DEFERRED);
+}
+
+
+/* ---------------------------------------------------
MUTEX ATTRIBUTES
------------------------------------------------ */
@@ -2446,43 +2545,6 @@
weak_alias(_IO_funlockfile, funlockfile);
-void _pthread_cleanup_push_defer ( void )
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_push_defer");
-}
-
-void _pthread_cleanup_pop_restore ( void )
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_pop_restore");
-}
-
-/*--------*/
-void _pthread_cleanup_push (struct _pthread_cleanup_buffer *__buffer,
- void (*__routine) (void *),
- void *__arg)
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_push");
-}
-
-void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *__buffer,
- int __execute)
-{
- static int moans = N_MOANS;
- if (moans-- > 0) {
- if (__execute)
- ignored("_pthread_cleanup_pop-EXECUTE");
- else
- ignored("_pthread_cleanup_pop-NO-EXECUTE");
- }
-}
-
-
/* This doesn't seem to be needed to simulate libpthread.so's external
interface, but many people complain about its absence. */
diff --git a/coregrind/vg_scheduler.c b/coregrind/vg_scheduler.c
index a2a5573..7eeee51 100644
--- a/coregrind/vg_scheduler.c
+++ b/coregrind/vg_scheduler.c
@@ -143,6 +143,9 @@
static void do_pthread_getspecific ( ThreadId,
UInt /* pthread_key_t */ );
+static void do__cleanup_push ( ThreadId tid, CleanupEntry* cu );
+static void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu );
+static void do__set_canceltype ( ThreadId tid, Int type );
/* ---------------------------------------------------------------------
Helper functions for the scheduler.
@@ -520,10 +523,11 @@
VG_(threads)[tid].joinee_retval = NULL;
VG_(threads)[tid].joiner_thread_return = NULL;
VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
+ VG_(threads)[tid].detached = False;
VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
VG_(threads)[tid].cancel_pend = NULL; /* not pending */
- VG_(threads)[tid].detached = False;
+ VG_(threads)[tid].custack_used = 0;
VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
for (j = 0; j < VG_N_THREAD_KEYS; j++)
@@ -695,10 +699,6 @@
case VG_USERREQ__READ_MILLISECOND_TIMER:
SIMPLE_RETURN(VG_(read_millisecond_timer)());
- case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
- do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
- return True;
-
/* This may make thread tid non-runnable, but the scheduler
checks for that on return from this function. */
case VG_USERREQ__PTHREAD_MUTEX_LOCK:
@@ -709,10 +709,26 @@
do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
return True;
+ case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
+ do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
+ return True;
+
case VG_USERREQ__PTHREAD_GETSPECIFIC:
do_pthread_getspecific ( tid, (UInt)(arg[1]) );
return True;
+ case VG_USERREQ__SET_CANCELTYPE:
+ do__set_canceltype ( tid, arg[1] );
+ return True;
+
+ case VG_USERREQ__CLEANUP_PUSH:
+ do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
+ return True;
+
+ case VG_USERREQ__CLEANUP_POP:
+ do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
+ return True;
+
default:
/* Too hard; wimp out. */
return False;
@@ -1668,6 +1684,54 @@
-------------------------------------------------------- */
static
+void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
+{
+ Int sp;
+ Char msg_buf[100];
+ vg_assert(VG_(is_valid_tid)(tid));
+ sp = VG_(threads)[tid].custack_used;
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+ "cleanup_push (fn %p, arg %p) -> slot %d",
+ cu->fn, cu->arg, sp);
+ print_sched_event(tid, msg_buf);
+ }
+ vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
+ if (sp == VG_N_CLEANUPSTACK)
+ VG_(panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
+ " Increase and recompile.");
+ VG_(threads)[tid].custack[sp] = *cu;
+ sp++;
+ VG_(threads)[tid].custack_used = sp;
+ SET_EDX(tid, 0);
+}
+
+
+static
+void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
+{
+ Int sp;
+ Char msg_buf[100];
+ vg_assert(VG_(is_valid_tid)(tid));
+ sp = VG_(threads)[tid].custack_used;
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+ "cleanup_pop from slot %d", sp);
+ print_sched_event(tid, msg_buf);
+ }
+ vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
+ if (sp == 0) {
+ SET_EDX(tid, -1);
+ return;
+ }
+ sp--;
+ *cu = VG_(threads)[tid].custack[sp];
+ VG_(threads)[tid].custack_used = sp;
+ SET_EDX(tid, 0);
+}
+
+
+static
void do_pthread_yield ( ThreadId tid )
{
Char msg_buf[100];
@@ -2886,10 +2950,6 @@
do__set_cancelstate ( tid, arg[1] );
break;
- case VG_USERREQ__SET_CANCELTYPE:
- do__set_canceltype ( tid, arg[1] );
- break;
-
case VG_USERREQ__SET_OR_GET_DETACH:
do__set_or_get_detach ( tid, arg[1], arg[2] );
break;
diff --git a/vg_include.h b/vg_include.h
index c437f6a..6a078f6 100644
--- a/vg_include.h
+++ b/vg_include.h
@@ -152,6 +152,9 @@
/* Number of entries in the rwlock-remapping table. */
#define VG_N_RWLOCKS 50
+/* Number of entries in each thread's cleanup stack. */
+#define VG_N_CLEANUPSTACK 8
+
/* ---------------------------------------------------------------------
Basic types
@@ -471,6 +474,9 @@
#define VG_USERREQ__PTHREAD_KILL 0x301A
#define VG_USERREQ__PTHREAD_YIELD 0x301B
+#define VG_USERREQ__CLEANUP_PUSH 0x3020
+#define VG_USERREQ__CLEANUP_POP 0x3021
+
/* Cosmetic ... */
#define VG_USERREQ__GET_PTHREAD_TRACE_LEVEL 0x3101
@@ -518,6 +524,14 @@
VgTs_Sleeping /* sleeping for a while */
}
ThreadStatus;
+
+/* An entry in a thread's cleanup stack. */
+typedef
+ struct {
+ void (*fn)(void*);
+ void* arg;
+ }
+ CleanupEntry;
typedef
struct {
@@ -565,6 +579,9 @@
void** joiner_thread_return;
ThreadId joiner_jee_tid;
+ /* Whether or not detached. */
+ Bool detached;
+
/* Cancelability state and type. */
Bool cancel_st; /* False==PTH_CANCEL_DISABLE; True==.._ENABLE */
Bool cancel_ty; /* False==PTH_CANC_ASYNCH; True==..._DEFERRED */
@@ -575,8 +592,9 @@
cancallation is pending. */
void (*cancel_pend)(void*);
- /* Whether or not detached. */
- Bool detached;
+ /* The cleanup stack. */
+ Int custack_used;
+ CleanupEntry custack[VG_N_CLEANUPSTACK];
/* thread-specific data */
void* specifics[VG_N_THREAD_KEYS];
diff --git a/vg_libpthread.c b/vg_libpthread.c
index b541d8b..7fba3b0 100644
--- a/vg_libpthread.c
+++ b/vg_libpthread.c
@@ -279,8 +279,19 @@
__attribute__((noreturn))
void thread_exit_wrapper ( void* ret_val )
{
- int detached, res;
+ int detached, res;
+ CleanupEntry cu;
/* Run this thread's cleanup handlers. */
+ while (1) {
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_POP,
+ &cu, 0, 0, 0);
+ if (res == -1) break; /* stack empty */
+ assert(res == 0);
+ if (0) printf("running exit cleanup handler\n");
+ cu.fn ( cu.arg );
+ }
+
/* Run this thread's key finalizers. */
/* Decide on my final disposition. */
@@ -471,6 +482,94 @@
/* ---------------------------------------------------
+ CLEANUP STACKS
+ ------------------------------------------------ */
+
+void _pthread_cleanup_push (struct _pthread_cleanup_buffer *__buffer,
+ void (*__routine) (void *),
+ void *__arg)
+{
+ int res;
+ CleanupEntry cu;
+ ensure_valgrind("_pthread_cleanup_push");
+ cu.fn = __routine;
+ cu.arg = __arg;
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_PUSH,
+ &cu, 0, 0, 0);
+ assert(res == 0);
+}
+
+
+void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *__buffer,
+ void (*__routine) (void *),
+ void *__arg)
+{
+ /* As _pthread_cleanup_push, but first save the thread's original
+ cancellation type in __buffer and set it to Deferred. */
+ int orig_ctype;
+ ensure_valgrind("_pthread_cleanup_push_defer");
+ /* Set to Deferred, and put the old cancellation type in orig_ctype. */
+ assert(-1 != PTHREAD_CANCEL_DEFERRED);
+ assert(-1 != PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(sizeof(struct _pthread_cleanup_buffer) >= sizeof(int));
+ VALGRIND_MAGIC_SEQUENCE(orig_ctype, (-1) /* default */,
+ VG_USERREQ__SET_CANCELTYPE,
+ PTHREAD_CANCEL_DEFERRED, 0, 0, 0);
+ assert(orig_ctype != -1);
+ *((int*)(__buffer)) = orig_ctype;
+ /* Now push the cleanup. */
+ _pthread_cleanup_push(NULL, __routine, __arg);
+}
+
+
+void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *__buffer,
+ int __execute)
+{
+ /* Pop the top cleanup handler; run it iff __execute is nonzero. */
+ int res;
+ CleanupEntry cu;
+ ensure_valgrind("_pthread_cleanup_pop");
+ cu.fn = NULL; cu.arg = NULL; /* paranoia */
+ VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ VG_USERREQ__CLEANUP_POP,
+ &cu, 0, 0, 0);
+ if (res == 0) {
+ /* pop succeeded */
+ if (__execute) {
+ cu.fn ( cu.arg );
+ }
+ return;
+ }
+ if (res == -1) {
+ /* stack underflow */
+ return;
+ }
+ barf("_pthread_cleanup_pop");
+
+
+void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *__buffer,
+ int __execute)
+{
+ int orig_ctype, fake_ctype;
+ /* As _pthread_cleanup_pop, but after popping/running the handler,
+ restore the thread's original cancellation type from the first
+ word of __buffer. */
+ _pthread_cleanup_pop(NULL, __execute);
+ orig_ctype = *((int*)(__buffer));
+ assert(orig_ctype == PTHREAD_CANCEL_DEFERRED
+ || orig_ctype == PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(-1 != PTHREAD_CANCEL_DEFERRED);
+ assert(-1 != PTHREAD_CANCEL_ASYNCHRONOUS);
+ assert(sizeof(struct _pthread_cleanup_buffer) >= sizeof(int));
+ VALGRIND_MAGIC_SEQUENCE(fake_ctype, (-1) /* default */,
+ VG_USERREQ__SET_CANCELTYPE,
+ orig_ctype, 0, 0, 0);
+ assert(fake_ctype == PTHREAD_CANCEL_DEFERRED);
+}
+
+
+/* ---------------------------------------------------
MUTEX ATTRIBUTES
------------------------------------------------ */
@@ -2446,43 +2545,6 @@
weak_alias(_IO_funlockfile, funlockfile);
-void _pthread_cleanup_push_defer ( void )
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_push_defer");
-}
-
-void _pthread_cleanup_pop_restore ( void )
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_pop_restore");
-}
-
-/*--------*/
-void _pthread_cleanup_push (struct _pthread_cleanup_buffer *__buffer,
- void (*__routine) (void *),
- void *__arg)
-{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("_pthread_cleanup_push");
-}
-
-void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *__buffer,
- int __execute)
-{
- static int moans = N_MOANS;
- if (moans-- > 0) {
- if (__execute)
- ignored("_pthread_cleanup_pop-EXECUTE");
- else
- ignored("_pthread_cleanup_pop-NO-EXECUTE");
- }
-}
-
-
/* This doesn't seem to be needed to simulate libpthread.so's external
interface, but many people complain about its absence. */
diff --git a/vg_scheduler.c b/vg_scheduler.c
index a2a5573..7eeee51 100644
--- a/vg_scheduler.c
+++ b/vg_scheduler.c
@@ -143,6 +143,9 @@
static void do_pthread_getspecific ( ThreadId,
UInt /* pthread_key_t */ );
+static void do__cleanup_push ( ThreadId tid, CleanupEntry* cu );
+static void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu );
+static void do__set_canceltype ( ThreadId tid, Int type );
/* ---------------------------------------------------------------------
Helper functions for the scheduler.
@@ -520,10 +523,11 @@
VG_(threads)[tid].joinee_retval = NULL;
VG_(threads)[tid].joiner_thread_return = NULL;
VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
+ VG_(threads)[tid].detached = False;
VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
VG_(threads)[tid].cancel_pend = NULL; /* not pending */
- VG_(threads)[tid].detached = False;
+ VG_(threads)[tid].custack_used = 0;
VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
for (j = 0; j < VG_N_THREAD_KEYS; j++)
@@ -695,10 +699,6 @@
case VG_USERREQ__READ_MILLISECOND_TIMER:
SIMPLE_RETURN(VG_(read_millisecond_timer)());
- case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
- do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
- return True;
-
/* This may make thread tid non-runnable, but the scheduler
checks for that on return from this function. */
case VG_USERREQ__PTHREAD_MUTEX_LOCK:
@@ -709,10 +709,26 @@
do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
return True;
+ case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
+ do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
+ return True;
+
case VG_USERREQ__PTHREAD_GETSPECIFIC:
do_pthread_getspecific ( tid, (UInt)(arg[1]) );
return True;
+ case VG_USERREQ__SET_CANCELTYPE:
+ do__set_canceltype ( tid, arg[1] );
+ return True;
+
+ case VG_USERREQ__CLEANUP_PUSH:
+ do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
+ return True;
+
+ case VG_USERREQ__CLEANUP_POP:
+ do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
+ return True;
+
default:
/* Too hard; wimp out. */
return False;
@@ -1668,6 +1684,54 @@
-------------------------------------------------------- */
static
+void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
+{
+ Int sp;
+ Char msg_buf[100];
+ vg_assert(VG_(is_valid_tid)(tid));
+ sp = VG_(threads)[tid].custack_used;
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+ "cleanup_push (fn %p, arg %p) -> slot %d",
+ cu->fn, cu->arg, sp);
+ print_sched_event(tid, msg_buf);
+ }
+ vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
+ if (sp == VG_N_CLEANUPSTACK)
+ VG_(panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
+ " Increase and recompile.");
+ VG_(threads)[tid].custack[sp] = *cu;
+ sp++;
+ VG_(threads)[tid].custack_used = sp;
+ SET_EDX(tid, 0);
+}
+
+
+static
+void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
+{
+ Int sp;
+ Char msg_buf[100];
+ vg_assert(VG_(is_valid_tid)(tid));
+ sp = VG_(threads)[tid].custack_used;
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+ "cleanup_pop from slot %d", sp);
+ print_sched_event(tid, msg_buf);
+ }
+ vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
+ if (sp == 0) {
+ SET_EDX(tid, -1);
+ return;
+ }
+ sp--;
+ *cu = VG_(threads)[tid].custack[sp];
+ VG_(threads)[tid].custack_used = sp;
+ SET_EDX(tid, 0);
+}
+
+
+static
void do_pthread_yield ( ThreadId tid )
{
Char msg_buf[100];
@@ -2886,10 +2950,6 @@
do__set_cancelstate ( tid, arg[1] );
break;
- case VG_USERREQ__SET_CANCELTYPE:
- do__set_canceltype ( tid, arg[1] );
- break;
-
case VG_USERREQ__SET_OR_GET_DETACH:
do__set_or_get_detach ( tid, arg[1], arg[2] );
break;