This commit fixes up the handling of shadow registers quite a bit.
Removed the SK_(written_shadow_regs_values)() function. Instead, skins that
use shadow regs can track the `post_regs_write_init' event, and set the shadow
regs from within it. This is much more flexible, since it allows each shadow
register to be set to a separate value if necessary. It also matches the new
shadow-reg-change events described below.
In the core, there were some places where the shadow regs were changed, and
skins had no way of knowing about it, which was a problem for some skins.
So I added a bunch of new events to notify skins about these:
post_reg_write_syscall_return
post_reg_write_deliver_signal
post_reg_write_pthread_return
post_reg_write_clientreq_return
post_reg_write_clientcall_return
Any skin that uses shadow regs should almost certainly track these events. The
post_reg_write_clientcall_return allows a skin to tailor the shadow reg of the
return value of a CLIENTCALL'd function appropriately; this is especially
useful when replacing malloc() et al.
Defined some macros that should be used *whenever the core changes the value of
a shadow register*:
SET_SYSCALL_RETVAL
SET_SIGNAL_EDX (maybe should be SET_SIGNAL_RETVAL? ... not sure)
SET_SIGNAL_ESP
SET_CLREQ_RETVAL
SET_CLCALL_RETVAL
SET_PTHREQ_ESP
SET_PTHREQ_RETVAL
These replace all the old SET_EAX and SET_EDX macros, and are added in a few
places where the shadow-reg update was missing.
Added shadow registers to the machine state saved/restored when signal handlers
are pushed/popped (they were missing).
Added skin-callable functions VG_(set_return_from_syscall_shadow)() and
VG_(get_exit_status_shadow)() which are useful and abstract away from which
registers the results are in.
Also, poll() changes %ebx (its first argument) sometimes; I don't know why.
So we notify skins about that too (with the `post_reg_write_syscall_return'
event, which isn't ideal I guess...)
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1642 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/vg_default.c b/coregrind/vg_default.c
index 6aea85e..b74c740 100644
--- a/coregrind/vg_default.c
+++ b/coregrind/vg_default.c
@@ -159,17 +159,6 @@
/* ---------------------------------------------------------------------
- For throwing out basic block level info when code is invalidated
- ------------------------------------------------------------------ */
-
-__attribute__ ((weak))
-void SK_(written_shadow_regs_values)(UInt* gen_reg, UInt* eflags)
-{
- non_fund_panic("SK_(written_shadow_regs_values)");
-}
-
-
-/* ---------------------------------------------------------------------
Command line arg template function
------------------------------------------------------------------ */
diff --git a/coregrind/vg_from_ucode.c b/coregrind/vg_from_ucode.c
index b285e1d..2236a47 100644
--- a/coregrind/vg_from_ucode.c
+++ b/coregrind/vg_from_ucode.c
@@ -2952,6 +2952,11 @@
VG_(baseBlock)[ shadow_reg_index(archreg) ] = val;
}
+void VG_(set_shadow_eflags) ( UInt val )
+{
+ VG_(baseBlock)[ VGOFF_(sh_eflags) ] = val;
+}
+
UInt VG_(get_thread_shadow_archreg) ( ThreadId tid, UInt archreg )
{
ThreadState* tst;
diff --git a/coregrind/vg_include.h b/coregrind/vg_include.h
index 206c6d8..f6cc1a5 100644
--- a/coregrind/vg_include.h
+++ b/coregrind/vg_include.h
@@ -353,6 +353,16 @@
void (*post_mem_write) ( Addr a, UInt size );
+ /* Register events */
+ void (*post_regs_write_init) ( void );
+ void (*post_reg_write_syscall_return) ( ThreadId tid, UInt reg );
+ void (*post_reg_write_deliver_signal) ( ThreadId tid, UInt reg );
+ void (*post_reg_write_pthread_return) ( ThreadId tid, UInt reg );
+ void (*post_reg_write_clientreq_return) ( ThreadId tid, UInt reg );
+ void (*post_reg_write_clientcall_return) ( ThreadId tid, UInt reg,
+ Addr f );
+
+
/* Scheduler events (not exhaustive) */
void (*thread_run) ( ThreadId tid );
@@ -923,24 +933,38 @@
(VG_AR_CLIENT_STACKBASE_REDZONE_SZW * VKI_BYTES_PER_WORD)
/* Junk to fill up a thread's shadow regs with when shadow regs aren't
- * being used. */
+ being used. */
#define VG_UNUSED_SHADOW_REG_VALUE 0x27182818
+/* For sanity checking: if this ends up in a thread's shadow regs when
+ shadow regs aren't being used, something went wrong. */
+#define VG_USED_SHADOW_REG_VALUE 0x31415927
-/* What we set a shadow register to when written by SET_EAX and similar
- * things. */
-extern UInt VG_(written_shadow_reg);
-
-/* Write a value to the client's %EDX (request return value register)
- and set the shadow to indicate it is defined. */
-#define SET_EDX(zztid, zzval) \
- do { VG_(threads)[zztid].m_edx = (zzval); \
- VG_(threads)[zztid].sh_edx = VG_(written_shadow_reg); \
+/* Write a value to a client's thread register, and shadow (if necessary) */
+#define SET_THREAD_REG( zztid, zzval, zzreg, zzREG, zzevent, zzargs... ) \
+ do { VG_(threads)[zztid].m_##zzreg = (zzval); \
+ VG_TRACK( zzevent, zztid, R_##zzREG, ##zzargs ); \
} while (0)
-#define SET_EAX(zztid, zzval) \
- do { VG_(threads)[zztid].m_eax = (zzval); \
- VG_(threads)[zztid].sh_eax = VG_(written_shadow_reg); \
- } while (0)
+#define SET_SYSCALL_RETVAL(zztid, zzval) \
+ SET_THREAD_REG(zztid, zzval, eax, EAX, post_reg_write_syscall_return)
+
+#define SET_SIGNAL_EDX(zztid, zzval) \
+ SET_THREAD_REG(zztid, zzval, edx, EDX, post_reg_write_deliver_signal)
+
+#define SET_SIGNAL_ESP(zztid, zzval) \
+ SET_THREAD_REG(zztid, zzval, esp, ESP, post_reg_write_deliver_signal)
+
+#define SET_CLREQ_RETVAL(zztid, zzval) \
+ SET_THREAD_REG(zztid, zzval, edx, EDX, post_reg_write_clientreq_return)
+
+#define SET_CLCALL_RETVAL(zztid, zzval, f) \
+ SET_THREAD_REG(zztid, zzval, edx, EDX, post_reg_write_clientcall_return, f)
+
+#define SET_PTHREQ_ESP(zztid, zzval) \
+ SET_THREAD_REG(zztid, zzval, esp, ESP, post_reg_write_pthread_return)
+
+#define SET_PTHREQ_RETVAL(zztid, zzval) \
+ SET_THREAD_REG(zztid, zzval, edx, EDX, post_reg_write_pthread_return)
/* This is or'd into a pthread mutex's __m_kind field if it is used
@@ -1435,14 +1459,14 @@
extern Bool VG_(is_kerror) ( Int res );
-#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
- VG_(load_thread_state)(thread_id); \
- VG_(copy_baseBlock_to_m_state_static)(); \
- VG_(do_syscall)(); \
- VG_(copy_m_state_static_to_baseBlock)(); \
- VG_(save_thread_state)(thread_id); \
- VG_(threads)[thread_id].sh_eax = VG_(written_shadow_reg);\
- result_lvalue = VG_(threads)[thread_id].m_eax;
+#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
+ VG_(load_thread_state)(thread_id); \
+ VG_(copy_baseBlock_to_m_state_static)(); \
+ VG_(do_syscall)(); \
+ VG_(copy_m_state_static_to_baseBlock)(); \
+ VG_(save_thread_state)(thread_id); \
+ result_lvalue = VG_(threads)[thread_id].m_eax; \
+ VG_TRACK( post_reg_write_syscall_return, thread_id, R_EAX );
/* ---------------------------------------------------------------------
diff --git a/coregrind/vg_intercept.c b/coregrind/vg_intercept.c
index 1f810e8..d0f5724 100644
--- a/coregrind/vg_intercept.c
+++ b/coregrind/vg_intercept.c
@@ -279,6 +279,7 @@
VG_USERREQ__READ_MILLISECOND_TIMER,
0, 0, 0, 0);
+
/* CHECK SIZES FOR struct pollfd */
my_assert(sizeof(struct timeval) == sizeof(struct vki_timeval));
@@ -357,7 +358,6 @@
return 0;
}
}
-
}
}
diff --git a/coregrind/vg_main.c b/coregrind/vg_main.c
index 960fa75..2390cd7 100644
--- a/coregrind/vg_main.c
+++ b/coregrind/vg_main.c
@@ -390,9 +390,6 @@
/* Initialise shadow regs */
if (VG_(needs).shadow_regs) {
- UInt eflags;
-
- SK_(written_shadow_regs_values) ( & VG_(written_shadow_reg), & eflags );
VG_(baseBlock)[VGOFF_(sh_esp)] =
VG_(baseBlock)[VGOFF_(sh_ebp)] =
VG_(baseBlock)[VGOFF_(sh_eax)] =
@@ -400,11 +397,9 @@
VG_(baseBlock)[VGOFF_(sh_edx)] =
VG_(baseBlock)[VGOFF_(sh_ebx)] =
VG_(baseBlock)[VGOFF_(sh_esi)] =
- VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(written_shadow_reg);
- VG_(baseBlock)[VGOFF_(sh_eflags)] = eflags;
-
- } else {
- VG_(written_shadow_reg) = VG_UNUSED_SHADOW_REG_VALUE;
+ VG_(baseBlock)[VGOFF_(sh_edi)] = 0;
+ VG_(baseBlock)[VGOFF_(sh_eflags)] = 0;
+ VG_TRACK( post_regs_write_init );
}
}
diff --git a/coregrind/vg_needs.c b/coregrind/vg_needs.c
index c5cb357..259e15e 100644
--- a/coregrind/vg_needs.c
+++ b/coregrind/vg_needs.c
@@ -95,6 +95,14 @@
.pre_mem_write = NULL,
.post_mem_write = NULL,
+ /* Register events */
+ .post_regs_write_init = NULL,
+ .post_reg_write_syscall_return = NULL,
+ .post_reg_write_deliver_signal = NULL,
+ .post_reg_write_pthread_return = NULL,
+ .post_reg_write_clientreq_return = NULL,
+ .post_reg_write_clientcall_return = NULL,
+
/* Scheduler events */
.thread_run = NULL,
@@ -129,7 +137,8 @@
VG_(track_events).new_mem_stack_12 ||
VG_(track_events).new_mem_stack_16 ||
VG_(track_events).new_mem_stack_32) &&
- ! VG_(track_events).new_mem_stack) {
+ ! VG_(track_events).new_mem_stack)
+ {
VG_(printf)("\nSkin error: one of the specialised `new_mem_stack_n'\n"
"events tracked, but not the generic `new_mem_stack' one.\n");
VG_(skin_panic)("`new_mem_stack' should be defined\n");
@@ -140,12 +149,25 @@
VG_(track_events).die_mem_stack_12 ||
VG_(track_events).die_mem_stack_16 ||
VG_(track_events).die_mem_stack_32) &&
- ! VG_(track_events).die_mem_stack) {
+ ! VG_(track_events).die_mem_stack)
+ {
VG_(printf)("\nSkin error: one of the specialised `die_mem_stack_n'\n"
"events tracked, but not the generic `die_mem_stack' one.\n");
VG_(skin_panic)("`die_mem_stack' should be defined\n");
}
+ if ( (VG_(track_events).post_reg_write_syscall_return ||
+ VG_(track_events).post_reg_write_deliver_signal ||
+ VG_(track_events).post_reg_write_pthread_return ||
+ VG_(track_events).post_reg_write_clientreq_return ||
+ VG_(track_events).post_reg_write_clientcall_return) &&
+ ! VG_(needs).shadow_regs)
+ {
+ VG_(printf)("\nSkin error: one of the `post_reg_write'\n"
+ "events tracked, but `shadow_regs' need not set.\n");
+ VG_(skin_panic)("`shadow_regs' should be set\n");
+ }
+
#undef CHECK_NOT
#undef INVALID_Bool
}
@@ -232,6 +254,13 @@
UInt size)
TRACK(post_mem_write, Addr a, UInt size)
+TRACK(post_regs_write_init, void );
+TRACK(post_reg_write_syscall_return, ThreadId tid, UInt reg );
+TRACK(post_reg_write_deliver_signal, ThreadId tid, UInt reg );
+TRACK(post_reg_write_pthread_return, ThreadId tid, UInt reg );
+TRACK(post_reg_write_clientreq_return, ThreadId tid, UInt reg );
+TRACK(post_reg_write_clientcall_return, ThreadId tid, UInt reg, Addr f );
+
TRACK(thread_run, ThreadId tid)
TRACK(post_thread_create, ThreadId tid, ThreadId child)
diff --git a/coregrind/vg_scheduler.c b/coregrind/vg_scheduler.c
index ed4fcba..92c951b 100644
--- a/coregrind/vg_scheduler.c
+++ b/coregrind/vg_scheduler.c
@@ -160,7 +160,10 @@
typedef UInt ThreadKey;
-UInt VG_(written_shadow_reg);
+UInt VG_(syscall_altered_shadow_reg);
+UInt VG_(signal_delivery_altered_shadow_reg);
+UInt VG_(pthread_op_altered_shadow_reg);
+UInt VG_(client_request_altered_shadow_reg);
/* Forwards */
static void do_client_request ( ThreadId tid );
@@ -441,8 +444,6 @@
VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
} else {
/* Fields shouldn't be used -- check their values haven't changed. */
- /* Nb: they are written to by some macros like SET_EDX, but they
- * should just write VG_UNUSED_SHADOW_REG_VALUE. */
vg_assert(
VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eax &&
VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebx &&
@@ -788,8 +789,7 @@
vg_assert(VG_(is_valid_tid)(tid));
- /* Increment signal-returned counter. Used only to implement
- pause(). */
+ /* Increment signal-returned counter. Used only to implement pause(). */
VG_(threads)[tid].n_signals_returned++;
restart_blocked_syscalls = VG_(signal_returns)(tid);
@@ -803,9 +803,8 @@
|| VG_(threads)[tid].m_eax == __NR_write)) {
/* read() or write() interrupted. Force a return with EINTR. */
cleanup_waiting_fd_table(tid);
- VG_(threads)[tid].m_eax = -VKI_EINTR;
+ SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
VG_(threads)[tid].status = VgTs_Runnable;
-
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"read() / write() interrupted by signal; return EINTR" );
@@ -825,7 +824,7 @@
rem->tv_sec = 0;
rem->tv_nsec = 1;
}
- SET_EAX(tid, -VKI_EINTR);
+ SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
VG_(threads)[tid].status = VgTs_Runnable;
return;
}
@@ -841,7 +840,7 @@
static
void sched_do_syscall ( ThreadId tid )
{
- UInt saved_eax;
+ UInt saved_meax, saved_sheax;
Int res, syscall_no;
UInt fd;
void* pre_res;
@@ -908,7 +907,8 @@
pre_res = VG_(pre_known_blocking_syscall)(tid, syscall_no);
/* This trashes the thread's %eax; we have to preserve it. */
- saved_eax = VG_(threads)[tid].m_eax;
+ saved_meax = VG_(threads)[tid].m_eax;
+ saved_sheax = VG_(threads)[tid].sh_eax;
KERNEL_DO_SYSCALL(tid,res);
/* Restore original blockfulness of the fd. */
@@ -936,11 +936,13 @@
/* It would have blocked. First, restore %EAX to what it was
before our speculative call. */
- VG_(threads)[tid].m_eax = saved_eax;
+ saved_meax = VG_(threads)[tid].m_eax = saved_meax;
+ saved_sheax = VG_(threads)[tid].sh_eax = saved_sheax;
+
/* Put this fd in a table of fds on which we are waiting for
completion. The arguments for select() later are constructed
from this table. */
- add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */,
+ add_waiting_fd(tid, fd, saved_meax /* which holds the syscall # */,
pre_res);
/* Deschedule thread until an I/O completion happens. */
VG_(threads)[tid].status = VgTs_WaitFD;
@@ -948,7 +950,6 @@
VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
print_sched_event(tid, msg_buf);
}
-
}
}
@@ -1010,7 +1011,8 @@
rem->tv_nsec = 0;
}
/* Make the syscall return 0 (success). */
- VG_(threads)[tid].m_eax = 0;
+ SET_SYSCALL_RETVAL(tid, 0);
+
/* Reschedule this thread. */
VG_(threads)[tid].status = VgTs_Runnable;
if (VG_(clo_trace_sched)) {
@@ -1526,7 +1528,7 @@
thread yield instead. Not essential, just an
optimisation. */
if (VG_(threads)[tid].m_eax == __NR_sched_yield) {
- SET_EAX(tid, 0); /* syscall returns with success */
+ SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
goto stage1; /* find a new thread to run */
}
@@ -1670,7 +1672,7 @@
vg_assert(VG_(threads)[tid].cancel_pend != NULL);
/* Push a suitable arg, and mark it as readable. */
- VG_(threads)[tid].m_esp -= 4;
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
* (UInt*)(VG_(threads)[tid].m_esp) = (UInt)PTHREAD_CANCELED;
VG_TRACK( post_mem_write, VG_(threads)[tid].m_esp, sizeof(void*) );
@@ -1678,7 +1680,7 @@
need to have it so that the arg is at the correct stack offset.
Don't mark as readable; any attempt to read this is and internal
valgrind bug since thread_exit_wrapper should not return. */
- VG_(threads)[tid].m_esp -= 4;
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
* (UInt*)(VG_(threads)[tid].m_esp) = 0xBEADDEEF;
/* .cancel_pend will hold &thread_exit_wrapper */
@@ -1790,7 +1792,7 @@
/* joiner returns with success */
VG_(threads)[jnr].status = VgTs_Runnable;
- SET_EDX(jnr, 0);
+ SET_PTHREQ_RETVAL(jnr, 0);
}
}
@@ -1838,7 +1840,7 @@
VG_(threads)[tid].custack[sp] = *cu;
sp++;
VG_(threads)[tid].custack_used = sp;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -1855,7 +1857,7 @@
}
vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
if (sp == 0) {
- SET_EDX(tid, -1);
+ SET_PTHREQ_RETVAL(tid, -1);
return;
}
sp--;
@@ -1864,7 +1866,7 @@
*cu = VG_(threads)[tid].custack[sp];
VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
VG_(threads)[tid].custack_used = sp;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -1877,7 +1879,7 @@
VG_(sprintf)(msg_buf, "yield");
print_sched_event(tid, msg_buf);
}
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -1898,7 +1900,7 @@
make_thread_jump_to_cancelhdlr ( tid );
} else {
/* No, we keep going. */
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
}
@@ -1925,8 +1927,8 @@
} else {
VG_(core_panic)("do__set_cancelstate");
}
- SET_EDX(tid, old_st ? PTHREAD_CANCEL_ENABLE
- : PTHREAD_CANCEL_DISABLE);
+ SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
+ : PTHREAD_CANCEL_DISABLE);
}
@@ -1952,7 +1954,7 @@
} else {
VG_(core_panic)("do__set_canceltype");
}
- SET_EDX(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
+ SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
: PTHREAD_CANCEL_ASYNCHRONOUS);
}
@@ -1977,20 +1979,20 @@
}
if (!VG_(is_valid_tid)(det)) {
- SET_EDX(tid, -1);
+ SET_PTHREQ_RETVAL(tid, -1);
return;
}
switch (what) {
case 2: /* get */
- SET_EDX(tid, VG_(threads)[det].detached ? 1 : 0);
+ SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
return;
case 1: /* set detached. If someone is in a join-wait for det,
do not detach. */
for (i = 1; i < VG_N_THREADS; i++) {
if (VG_(threads)[i].status == VgTs_WaitJoinee
&& VG_(threads)[i].joiner_jee_tid == det) {
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"tid %d not detached because %d in join-wait for it",
@@ -2001,11 +2003,11 @@
}
}
VG_(threads)[det].detached = True;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
return;
case 0: /* set not detached */
VG_(threads)[det].detached = False;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
return;
default:
VG_(core_panic)("do__set_or_get_detach");
@@ -2031,7 +2033,7 @@
}
VG_(record_pthread_error)( tid,
"pthread_cancel: target thread does not exist, or invalid");
- SET_EDX(tid, -VKI_ESRCH);
+ SET_PTHREQ_RETVAL(tid, -VKI_ESRCH);
return;
}
@@ -2045,7 +2047,7 @@
}
/* Thread doing the cancelling returns with success. */
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
/* Perhaps we can nuke the cancellee right now? */
do__testcancel(cee);
@@ -2066,7 +2068,7 @@
if (jee == tid) {
VG_(record_pthread_error)( tid,
"pthread_join: attempt to join to self");
- SET_EDX(tid, EDEADLK); /* libc constant, not a kernel one */
+ SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
VG_(threads)[tid].status = VgTs_Runnable;
return;
}
@@ -2082,7 +2084,7 @@
/* Invalid thread to join to. */
VG_(record_pthread_error)( tid,
"pthread_join: target thread does not exist, or invalid");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
VG_(threads)[tid].status = VgTs_Runnable;
return;
}
@@ -2096,7 +2098,7 @@
VG_(record_pthread_error)( tid,
"pthread_join: another thread already "
"in join-wait for target thread");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
VG_(threads)[tid].status = VgTs_Runnable;
return;
}
@@ -2246,9 +2248,9 @@
- mark everything below %esp inaccessible
- mark redzone at stack end inaccessible
*/
- VG_(threads)[tid].m_esp = VG_(threads)[tid].stack_base
- + VG_(threads)[tid].stack_size
- - VG_AR_CLIENT_STACKBASE_REDZONE_SZB;
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
+ + VG_(threads)[tid].stack_size
+ - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
+ new_stk_szb - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
@@ -2256,7 +2258,8 @@
VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
/* push two args */
- VG_(threads)[tid].m_esp -= 8;
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 8);
+
VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
VG_TRACK ( pre_mem_write, Vg_CorePThread, & VG_(threads)[tid],
"new thread: stack",
@@ -2282,7 +2285,7 @@
VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
/* return child's tid to parent */
- SET_EDX(parent_tid, tid); /* success */
+ SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
}
@@ -2403,7 +2406,7 @@
if (mutex == NULL) {
VG_(record_pthread_error)( tid,
"pthread_mutex_lock/trylock: mutex is NULL");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2423,7 +2426,7 @@
default:
VG_(record_pthread_error)( tid,
"pthread_mutex_lock/trylock: mutex is invalid");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2437,16 +2440,16 @@
if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
/* return 0 (success). */
mutex->__m_count++;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
if (0)
VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
tid, mutex, mutex->__m_count);
return;
} else {
if (is_trylock)
- SET_EDX(tid, EBUSY);
+ SET_PTHREQ_RETVAL(tid, EBUSY);
else
- SET_EDX(tid, EDEADLK);
+ SET_PTHREQ_RETVAL(tid, EDEADLK);
return;
}
} else {
@@ -2455,13 +2458,13 @@
/* GUARD: __m_count > 0 && __m_owner is valid */
if (is_trylock) {
/* caller is polling; so return immediately. */
- SET_EDX(tid, EBUSY);
+ SET_PTHREQ_RETVAL(tid, EBUSY);
} else {
VG_TRACK ( pre_mutex_lock, tid, mutex );
VG_(threads)[tid].status = VgTs_WaitMX;
VG_(threads)[tid].associated_mx = mutex;
- SET_EDX(tid, 0); /* pth_mx_lock success value */
+ SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
caller, mutex );
@@ -2481,12 +2484,11 @@
mutex->__m_count = 1;
mutex->__m_owner = (_pthread_descr)tid;
- VG_TRACK( post_mutex_lock, tid, mutex);
-
/* return 0 (success). */
- SET_EDX(tid, 0);
- }
+ SET_PTHREQ_RETVAL(tid, 0);
+ VG_TRACK( post_mutex_lock, tid, mutex);
+ }
}
@@ -2508,7 +2510,7 @@
if (mutex == NULL) {
VG_(record_pthread_error)( tid,
"pthread_mutex_unlock: mutex is NULL");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2536,7 +2538,7 @@
default:
VG_(record_pthread_error)( tid,
"pthread_mutex_unlock: mutex is invalid");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2545,7 +2547,7 @@
/* nobody holds it */
VG_(record_pthread_error)( tid,
"pthread_mutex_unlock: mutex is not locked");
- SET_EDX(tid, EPERM);
+ SET_PTHREQ_RETVAL(tid, EPERM);
return;
}
@@ -2553,7 +2555,7 @@
/* we don't hold it */
VG_(record_pthread_error)( tid,
"pthread_mutex_unlock: mutex is locked by a different thread");
- SET_EDX(tid, EPERM);
+ SET_PTHREQ_RETVAL(tid, EPERM);
return;
}
@@ -2562,7 +2564,7 @@
if (mutex->__m_count > 1) {
vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
mutex->__m_count --;
- SET_EDX(tid, 0); /* success */
+ SET_PTHREQ_RETVAL(tid, 0); /* success */
return;
}
@@ -2575,7 +2577,7 @@
release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
/* Our (tid's) pth_unlock() returns with 0 (success). */
- SET_EDX(tid, 0); /* Success. */
+ SET_PTHREQ_RETVAL(tid, 0); /* Success. */
}
@@ -2627,7 +2629,7 @@
/* Currently unheld; hand it out to thread tid. */
vg_assert(mx->__m_count == 0);
VG_(threads)[tid].status = VgTs_Runnable;
- SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
+ SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
VG_(threads)[tid].associated_cv = NULL;
VG_(threads)[tid].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)tid;
@@ -2647,7 +2649,7 @@
VG_TRACK( pre_mutex_lock, tid, mx );
VG_(threads)[tid].status = VgTs_WaitMX;
- SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
+ SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
VG_(threads)[tid].associated_cv = NULL;
VG_(threads)[tid].associated_mx = mx;
if (VG_(clo_trace_pthread_level) >= 1) {
@@ -2718,7 +2720,7 @@
VG_(threads)[i].status = VgTs_WaitMX;
VG_(threads)[i].associated_cv = NULL;
VG_(threads)[i].associated_mx = mx;
- SET_EDX(i, 0); /* pth_cond_wait success value */
+ SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
@@ -2758,7 +2760,7 @@
if (mutex == NULL || cond == NULL) {
VG_(record_pthread_error)( tid,
"pthread_cond_wait/timedwait: cond or mutex is NULL");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2778,7 +2780,7 @@
default:
VG_(record_pthread_error)( tid,
"pthread_cond_wait/timedwait: mutex is invalid");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2788,7 +2790,7 @@
VG_(record_pthread_error)( tid,
"pthread_cond_wait/timedwait: mutex is unlocked "
"or is locked but not owned by thread");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2833,7 +2835,7 @@
if (cond == NULL) {
VG_(record_pthread_error)( tid,
"pthread_cond_signal/broadcast: cond is NULL");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
@@ -2843,7 +2845,7 @@
caller
);
- SET_EDX(tid, 0); /* success */
+ SET_PTHREQ_RETVAL(tid, 0); /* success */
}
@@ -2879,9 +2881,9 @@
&& VG_(threads)[tid].status == VgTs_Runnable);
if (is_valid_key((ThreadKey)key)) {
- SET_EDX(tid, 1);
+ SET_PTHREQ_RETVAL(tid, 1);
} else {
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
}
@@ -2909,7 +2911,7 @@
break;
if (i == VG_N_THREAD_KEYS) {
- /* SET_EDX(tid, EAGAIN);
+ /* SET_PTHREQ_RETVAL(tid, EAGAIN);
return;
*/
VG_(core_panic)("pthread_key_create: VG_N_THREAD_KEYS is too low;"
@@ -2926,7 +2928,7 @@
*key = i;
VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -2946,13 +2948,13 @@
if (!is_valid_key(key)) {
VG_(record_pthread_error)( tid,
"pthread_key_delete: key is invalid");
- SET_EDX(tid, EINVAL);
+ SET_PTHREQ_RETVAL(tid, EINVAL);
return;
}
vg_thread_keys[key].inuse = False;
vg_thread_keys[key].destructor = NULL;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -2974,7 +2976,7 @@
vg_assert(VG_(is_valid_or_empty_tid)(tid));
if (VG_(threads)[tid].status == VgTs_Empty) {
- SET_EDX(tid, 1);
+ SET_PTHREQ_RETVAL(tid, 1);
return;
}
@@ -2982,7 +2984,7 @@
vg_assert(specifics_ptr == NULL
|| IS_ALIGNED4_ADDR(specifics_ptr));
- SET_EDX(tid, (UInt)specifics_ptr);
+ SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
}
@@ -3000,7 +3002,7 @@
&& VG_(threads)[tid].status == VgTs_Runnable);
VG_(threads)[tid].specifics_ptr = ptr;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -3023,7 +3025,7 @@
vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
if (!vg_thread_keys[key].inuse) {
- SET_EDX(tid, -1);
+ SET_PTHREQ_RETVAL(tid, -1);
return;
}
VG_TRACK( pre_mem_write, Vg_CorePThread, & VG_(threads)[tid],
@@ -3042,7 +3044,7 @@
}
VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -3087,7 +3089,7 @@
VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );
/* Success. */
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -3138,17 +3140,17 @@
if (!VG_(is_valid_tid)(thread)) {
VG_(record_pthread_error)( tid,
"pthread_kill: invalid target thread");
- SET_EDX(tid, -VKI_ESRCH);
+ SET_PTHREQ_RETVAL(tid, -VKI_ESRCH);
return;
}
if (sig < 1 || sig > VKI_KNSIG) {
- SET_EDX(tid, -VKI_EINVAL);
+ SET_PTHREQ_RETVAL(tid, -VKI_EINVAL);
return;
}
VG_(send_signal_to_thread)( thread, sig );
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -3170,9 +3172,9 @@
if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
vg_fhstack_used = n;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
} else {
- SET_EDX(tid, -1);
+ SET_PTHREQ_RETVAL(tid, -1);
}
}
@@ -3192,7 +3194,7 @@
n = vg_fhstack_used;
vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
- SET_EDX(tid, n);
+ SET_PTHREQ_RETVAL(tid, n);
}
static
@@ -3211,12 +3213,12 @@
(Addr)fh, sizeof(ForkHandlerEntry));
if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
- SET_EDX(tid, -1);
+ SET_PTHREQ_RETVAL(tid, -1);
return;
}
vg_fhstack[n] = *fh;
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
}
@@ -3237,16 +3239,30 @@
(Addr)fh, sizeof(ForkHandlerEntry));
if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
- SET_EDX(tid, -1);
+ SET_PTHREQ_RETVAL(tid, -1);
return;
}
*fh = vg_fhstack[n];
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
}
+/* ---------------------------------------------------------------------
+ Specifying shadow register values
+ ------------------------------------------------------------------ */
+
+void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
+{
+ VG_(set_thread_shadow_archreg)(tid, R_EAX, ret_shadow);
+}
+
+UInt VG_(get_exit_status_shadow) ( void )
+{
+ return VG_(get_shadow_archreg)(R_EBX);
+}
+
/* ---------------------------------------------------------------------
Handle client requests.
@@ -3259,11 +3275,6 @@
static
void do_client_request ( ThreadId tid )
{
-# define RETURN_WITH(vvv) \
- { tst->m_edx = (vvv); \
- tst->sh_edx = VG_(written_shadow_reg); \
- }
-
ThreadState* tst = &VG_(threads)[tid];
UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
UInt req_no = arg[0];
@@ -3271,63 +3282,45 @@
/* VG_(printf)("req no = 0x%x\n", req_no); */
switch (req_no) {
- /* For the CLIENT_{,tst}CALL[0123] ones, have to do some nasty casting
- to make gcc believe it's a function. */
case VG_USERREQ__CLIENT_CALL0: {
UInt (*f)(void) = (void*)arg[1];
- RETURN_WITH(
- f ( )
- );
+ SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
break;
}
case VG_USERREQ__CLIENT_CALL1: {
UInt (*f)(UInt) = (void*)arg[1];
- RETURN_WITH(
- f ( arg[2] )
- );
+ SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_CALL2: {
UInt (*f)(UInt, UInt) = (void*)arg[1];
- RETURN_WITH(
- f ( arg[2], arg[3] )
- );
+ SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_CALL3: {
UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
- RETURN_WITH(
- f ( arg[2], arg[3], arg[4] )
- );
+ SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL0: {
UInt (*f)(ThreadState*) = (void*)arg[1];
- RETURN_WITH(
- f ( tst )
- );
+ SET_CLCALL_RETVAL(tid, f ( tst ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL1: {
UInt (*f)(ThreadState*, UInt) = (void*)arg[1];
- RETURN_WITH(
- f ( tst, arg[2] )
- );
+ SET_CLCALL_RETVAL(tid, f ( tst, arg[2] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL2: {
UInt (*f)(ThreadState*, UInt, UInt) = (void*)arg[1];
- RETURN_WITH(
- f ( tst, arg[2], arg[3] )
- );
+ SET_CLCALL_RETVAL(tid, f ( tst, arg[2], arg[3] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL3: {
UInt (*f)(ThreadState*, UInt, UInt, UInt) = (void*)arg[1];
- RETURN_WITH(
- f ( tst, arg[2], arg[3], arg[4] )
- );
+ SET_CLCALL_RETVAL(tid, f ( tst, arg[2], arg[3], arg[4] ), (Addr)f );
break;
}
@@ -3341,8 +3334,8 @@
the comment in vg_defaults.c/SK_(malloc)() for why. */
case VG_USERREQ__MALLOC:
VG_(sk_malloc_called_by_scheduler) = True;
- RETURN_WITH(
- (UInt)SK_(malloc) ( tst, arg[1] )
+ SET_PTHREQ_RETVAL(
+ tid, (UInt)SK_(malloc) ( tst, arg[1] )
);
VG_(sk_malloc_called_by_scheduler) = False;
break;
@@ -3351,23 +3344,23 @@
VG_(sk_malloc_called_by_scheduler) = True;
SK_(free) ( tst, (void*)arg[1] );
VG_(sk_malloc_called_by_scheduler) = False;
- RETURN_WITH(0); /* irrelevant */
+ SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
break;
case VG_USERREQ__PTHREAD_GET_THREADID:
- RETURN_WITH(tid);
+ SET_PTHREQ_RETVAL(tid, tid);
break;
case VG_USERREQ__RUNNING_ON_VALGRIND:
- RETURN_WITH(1);
+ SET_CLREQ_RETVAL(tid, 1);
break;
case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
- RETURN_WITH(VG_(clo_trace_pthread_level));
+ SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
break;
case VG_USERREQ__READ_MILLISECOND_TIMER:
- RETURN_WITH(VG_(read_millisecond_timer)());
+ SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
break;
/* Some of these may make thread tid non-runnable, but the
@@ -3405,7 +3398,7 @@
break;
case VG_USERREQ__GET_N_SIGS_RETURNED:
- RETURN_WITH(VG_(threads)[tid].n_signals_returned);
+ SET_PTHREQ_RETVAL(tid, VG_(threads)[tid].n_signals_returned);
break;
case VG_USERREQ__PTHREAD_JOIN:
@@ -3517,12 +3510,12 @@
case VG_USERREQ__NUKE_OTHER_THREADS:
VG_(nuke_all_threads_except) ( tid );
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
break;
case VG_USERREQ__PTHREAD_ERROR:
VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
- SET_EDX(tid, 0);
+ SET_PTHREQ_RETVAL(tid, 0);
break;
case VG_USERREQ__SET_FHSTACK_USED:
@@ -3557,11 +3550,11 @@
VG_(invalidate_translations)( arg[1], arg[2], True );
- SET_EDX( tid, 0 ); /* return value is meaningless */
+ SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
break;
case VG_USERREQ__COUNT_ERRORS:
- SET_EDX( tid, VG_(n_errs_found) );
+ SET_CLREQ_RETVAL( tid, VG_(n_errs_found) );
break;
default:
@@ -3573,7 +3566,7 @@
arg[0], (void*)arg[1], arg[2] );
if (SK_(handle_client_request) ( &VG_(threads)[tid], arg, &ret ))
- SET_EDX(tid, ret);
+ SET_CLREQ_RETVAL(tid, ret);
} else {
static Bool whined = False;
@@ -3588,8 +3581,6 @@
}
break;
}
-
-# undef RETURN_WITH
}
diff --git a/coregrind/vg_signals.c b/coregrind/vg_signals.c
index 4cafe38..1123ee9 100644
--- a/coregrind/vg_signals.c
+++ b/coregrind/vg_signals.c
@@ -518,13 +518,13 @@
if (ss != NULL) {
if (on_sig_stack(VG_(threads)[tid].m_esp)) {
- SET_EAX(tid, -VKI_EPERM);
+ SET_SYSCALL_RETVAL(tid, -VKI_EPERM);
return;
}
if (ss->ss_flags != VKI_SS_DISABLE
&& ss->ss_flags != VKI_SS_ONSTACK
&& ss->ss_flags != 0) {
- SET_EAX(tid, -VKI_EINVAL);
+ SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
return;
}
if (ss->ss_flags == VKI_SS_DISABLE) {
@@ -532,14 +532,14 @@
vg_scss.altstack.ss_sp = NULL;
} else {
if (ss->ss_size < VKI_MINSIGSTKSZ) {
- SET_EAX(tid, -VKI_ENOMEM);
+ SET_SYSCALL_RETVAL(tid, -VKI_ENOMEM);
return;
}
}
vg_scss.altstack.ss_sp = ss->ss_sp;
vg_scss.altstack.ss_size = ss->ss_size;
}
- SET_EAX(tid, 0);
+ SET_SYSCALL_RETVAL(tid, 0);
}
@@ -597,7 +597,7 @@
VG_(block_all_host_signals)( &irrelevant_sigmask );
VG_(handle_SCSS_change)( False /* lazy update */ );
}
- SET_EAX(tid, 0);
+ SET_SYSCALL_RETVAL(tid, 0);
return;
bad_signo:
@@ -605,7 +605,7 @@
VG_(message)(Vg_UserMsg,
"Warning: bad signal number %d in __NR_sigaction.",
signo);
- SET_EAX(tid, -VKI_EINVAL);
+ SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
return;
bad_sigkill_or_sigstop:
@@ -614,7 +614,7 @@
"Warning: attempt to set %s handler in __NR_sigaction.",
signo == VKI_SIGKILL ? "SIGKILL" : "SIGSTOP" );
- SET_EAX(tid, -VKI_EINVAL);
+ SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
return;
}
@@ -713,11 +713,11 @@
vg_assert(VG_(is_valid_tid)(tid));
do_setmask ( VG_INVALID_THREADID, how, set, oldset );
/* Syscall returns 0 (success) to its thread. */
- SET_EAX(tid, 0);
+ SET_SYSCALL_RETVAL(tid, 0);
} else {
VG_(message)(Vg_DebugMsg,
"sigprocmask: unknown `how' field %d", how);
- SET_EAX(tid, -VKI_EINVAL);
+ SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
}
}
@@ -904,17 +904,29 @@
/* Safely-saved version of sigNo, as described above. */
Int sigNo_private;
/* Saved processor state. */
- UInt ssestate[VG_SIZE_OF_SSESTATE_W];
- UInt eax;
- UInt ecx;
- UInt edx;
- UInt ebx;
- UInt ebp;
- UInt esp;
- UInt esi;
- UInt edi;
- Addr eip;
- UInt eflags;
+ UInt m_sse[VG_SIZE_OF_SSESTATE_W];
+
+ UInt m_eax;
+ UInt m_ecx;
+ UInt m_edx;
+ UInt m_ebx;
+ UInt m_ebp;
+ UInt m_esp;
+ UInt m_esi;
+ UInt m_edi;
+ UInt m_eflags;
+ Addr m_eip;
+
+ UInt sh_eax;
+ UInt sh_ebx;
+ UInt sh_ecx;
+ UInt sh_edx;
+ UInt sh_esi;
+ UInt sh_edi;
+ UInt sh_ebp;
+ UInt sh_esp;
+ UInt sh_eflags;
+
/* Scheduler-private stuff: what was the thread's status prior to
delivering this signal? */
ThreadStatus status;
@@ -989,25 +1001,41 @@
frame->magicPI = 0x31415927;
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- frame->ssestate[i] = tst->m_sse[i];
+ frame->m_sse[i] = tst->m_sse[i];
- frame->eax = tst->m_eax;
- frame->ecx = tst->m_ecx;
- frame->edx = tst->m_edx;
- frame->ebx = tst->m_ebx;
- frame->ebp = tst->m_ebp;
- frame->esp = tst->m_esp;
- frame->esi = tst->m_esi;
- frame->edi = tst->m_edi;
- frame->eip = tst->m_eip;
- frame->eflags = tst->m_eflags;
+ frame->m_eax = tst->m_eax;
+ frame->m_ecx = tst->m_ecx;
+ frame->m_edx = tst->m_edx;
+ frame->m_ebx = tst->m_ebx;
+ frame->m_ebp = tst->m_ebp;
+ frame->m_esp = tst->m_esp;
+ frame->m_esi = tst->m_esi;
+ frame->m_edi = tst->m_edi;
+ frame->m_eflags = tst->m_eflags;
+ frame->m_eip = tst->m_eip;
+
+ if (VG_(needs).shadow_regs) {
+ frame->sh_eax = tst->sh_eax;
+ frame->sh_ecx = tst->sh_ecx;
+ frame->sh_edx = tst->sh_edx;
+ frame->sh_ebx = tst->sh_ebx;
+ frame->sh_ebp = tst->sh_ebp;
+ frame->sh_esp = tst->sh_esp;
+ frame->sh_esi = tst->sh_esi;
+ frame->sh_edi = tst->sh_edi;
+ frame->sh_eflags = tst->sh_eflags;
+ }
frame->status = tst->status;
frame->magicE = 0x27182818;
+ /* Ensure 'tid' and 'tst' correspond */
+ vg_assert(& VG_(threads)[tid] == tst);
/* Set the thread so it will next run the handler. */
- tst->m_esp = esp;
+ /* tst->m_esp = esp; */
+ SET_SIGNAL_ESP(tid, esp);
+
tst->m_eip = (Addr)vg_scss.scss_per_sig[sigNo].scss_handler;
/* This thread needs to be marked runnable, but we leave that for the
   caller to do. */
@@ -1022,7 +1050,6 @@
*/
}
-
/* Clear the signal frame created by vg_push_signal_frame, restore the
simulated machine state, and return the signal number that the
frame was for. */
@@ -1049,25 +1076,36 @@
VG_(message)(Vg_DebugMsg,
"vg_pop_signal_frame (thread %d): valid magic", tid);
- /* restore machine state */
- for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- tst->m_sse[i] = frame->ssestate[i];
-
/* Mark the frame structure as nonaccessible. */
VG_TRACK( die_mem_stack_signal, (Addr)frame, sizeof(VgSigFrame) );
- /* Restore machine state from the saved context. */
- tst->m_eax = frame->eax;
- tst->m_ecx = frame->ecx;
- tst->m_edx = frame->edx;
- tst->m_ebx = frame->ebx;
- tst->m_ebp = frame->ebp;
- tst->m_esp = frame->esp;
- tst->m_esi = frame->esi;
- tst->m_edi = frame->edi;
- tst->m_eflags = frame->eflags;
- tst->m_eip = frame->eip;
+ /* restore machine state */
+ for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
+ tst->m_sse[i] = frame->m_sse[i];
+ tst->m_eax = frame->m_eax;
+ tst->m_ecx = frame->m_ecx;
+ tst->m_edx = frame->m_edx;
+ tst->m_ebx = frame->m_ebx;
+ tst->m_ebp = frame->m_ebp;
+ tst->m_esp = frame->m_esp;
+ tst->m_esi = frame->m_esi;
+ tst->m_edi = frame->m_edi;
+ tst->m_eflags = frame->m_eflags;
+ tst->m_eip = frame->m_eip;
+
+ if (VG_(needs).shadow_regs) {
+ tst->sh_eax = frame->sh_eax;
+ tst->sh_ecx = frame->sh_ecx;
+ tst->sh_edx = frame->sh_edx;
+ tst->sh_ebx = frame->sh_ebx;
+ tst->sh_ebp = frame->sh_ebp;
+ tst->sh_esp = frame->sh_esp;
+ tst->sh_esi = frame->sh_esi;
+ tst->sh_edi = frame->sh_edi;
+ tst->sh_eflags = frame->sh_eflags;
+ }
+
/* don't use the copy exposed to the handler; the handler might have
   changed it. */
sigNo = frame->sigNo_private;
@@ -1180,7 +1218,7 @@
*(Int*)(sigwait_args[2]) = sigNo;
VG_TRACK( post_mem_write, (Addr)sigwait_args[2], sizeof(UInt));
}
- SET_EDX(tid, 0);
+ SET_SIGNAL_EDX(tid, 0);
tst->status = VgTs_Runnable;
VG_(ksigemptyset)(&tst->sigs_waited_for);
scss_changed = True;
diff --git a/coregrind/vg_syscalls.c b/coregrind/vg_syscalls.c
index 57ef09e..6e6dcac 100644
--- a/coregrind/vg_syscalls.c
+++ b/coregrind/vg_syscalls.c
@@ -616,7 +616,7 @@
}
/* "do" the syscall ourselves; the kernel never sees it */
res = VG_(sys_modify_ldt)( tid, arg1, (void*)arg2, arg3 );
- SET_EAX(tid, res);
+ SET_SYSCALL_RETVAL(tid, res);
if (arg1 == 0 && !VG_(is_kerror)(res) && res > 0) {
VG_TRACK( post_mem_write, arg2, res );
}
@@ -1373,7 +1373,7 @@
"alternative logfile fd." );
/* Pretend the close succeeded, regardless. (0 == success) */
res = 0;
- SET_EAX(tid, res);
+ SET_SYSCALL_RETVAL(tid, res);
} else {
KERNEL_DO_SYSCALL(tid,res);
}
@@ -2661,6 +2661,12 @@
VG_TRACK( post_mem_write, (Addr)(&arr[i].revents),
sizeof(Short) );
}
+ /* For some unknown reason, %ebx sometimes gets changed by poll()...
+ let the skin know (using the `post_reg_write_syscall_return'
+ event isn't ideal, but it is the closest match). */
+ if (arg1 != tst->m_ebx) {
+ VG_TRACK( post_reg_write_syscall_return, tid, R_EBX );
+ }
break;
case __NR_readlink: /* syscall 85 */
@@ -3398,7 +3404,7 @@
# if SIGNAL_SIMULATION
VG_(do_sigpending)( tid, (vki_ksigset_t*)arg1 );
res = 0;
- SET_EAX(tid, res);
+ SET_SYSCALL_RETVAL(tid, res);
# else
KERNEL_DO_SYSCALL(tid, res);
# endif