Add code for stepping over atomic instruction sequence on PPC
diff --git a/sysdeps/linux-gnu/ppc/arch.h b/sysdeps/linux-gnu/ppc/arch.h
index 711b4a3..64c1821 100644
--- a/sysdeps/linux-gnu/ppc/arch.h
+++ b/sysdeps/linux-gnu/ppc/arch.h
@@ -15,6 +15,7 @@
/* Start of arch-specific functions. */
#define ARCH_HAVE_UMOVELONG
+#define ARCH_HAVE_ATOMIC_SINGLESTEP
#define PPC_NOP { 0x60, 0x00, 0x00, 0x00 }
#define PPC_NOP_LENGTH 4
diff --git a/sysdeps/linux-gnu/ppc/trace.c b/sysdeps/linux-gnu/ppc/trace.c
index 8642157..05993de 100644
--- a/sysdeps/linux-gnu/ppc/trace.c
+++ b/sysdeps/linux-gnu/ppc/trace.c
@@ -197,3 +197,85 @@
*result = pointed_to;
return 0;
}
+
+/* The atomic skip code is mostly taken from GDB. */
+
+/* Instruction masks used during single-stepping of atomic
+ * sequences. This was lifted from GDB. */
+#define LWARX_MASK 0xfc0007fe
+#define LWARX_INSTRUCTION 0x7c000028
+#define LDARX_INSTRUCTION 0x7c0000A8
+#define STWCX_MASK 0xfc0007ff
+#define STWCX_INSTRUCTION 0x7c00012d
+#define STDCX_INSTRUCTION 0x7c0001ad
+#define BC_MASK 0xfc000000
+#define BC_INSTRUCTION 0x40000000
+
+int
+arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
+ int (*add_cb)(void *addr, void *data),
+ void *add_cb_data)
+{
+ void *addr = sbp->addr;
+ debug(1, "pid=%d addr=%p", proc->pid, addr);
+
+ /* If the original instruction was lwarx/ldarx, we can't
+ * single-step over it, instead we have to execute the whole
+ * atomic block at once. */
+ union {
+ uint32_t insn;
+ char buf[4];
+ } u;
+ memcpy(u.buf, sbp->orig_value, BREAKPOINT_LENGTH);
+
+ if ((u.insn & LWARX_MASK) != LWARX_INSTRUCTION
+ && (u.insn & LWARX_MASK) != LDARX_INSTRUCTION)
+ return 1;
+
+ int insn_count;
+ for (insn_count = 0; ; ++insn_count) {
+ addr += 4;
+ unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
+ if (l == (unsigned long)-1 && errno)
+ return -1;
+ uint32_t insn;
+#ifdef __powerpc64__
+ insn = l >> 32;
+#else
+ insn = l;
+#endif
+
+ /* If we hit a branch instruction, give up. The
+ * computation could escape that way and we'd have to
+ * treat that case specially. */
+ if ((insn & BC_MASK) == BC_INSTRUCTION) {
+ debug(1, "pid=%d, found branch at %p, giving up",
+ proc->pid, addr);
+ return -1;
+ }
+
+ if ((insn & STWCX_MASK) == STWCX_INSTRUCTION
+ || (insn & STWCX_MASK) == STDCX_INSTRUCTION) {
+ debug(1, "pid=%d, found end of atomic block at %p",
+ proc->pid, addr);
+ break;
+ }
+
+ /* Arbitrary cut-off. If we didn't find the
+ * terminating instruction by now, just give up. */
+ if (insn_count > 16) {
+ debug(1, "pid=%d, couldn't find end of atomic block",
+ proc->pid);
+ return -1;
+ }
+ }
+
+ /* Put the breakpoint at the next instruction. */
+ addr += 4;
+ if (add_cb(addr, add_cb_data) < 0)
+ return -1;
+
+ debug(1, "PTRACE_CONT");
+ ptrace(PTRACE_CONT, proc->pid, 0, 0);
+ return 0;
+}
diff --git a/sysdeps/linux-gnu/trace.c b/sysdeps/linux-gnu/trace.c
index 9ecea1e..d962048 100644
--- a/sysdeps/linux-gnu/trace.c
+++ b/sysdeps/linux-gnu/trace.c
@@ -249,6 +249,9 @@
/* The pointer being re-enabled. */
Breakpoint * breakpoint_being_enabled;
+ /* Artificial atomic skip breakpoint, if one is needed. */
+ void *atomic_skip_bp_addr;
+
enum {
/* We are waiting for everyone to land in t/T. */
psh_stopping = 0,
@@ -612,12 +615,84 @@
return 1;
}
-static void
-singlestep(Process * proc)
+/* The protocol is: 0 for success, negative for failure, positive if
+ * default singlestep is to be used. */
+int arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
+ int (*add_cb)(void *addr, void *data),
+ void *add_cb_data);
+
+#ifndef ARCH_HAVE_ATOMIC_SINGLESTEP
+int
+arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
+ int (*add_cb)(void *addr, void *data),
+ void *add_cb_data)
{
+ return 1;
+}
+#endif
+
+static int
+atomic_singlestep_add_bp(void *addr, void *data)
+{
+ struct process_stopping_handler *self = data;
+ struct Process *proc = self->task_enabling_breakpoint;
+
+ /* Only a single address is supported for now. */
+ assert(self->atomic_skip_bp_addr == NULL);
+
+ self->atomic_skip_bp_addr = addr + 4;
+ insert_breakpoint(proc->leader, self->atomic_skip_bp_addr, NULL, 1);
+
+ return 0;
+}
+
+static int
+singlestep(struct process_stopping_handler *self)
+{
+ struct Process *proc = self->task_enabling_breakpoint;
+
+ int status = arch_atomic_singlestep(self->task_enabling_breakpoint,
+ self->breakpoint_being_enabled,
+ &atomic_singlestep_add_bp, self);
+
+ /* Propagate failure and success. */
+ if (status <= 0)
+ return status;
+
+ /* Otherwise do the default action: singlestep. */
debug(1, "PTRACE_SINGLESTEP");
- if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0))
+ if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0)) {
perror("PTRACE_SINGLESTEP");
+ return -1;
+ }
+ return 0;
+}
+
+static void
+post_singlestep(struct process_stopping_handler *self, Event **eventp)
+{
+ continue_for_sigstop_delivery(&self->pids);
+
+ if ((*eventp)->type == EVENT_BREAKPOINT)
+ *eventp = NULL; // handled
+
+ if (self->atomic_skip_bp_addr != 0)
+ delete_breakpoint(self->task_enabling_breakpoint->leader,
+ self->atomic_skip_bp_addr);
+
+ self->breakpoint_being_enabled = NULL;
+}
+
+static void
+singlestep_error(struct process_stopping_handler *self, Event **eventp)
+{
+ struct Process *teb = self->task_enabling_breakpoint;
+ Breakpoint *sbp = self->breakpoint_being_enabled;
+ fprintf(stderr, "%d couldn't singlestep over %s (%p)\n",
+ teb->pid, sbp->libsym != NULL ? sbp->libsym->name : NULL,
+ sbp->addr);
+ delete_breakpoint(teb->leader, sbp->addr);
+ post_singlestep(self, eventp);
}
/* This event handler is installed when we are in the process of
@@ -670,7 +745,11 @@
teb->pid);
if (sbp->enabled)
disable_breakpoint(teb, sbp);
- singlestep(teb);
+ if (singlestep(self) < 0) {
+ singlestep_error(self, &event);
+ goto psh_sinking;
+ }
+
self->state = state = psh_singlestep;
}
break;
@@ -682,7 +761,10 @@
/* This is not the singlestep that we are waiting for. */
if (event->type == EVENT_SIGNAL) {
- singlestep(task);
+ if (singlestep(self) < 0) {
+ singlestep_error(self, &event);
+ goto psh_sinking;
+ }
break;
}
@@ -692,18 +774,13 @@
if (sbp->enabled)
enable_breakpoint(teb, sbp);
- continue_for_sigstop_delivery(&self->pids);
+ post_singlestep(self, &event);
+ goto psh_sinking;
+ }
+ break;
- self->breakpoint_being_enabled = NULL;
- self->state = state = psh_sinking;
-
- if (event->type == EVENT_BREAKPOINT)
- event = NULL; // handled
- } else
- break;
-
- /* fall-through */
-
+ psh_sinking:
+ state = self->state = psh_sinking;
case psh_sinking:
if (await_sigstop_delivery(&self->pids, task_info, event))
process_stopping_done(self, leader);