Arch-abstraction: made vg_execontext.c arch-independent, based on Paul
    Mackerras's work.

- introduced arch-neutral macros for getting the instruction/frame/stack
  pointers.

- renamed ExeContext.eips as ExeContext.ips

- renamed esp/ebp/eip to sp/fp/ip in several related files and arch-neutralised
  various comments

- introduced arch-neutral macros for walking the stack


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@2663 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/core.h b/coregrind/core.h
index 656e81e..1351cfd 100644
--- a/coregrind/core.h
+++ b/coregrind/core.h
@@ -1149,9 +1149,9 @@
 struct _ExeContext {
    struct _ExeContext * next;
    /* Variable-length array.  The size is VG_(clo_backtrace_size); at
-      least 1, at most VG_DEEPEST_BACKTRACE.  [0] is the current %eip,
+      least 1, at most VG_DEEPEST_BACKTRACE.  [0] is the current IP,
       [1] is its caller, [2] is the caller of [1], etc. */
-   Addr eips[0];
+   Addr ips[0];
 };
 
 
diff --git a/coregrind/vg_errcontext.c b/coregrind/vg_errcontext.c
index ae1c292..4ceda42 100644
--- a/coregrind/vg_errcontext.c
+++ b/coregrind/vg_errcontext.c
@@ -360,7 +360,7 @@
    /* This loop condensed from VG_(mini_stack_dump)() */
    i = 0;
    do {
-      Addr eip = ec->eips[i];
+      Addr eip = ec->ips[i];
       if (i > 0) 
          eip -= MIN_INSTR_SIZE;     // point to calling line
       if ( VG_(get_fnname_nodemangle) (eip, buf,  M_VG_ERRTXT) ) {
@@ -380,7 +380,7 @@
                      "# unknown, suppression will not work, sorry\n");
       }
       i++;
-   } while (i < stop_at && ec->eips[i] != 0);
+   } while (i < stop_at && ec->ips[i] != 0);
 
    VG_(printf)("}\n");
 }
@@ -667,7 +667,7 @@
 
       if ((i+1 == VG_(clo_dump_error))) {
 	VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to debugging*/,
-                         p_min->where->eips[0], /*debugging*/True);
+                         p_min->where->ips[0], /*debugging*/True);
       }
 
       p_min->count = 1 << 30;
@@ -1002,8 +1002,8 @@
       caller_obj[i][0] = caller_fun[i][0] = 0;
 
    for (i = 0; i < VG_N_SUPP_CALLERS && i < VG_(clo_backtrace_size); i++) {
-      get_objname_fnname ( err->where->eips[i], caller_obj[i], M_VG_ERRTXT,
-                                                caller_fun[i], M_VG_ERRTXT );
+      get_objname_fnname ( err->where->ips[i], caller_obj[i], M_VG_ERRTXT,
+                                               caller_fun[i], M_VG_ERRTXT );
    }
 
    /* See if the error context matches any suppression. */
diff --git a/coregrind/vg_execontext.c b/coregrind/vg_execontext.c
index 4e077c4..0c3d291 100644
--- a/coregrind/vg_execontext.c
+++ b/coregrind/vg_execontext.c
@@ -109,7 +109,7 @@
 void VG_(pp_ExeContext) ( ExeContext* e )
 {
    init_ExeContext_storage();
-   VG_(mini_stack_dump) ( e->eips, VG_(clo_backtrace_size) );
+   VG_(mini_stack_dump) ( e->ips, VG_(clo_backtrace_size) );
 }
 
 
@@ -122,23 +122,23 @@
    case Vg_LowRes:
       /* Just compare the top two callers. */
       vg_ec_cmp2s++;
-      if (e1->eips[0] != e2->eips[0]
-          || e1->eips[1] != e2->eips[1]) return False;
+      if (e1->ips[0] != e2->ips[0]
+          || e1->ips[1] != e2->ips[1]) return False;
       return True;
 
    case Vg_MedRes:
       /* Just compare the top four callers. */
       vg_ec_cmp4s++;
-      if (e1->eips[0] != e2->eips[0]) return False;
+      if (e1->ips[0] != e2->ips[0]) return False;
 
       if (VG_(clo_backtrace_size) < 2) return True;
-      if (e1->eips[1] != e2->eips[1]) return False;
+      if (e1->ips[1] != e2->ips[1]) return False;
 
       if (VG_(clo_backtrace_size) < 3) return True;
-      if (e1->eips[2] != e2->eips[2]) return False;
+      if (e1->ips[2] != e2->ips[2]) return False;
 
       if (VG_(clo_backtrace_size) < 4) return True;
-      if (e1->eips[3] != e2->eips[3]) return False;
+      if (e1->ips[3] != e2->ips[3]) return False;
       return True;
 
    case Vg_HighRes:
@@ -153,21 +153,21 @@
 }
 
 
-/* Take a snapshot of the client's stack, putting the up to 'n_eips' %eips
-   into 'eips'.  In order to be thread-safe, we pass in the thread's %EIP
-   and %EBP.  Returns number of %eips put in 'eips'.  */
-static UInt stack_snapshot2 ( Addr* eips, UInt n_eips, Addr eip, Addr ebp,
-                              Addr ebp_min, Addr ebp_max_orig )
+/* Take a snapshot of the client's stack, putting up to 'n_ips' IPs 
+   into 'ips'.  In order to be thread-safe, we pass in the thread's IP
+   and FP.  Returns number of IPs put in 'ips'.  */
+static UInt stack_snapshot2 ( Addr* ips, UInt n_ips, Addr ip, Addr fp,
+                              Addr fp_min, Addr fp_max_orig )
 {
    Int         i;
-   Addr        ebp_max;
+   Addr        fp_max;
    UInt        n_found = 0;
 
    VGP_PUSHCC(VgpExeContext);
 
-   /* First snaffle %EIPs from the client's stack into eips[0 .. n_eips-1], 
+   /* First snaffle IPs from the client's stack into ips[0 .. n_ips-1], 
       putting zeroes in when the trail goes cold, which we guess to be when
-      %ebp is not a reasonable stack location.  We also assert that %ebp
+      FP is not a reasonable stack location.  We also assert that FP
       increases down the chain. */
 
    // Gives shorter stack trace for tests/badjump.c
@@ -175,46 +175,47 @@
    // most "normal" backtraces.
    // NJN 2002-sep-05: traces for pthreaded programs are particularly bad.
 
-   // JRS 2002-sep-17: hack, to round up ebp_max to the end of the
+   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
    // current page, at least.  Dunno if it helps.
    // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
-   ebp_max = (ebp_max_orig + VKI_BYTES_PER_PAGE - 1) 
+   fp_max = (fp_max_orig + VKI_BYTES_PER_PAGE - 1) 
                 & ~(VKI_BYTES_PER_PAGE - 1);
-   ebp_max -= sizeof(Addr);
+   fp_max -= sizeof(Addr);
 
    /* Assertion broken before main() is reached in pthreaded programs;  the
     * offending stack traces only have one item.  --njn, 2002-aug-16 */
-   /* vg_assert(ebp_min <= ebp_max);*/
+   /* vg_assert(fp_min <= fp_max);*/
 
-   if (ebp_min + 4000000 <= ebp_max) {
+   if (fp_min + 4000000 <= fp_max) {
       /* If the stack is ridiculously big, don't poke around ... but
          don't bomb out either.  Needed to make John Regehr's
          user-space threads package work. JRS 20021001 */
-      eips[0] = eip;
+      ips[0] = ip;
       i = 1;
    } else {
       /* Get whatever we safely can ... */
-      eips[0] = eip;
-      for (i = 1; i < n_eips; i++) {
-         if (!(ebp_min <= ebp && ebp <= ebp_max)) {
-            //VG_(printf)("... out of range %p\n", ebp);
-            break; /* ebp gone baaaad */
+      ips[0] = ip;
+      fp = FIRST_STACK_FRAME(fp);
+      for (i = 1; i < n_ips; i++) {
+         if (!(fp_min <= fp && fp <= fp_max)) {
+            //VG_(printf)("... out of range %p\n", fp);
+            break; /* fp gone baaaad */
          }
          // NJN 2002-sep-17: monotonicity doesn't work -- gives wrong traces...
-         //     if (ebp >= ((UInt*)ebp)[0]) {
+         //     if (fp >= ((UInt*)fp)[0]) {
          //   VG_(printf)("nonmonotonic\n");
-         //    break; /* ebp gone nonmonotonic */
+         //    break; /* fp gone nonmonotonic */
          // }
-         eips[i] = ((UInt*)ebp)[1];  /* ret addr */
-         ebp     = ((UInt*)ebp)[0];  /* old ebp */
-         //VG_(printf)("     %p\n", eips[i]);
+         ips[i] = STACK_FRAME_RET(fp);  /* ret addr */
+         fp     = STACK_FRAME_NEXT(fp);  /* old fp */
+         //VG_(printf)("     %p\n", ips[i]);
       }
    }
    n_found = i;
 
    /* Put zeroes in the rest. */
-   for (;  i < n_eips; i++) {
-      eips[i] = 0;
+   for (;  i < n_ips; i++) {
+      ips[i] = 0;
    }
    VGP_POPCC(VgpExeContext);
 
@@ -230,11 +231,11 @@
    on the returned ExeContext* values themselves.  Inspired by Hugs's
    Text type.  
 */
-ExeContext* VG_(get_ExeContext2) ( Addr eip, Addr ebp,
-                                   Addr ebp_min, Addr ebp_max_orig )
+ExeContext* VG_(get_ExeContext2) ( Addr ip, Addr fp,
+                                   Addr fp_min, Addr fp_max_orig )
 {
    Int         i;
-   Addr        eips[VG_DEEPEST_BACKTRACE];
+   Addr        ips[VG_DEEPEST_BACKTRACE];
    Bool        same;
    UInt        hash;
    ExeContext* new_ec;
@@ -246,15 +247,15 @@
    vg_assert(VG_(clo_backtrace_size) >= 1 
              && VG_(clo_backtrace_size) <= VG_DEEPEST_BACKTRACE);
 
-   stack_snapshot2( eips, VG_(clo_backtrace_size),
-                    eip, ebp, ebp_min, ebp_max_orig );
+   stack_snapshot2( ips, VG_(clo_backtrace_size),
+                    ip, fp, fp_min, fp_max_orig );
 
    /* Now figure out if we've seen this one before.  First hash it so
       as to determine the list number. */
 
    hash = 0;
    for (i = 0; i < VG_(clo_backtrace_size); i++) {
-      hash ^= (UInt)eips[i];
+      hash ^= (UInt)ips[i];
       hash = (hash << 29) | (hash >> 3);
    }
    hash = hash % VG_N_EC_LISTS;
@@ -270,7 +271,7 @@
       vg_ec_searchcmps++;
       same = True;
       for (i = 0; i < VG_(clo_backtrace_size); i++) {
-         if (list->eips[i] != eips[i]) {
+         if (list->ips[i] != ips[i]) {
             same = False;
             break; 
          }
@@ -293,7 +294,7 @@
                                + VG_(clo_backtrace_size) * sizeof(Addr) );
 
    for (i = 0; i < VG_(clo_backtrace_size); i++)
-      new_ec->eips[i] = eips[i];
+      new_ec->ips[i] = ips[i];
 
    new_ec->next = vg_ec_list[hash];
    vg_ec_list[hash] = new_ec;
@@ -302,65 +303,64 @@
    return new_ec;
 }
 
-void get_needed_regs(ThreadId tid, Addr* eip, Addr* ebp, Addr* esp,
+void get_needed_regs(ThreadId tid, Addr* ip, Addr* fp, Addr* sp,
                      Addr* stack_highest_word)
 {
    if (VG_(is_running_thread)(tid)) {
       /* thread currently in baseblock */
-      *eip                = VG_(baseBlock)[VGOFF_(m_eip)];
-      *ebp                = VG_(baseBlock)[VGOFF_(m_ebp)];
-      *esp                = VG_(baseBlock)[VGOFF_(m_esp)];
+      *ip                 = VG_(baseBlock)[VGOFF_INSTR_PTR];
+      *fp                 = VG_(baseBlock)[VGOFF_FRAME_PTR];
+      *sp                 = VG_(baseBlock)[VGOFF_STACK_PTR];
       *stack_highest_word = VG_(threads)[tid].stack_highest_word;
    } else {
       /* thread in thread table */
       ThreadState* tst = & VG_(threads)[ tid ];
-      *eip                = tst->arch.m_eip;
-      *ebp                = tst->arch.m_ebp;
-      *esp                = tst->arch.m_esp; 
+      *ip                 = ARCH_INSTR_PTR(tst->arch);
+      *fp                 = ARCH_FRAME_PTR(tst->arch);
+      *sp                 = ARCH_STACK_PTR(tst->arch);
       *stack_highest_word = tst->stack_highest_word;
    }
 
    /* Nasty little hack to deal with sysinfo syscalls - if libc is
       using the sysinfo page for syscalls (the TLS version does), then
-      eip will always appear to be in that page when doing a syscall,
+      ip will always appear to be in that page when doing a syscall,
       not the actual libc function doing the syscall.  This check sees
-      if EIP is within the syscall code, and pops the return address
-      off the stack so that eip is placed within the library function
+      if IP is within the syscall code, and pops the return address
+      off the stack so that ip is placed within the library function
       calling the syscall.  This makes stack backtraces much more
       useful.  */
-   if (*eip >= VG_(client_trampoline_code)+VG_(tramp_syscall_offset) &&
-       *eip < VG_(client_trampoline_code)+VG_(trampoline_code_length) &&
-       VG_(is_addressable)(*esp, sizeof(Addr))) {
-      *eip = *(Addr *)*esp;
-      *esp += sizeof(Addr);
+   if (*ip >= VG_(client_trampoline_code)+VG_(tramp_syscall_offset) &&
+       *ip < VG_(client_trampoline_code)+VG_(trampoline_code_length) &&
+       VG_(is_addressable)(*sp, sizeof(Addr))) {
+      *ip = *(Addr *)*sp;
+      *sp += sizeof(Addr);
    }
 }
 
 ExeContext* VG_(get_ExeContext) ( ThreadId tid )
 {
-   Addr eip, ebp, esp, stack_highest_word;
+   Addr ip, fp, sp, stack_highest_word;
 
-   get_needed_regs(tid, &eip, &ebp, &esp, &stack_highest_word);
-   return VG_(get_ExeContext2)(eip, ebp, esp, stack_highest_word);
+   get_needed_regs(tid, &ip, &fp, &sp, &stack_highest_word);
+   return VG_(get_ExeContext2)(ip, fp, sp, stack_highest_word);
 }
 
-/* Take a snapshot of the client's stack, putting the up to 'n_eips' %eips
-   into 'eips'.  In order to be thread-safe, we pass in the thread's %EIP
-   and %EBP.  Returns number of %eips put in 'eips'.  */
-UInt VG_(stack_snapshot) ( ThreadId tid, Addr* eips, UInt n_eips )
+/* Take a snapshot of the client's stack, putting up to 'n_ips' 
+   instruction pointers into 'ips'.  In order to be thread-safe, we pass in
+   the thread's IP and FP.  Returns number of IPs put in 'ips'.  */
+UInt VG_(stack_snapshot) ( ThreadId tid, Addr* ips, UInt n_ips )
 {
-   Addr eip, ebp, esp, stack_highest_word;
+   Addr ip, fp, sp, stack_highest_word;
 
-   get_needed_regs(tid, &eip, &ebp, &esp, &stack_highest_word);
-   return stack_snapshot2(eips, n_eips, 
-                          eip, ebp, esp, stack_highest_word);
+   get_needed_regs(tid, &ip, &fp, &sp, &stack_highest_word);
+   return stack_snapshot2(ips, n_ips, ip, fp, sp, stack_highest_word);
 }
 
 
 Addr VG_(get_EIP_from_ExeContext) ( ExeContext* e, UInt n )
 {
    if (n > VG_(clo_backtrace_size)) return 0;
-   return e->eips[n];
+   return e->ips[n];
 }
 
 Addr VG_(get_EIP) ( ThreadId tid )
@@ -368,13 +368,13 @@
    Addr ret;
 
    if (VG_(is_running_thread)(tid))
-      ret = VG_(baseBlock)[VGOFF_(m_eip)];
+      ret = VG_(baseBlock)[VGOFF_INSTR_PTR];
    else
-      ret = VG_(threads)[ tid ].arch.m_eip;
+      ret = ARCH_INSTR_PTR(VG_(threads)[ tid ].arch);
 
    return ret;
 }
 
 /*--------------------------------------------------------------------*/
-/*--- end                                          vg_execontext.c ---*/
+/*--- end                                                          ---*/
 /*--------------------------------------------------------------------*/
diff --git a/coregrind/vg_scheduler.c b/coregrind/vg_scheduler.c
index 65c7bf5..10e6654 100644
--- a/coregrind/vg_scheduler.c
+++ b/coregrind/vg_scheduler.c
@@ -3115,7 +3115,7 @@
          ExeContext *e = VG_(get_ExeContext)( tid );
          int count =
             VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
-            VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
+            VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
             SET_CLREQ_RETVAL( tid, count );
          break; }
 
@@ -3123,7 +3123,7 @@
          ExeContext *e = VG_(get_ExeContext)( tid );
          int count =
             VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
-            VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
+            VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
             SET_CLREQ_RETVAL( tid, count );
          break; }
 
diff --git a/coregrind/x86/core_arch.h b/coregrind/x86/core_arch.h
index 59751f2..2039ee6 100644
--- a/coregrind/x86/core_arch.h
+++ b/coregrind/x86/core_arch.h
@@ -35,6 +35,32 @@
 #include "tool_arch.h"        // arch-specific tool stuff
 
 /* ---------------------------------------------------------------------
+   Interesting registers
+   ------------------------------------------------------------------ */
+
+// Accessors for the arch_thread_t
+#define ARCH_INSTR_PTR(regs)           ((regs).m_eip)
+#define ARCH_STACK_PTR(regs)           ((regs).m_esp)
+#define ARCH_FRAME_PTR(regs)           ((regs).m_ebp)
+
+#define ARCH_CLREQ_ARGS(regs)          ((regs).m_eax)
+
+// Interesting register numbers
+#define R_STACK_PTR                    R_ESP
+#define R_FRAME_PTR                    R_EBP
+
+// Stack frame layout and linkage
+#define FIRST_STACK_FRAME(ebp)         (ebp)
+#define STACK_FRAME_RET(ebp)           (((UInt*)ebp)[1])
+#define STACK_FRAME_NEXT(ebp)          (((UInt*)ebp)[0])
+
+// Offsets of interesting registers
+#define VGOFF_INSTR_PTR                VGOFF_(m_eip)
+#define VGOFF_STACK_PTR                VGOFF_(m_esp)
+#define VGOFF_FRAME_PTR                VGOFF_(m_ebp)
+
+
+/* ---------------------------------------------------------------------
    Exports of vg_ldt.c
    ------------------------------------------------------------------ */
 
diff --git a/include/tool.h.base b/include/tool.h.base
index 36f86b0..2402957 100644
--- a/include/tool.h.base
+++ b/include/tool.h.base
@@ -1483,26 +1483,26 @@
 */
 extern ExeContext* VG_(get_ExeContext) ( ThreadId tid );
 
-/* Get the nth EIP from the ExeContext.  0 is the EIP of the top function, 1
+/* Get the nth IP from the ExeContext.  0 is the IP of the top function, 1
    is its caller, etc.  Returns 0 if there isn't one, or if n is greater
    than VG_(clo_backtrace_size), set by the --num-callers option. */
 extern Addr VG_(get_EIP_from_ExeContext) ( ExeContext* e, UInt n );
 
-/* Just grab the client's EIP, as a much smaller and cheaper
+/* Just grab the client's IP, as a much smaller and cheaper
    indication of where they are.  Use is basically same as for
    VG_(get_ExeContext)() above.
 */
 extern Addr VG_(get_EIP)( ThreadId tid );
 
 /* For tools needing more control over stack traces:  walks the stack to get
-   %eips from the top stack frames for thread 'tid'.  Maximum of 'n_eips'
-   addresses put into 'eips';  0 is the top of the stack, 1 is its caller,
-   etc. */
-extern UInt VG_(stack_snapshot) ( ThreadId tid, Addr* eips, UInt n_eips );
+   instruction pointers from the top stack frames for thread 'tid'.  Maximum of
+   'n_ips' addresses put into 'ips';  0 is the top of the stack, 1 is its
+   caller, etc. */
+extern UInt VG_(stack_snapshot) ( ThreadId tid, Addr* ips, UInt n_ips );
 
 /* Does the same thing as VG_(pp_ExeContext)(), just with slightly
    different input. */
-extern void VG_(mini_stack_dump) ( Addr eips[], UInt n_eips );
+extern void VG_(mini_stack_dump) ( Addr ips[], UInt n_ips );
 
 
 /*====================================================================*/