A modularisation + refactoring commit. vg_execontext.c has been split into
two halves: stacktrace.c, which deals with getting, traversing and printing
stack traces; and execontext.c, which deals with storing stack traces
permanently in a way that avoids duplicates, and comparing them.
One nice outcome: previously we were often creating ExeContexts, which live
forever, even when they were only needed temporarily. I.e. this was a memory
leak, which has been removed.
As part of this, new headers have been created, carved off core.h and
tool.h. Lots of function names have changed, too.
In Massif, I also changed a lot of "eip" names to "ip" to make them less
x86-specific.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3429 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/Makefile.am b/coregrind/Makefile.am
index be3d5da..df822ac 100644
--- a/coregrind/Makefile.am
+++ b/coregrind/Makefile.am
@@ -27,6 +27,8 @@
noinst_HEADERS = \
core.h \
core_asm.h \
+ pub_core_execontext.h \
+ pub_core_stacktrace.h \
ume.h \
vg_symtab2.h \
vg_symtypes.h \
@@ -50,13 +52,14 @@
valgrind_LDADD=
stage2_SOURCES = \
+ execontext.c \
+ stacktrace.c \
ume.c \
\
vg_scheduler.c \
vg_default.c \
vg_demangle.c \
vg_errcontext.c \
- vg_execontext.c \
vg_hashtable.c \
vg_instrument.c \
vg_main.c \
diff --git a/coregrind/core.h b/coregrind/core.h
index 25be871..481d4f7 100644
--- a/coregrind/core.h
+++ b/coregrind/core.h
@@ -93,6 +93,8 @@
// eg. x86-linux/core_platform.h
#include "core_os.h" // OS-specific stuff, eg. linux/core_os.h
+#include "pub_core_stacktrace.h" // for type 'StackTrace'
+
#include "valgrind.h"
#undef TL_
@@ -809,7 +811,7 @@
__attribute__ ((__noreturn__))
extern void VG_(core_panic) ( Char* str );
__attribute__ ((__noreturn__))
-extern void VG_(core_panic_at) ( Char* str, ExeContext *ec );
+extern void VG_(core_panic_at) ( Char* str, StackTrace ips );
/* Tools use VG_(strdup)() which doesn't expose ArenaId */
extern Char* VG_(arena_strdup) ( ArenaId aid, const Char* s);
@@ -867,31 +869,6 @@
Int debugging_verbosity );
/* ---------------------------------------------------------------------
- Exports of vg_execontext.c.
- ------------------------------------------------------------------ */
-
-/* Records the PC and a bit of the call chain. The first 4 IP
- values are used in comparisons do remove duplicate errors, and for
- comparing against suppression specifications. The rest are purely
- informational (but often important). */
-
-struct _ExeContext {
- struct _ExeContext * next;
- /* Variable-length array. The size is VG_(clo_backtrace_size); at
- least 1, at most VG_DEEPEST_BACKTRACE. [0] is the current IP,
- [1] is its caller, [2] is the caller of [1], etc. */
- Addr ips[0];
-};
-
-
-/* Print stats (informational only). */
-extern void VG_(print_ExeContext_stats) ( void );
-
-/* Like VG_(get_ExeContext), but with a slightly different type */
-extern ExeContext* VG_(get_ExeContext2) ( Addr ip, Addr fp,
- Addr fp_min, Addr fp_max );
-
-/* ---------------------------------------------------------------------
Exports of vg_errcontext.c.
------------------------------------------------------------------ */
diff --git a/coregrind/execontext.c b/coregrind/execontext.c
new file mode 100644
index 0000000..5de1dbe
--- /dev/null
+++ b/coregrind/execontext.c
@@ -0,0 +1,256 @@
+/*--------------------------------------------------------------------*/
+/*--- execontext.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2005 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "core.h"
+#include "pub_core_execontext.h"
+
+/*------------------------------------------------------------*/
+/*--- Low-level ExeContext storage. ---*/
+/*------------------------------------------------------------*/
+
+/* The first 4 IP values are used in comparisons to remove duplicate errors,
+ and for comparing against suppression specifications. The rest are
+ purely informational (but often important). */
+
+struct _ExeContext {
+ struct _ExeContext * next;
+ /* Variable-length array. The size is VG_(clo_backtrace_size); at
+ least 1, at most VG_DEEPEST_BACKTRACE. [0] is the current IP,
+ [1] is its caller, [2] is the caller of [1], etc. */
+ Addr ips[0];
+};
+
+/* Number of lists in which we keep track of ExeContexts. Should be
+ prime. */
+#define N_EC_LISTS 4999 /* a prime number */
+
+/* The idea is only to ever store any one context once, so as to save
+ space and make exact comparisons faster. */
+
+static ExeContext* ec_list[N_EC_LISTS];
+
+/* Stats only: the number of times the system was searched to locate a
+ context. */
+static UInt ec_searchreqs;
+
+/* Stats only: the number of full context comparisons done. */
+static UInt ec_searchcmps;
+
+/* Stats only: total number of stored contexts. */
+static UInt ec_totstored;
+
+/* Number of 2, 4 and (fast) full cmps done. */
+static UInt ec_cmp2s;
+static UInt ec_cmp4s;
+static UInt ec_cmpAlls;
+
+
+/*------------------------------------------------------------*/
+/*--- Exported functions. ---*/
+/*------------------------------------------------------------*/
+
+
+/* Initialise this subsystem. */
+static void init_ExeContext_storage ( void )
+{
+ Int i;
+ static Bool init_done = False;
+ if (init_done)
+ return;
+ ec_searchreqs = 0;
+ ec_searchcmps = 0;
+ ec_totstored = 0;
+ ec_cmp2s = 0;
+ ec_cmp4s = 0;
+ ec_cmpAlls = 0;
+ for (i = 0; i < N_EC_LISTS; i++)
+ ec_list[i] = NULL;
+ init_done = True;
+}
+
+
+/* Print stats. */
+void VG_(print_ExeContext_stats) ( void )
+{
+ init_ExeContext_storage();
+ VG_(message)(Vg_DebugMsg,
+ " exectx: %d lists, %d contexts (avg %d per list)",
+ N_EC_LISTS, ec_totstored,
+ ec_totstored / N_EC_LISTS
+ );
+ VG_(message)(Vg_DebugMsg,
+ " exectx: %d searches, %d full compares (%d per 1000)",
+ ec_searchreqs, ec_searchcmps,
+ ec_searchreqs == 0
+ ? 0
+ : (UInt)( (((ULong)ec_searchcmps) * 1000)
+ / ((ULong)ec_searchreqs ))
+ );
+ VG_(message)(Vg_DebugMsg,
+ " exectx: %d cmp2, %d cmp4, %d cmpAll",
+ ec_cmp2s, ec_cmp4s, ec_cmpAlls
+ );
+}
+
+
+/* Print an ExeContext. */
+void VG_(pp_ExeContext) ( ExeContext* ec )
+{
+ VG_(pp_StackTrace)( ec->ips, VG_(clo_backtrace_size) );
+}
+
+
+/* Compare two ExeContexts, comparing all callers. */
+Bool VG_(eq_ExeContext) ( VgRes res, ExeContext* e1, ExeContext* e2 )
+{
+ if (e1 == NULL || e2 == NULL)
+ return False;
+ switch (res) {
+ case Vg_LowRes:
+ /* Just compare the top two callers. */
+ ec_cmp2s++;
+ if (e1->ips[0] != e2->ips[0]) return False;
+
+ if (VG_(clo_backtrace_size) < 2) return True;
+ if (e1->ips[1] != e2->ips[1]) return False;
+ return True;
+
+ case Vg_MedRes:
+ /* Just compare the top four callers. */
+ ec_cmp4s++;
+ if (e1->ips[0] != e2->ips[0]) return False;
+
+ if (VG_(clo_backtrace_size) < 2) return True;
+ if (e1->ips[1] != e2->ips[1]) return False;
+
+ if (VG_(clo_backtrace_size) < 3) return True;
+ if (e1->ips[2] != e2->ips[2]) return False;
+
+ if (VG_(clo_backtrace_size) < 4) return True;
+ if (e1->ips[3] != e2->ips[3]) return False;
+ return True;
+
+ case Vg_HighRes:
+ ec_cmpAlls++;
+ /* Compare them all -- just do pointer comparison. */
+ if (e1 != e2) return False;
+ return True;
+
+ default:
+ VG_(core_panic)("VG_(eq_ExeContext): unrecognised VgRes");
+ }
+}
+
+/* This guy is the head honcho here. Take a snapshot of the client's
+ stack. Search our collection of ExeContexts to see if we already
+ have it, and if not, allocate a new one. Either way, return a
+ pointer to the context. If there is a matching context we
+ guarantee to not allocate a new one. Thus we never store
+ duplicates, and so exact equality can be quickly done as equality
+ on the returned ExeContext* values themselves. Inspired by Hugs's
+ Text type.
+*/
+ExeContext* VG_(record_ExeContext) ( ThreadId tid )
+{
+ Int i;
+ Addr ips[VG_DEEPEST_BACKTRACE];
+ Bool same;
+ UWord hash;
+ ExeContext* new_ec;
+ ExeContext* list;
+
+ VGP_PUSHCC(VgpExeContext);
+
+ init_ExeContext_storage();
+ vg_assert(VG_(clo_backtrace_size) >= 1
+ && VG_(clo_backtrace_size) <= VG_DEEPEST_BACKTRACE);
+
+ VG_(get_StackTrace)( tid, ips, VG_(clo_backtrace_size) );
+
+ /* Now figure out if we've seen this one before. First hash it so
+ as to determine the list number. */
+
+ hash = 0;
+ for (i = 0; i < VG_(clo_backtrace_size); i++) {
+ hash ^= ips[i];
+ hash = (hash << 29) | (hash >> 3);
+ }
+ hash = hash % N_EC_LISTS;
+
+ /* And (the expensive bit) look for a matching entry in the list. */
+
+ ec_searchreqs++;
+
+ list = ec_list[hash];
+
+ while (True) {
+ if (list == NULL) break;
+ ec_searchcmps++;
+ same = True;
+ for (i = 0; i < VG_(clo_backtrace_size); i++) {
+ if (list->ips[i] != ips[i]) {
+ same = False;
+ break;
+ }
+ }
+ if (same) break;
+ list = list->next;
+ }
+
+ if (list != NULL) {
+ /* Yay! We found it. */
+ VGP_POPCC(VgpExeContext);
+ return list;
+ }
+
+ /* Bummer. We have to allocate a new context record. */
+ ec_totstored++;
+
+ new_ec = VG_(arena_malloc)( VG_AR_EXECTXT,
+ sizeof(struct _ExeContext *)
+ + VG_(clo_backtrace_size) * sizeof(Addr) );
+
+ for (i = 0; i < VG_(clo_backtrace_size); i++)
+ new_ec->ips[i] = ips[i];
+
+ new_ec->next = ec_list[hash];
+ ec_list[hash] = new_ec;
+
+ VGP_POPCC(VgpExeContext);
+ return new_ec;
+}
+
+StackTrace VG_(extract_StackTrace) ( ExeContext* e )
+{
+ return e->ips;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/pub_core_execontext.h b/coregrind/pub_core_execontext.h
new file mode 100644
index 0000000..119adb4
--- /dev/null
+++ b/coregrind/pub_core_execontext.h
@@ -0,0 +1,54 @@
+/*--------------------------------------------------------------------*/
+/*--- ExeContexts: long-lived, non-dup'd stack traces. ---*/
+/*--- pub_core_execontext.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2005 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_EXECONTEXT_H
+#define __PUB_CORE_EXECONTEXT_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module provides an abstract data type, ExeContext,
+// which is a stack trace stored in such a way that duplicates are
+// avoided. This also facilitates fast comparisons if necessary.
+//--------------------------------------------------------------------
+
+#include "pub_tool_execontext.h"
+
+#include "pub_core_stacktrace.h"
+
+// Print stats (informational only).
+extern void VG_(print_ExeContext_stats) ( void );
+
+// Extract the StackTrace from an ExeContext.
+extern StackTrace VG_(extract_StackTrace) ( ExeContext* e );
+
+#endif // __PUB_CORE_EXECONTEXT_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/pub_core_stacktrace.h b/coregrind/pub_core_stacktrace.h
new file mode 100644
index 0000000..61c6e63
--- /dev/null
+++ b/coregrind/pub_core_stacktrace.h
@@ -0,0 +1,49 @@
+/*--------------------------------------------------------------------*/
+/*--- Stack traces: getting, traversing, printing. ---*/
+/*--- pub_core_stacktrace.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2005 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_STACKTRACE_H
+#define __PUB_CORE_STACKTRACE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module deals with stack traces: getting them,
+// traversing them, and printing them.
+//--------------------------------------------------------------------
+
+#include "pub_tool_stacktrace.h"
+
+// Variant that gives a little more control over the stack-walking.
+extern UInt VG_(get_StackTrace2) ( StackTrace ips, UInt n_ips, Addr ip,
+ Addr fp, Addr fp_min, Addr fp_max );
+
+#endif // __PUB_CORE_STACKTRACE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/stacktrace.c b/coregrind/stacktrace.c
new file mode 100644
index 0000000..692a035
--- /dev/null
+++ b/coregrind/stacktrace.c
@@ -0,0 +1,204 @@
+/*--------------------------------------------------------------------*/
+/*--- stacktrace.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2005 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "core.h"
+#include "pub_core_stacktrace.h"
+
+/*------------------------------------------------------------*/
+/*--- Exported functions. ---*/
+/*------------------------------------------------------------*/
+
+/* Take a snapshot of the client's stack, putting up to 'n_ips' IPs
+ into 'ips'. In order to be thread-safe, we pass in the thread's IP
+ and FP. Returns number of IPs put in 'ips'. */
+UInt VG_(get_StackTrace2) ( Addr* ips, UInt n_ips, Addr ip, Addr fp,
+ Addr fp_min, Addr fp_max_orig )
+{
+ static const Bool debug = False;
+ Int i;
+ Addr fp_max;
+ UInt n_found = 0;
+
+ VGP_PUSHCC(VgpExeContext);
+
+ /* First snaffle IPs from the client's stack into ips[0 .. n_ips-1],
+ putting zeroes in when the trail goes cold, which we guess to be when
+ FP is not a reasonable stack location. We also assert that FP
+ increases down the chain. */
+
+ // Gives shorter stack trace for tests/badjump.c
+ // JRS 2002-aug-16: I don't think this is a big deal; looks ok for
+ // most "normal" backtraces.
+ // NJN 2002-sep-05: traces for pthreaded programs are particularly bad.
+
+ // JRS 2002-sep-17: hack, to round up fp_max to the end of the
+ // current page, at least. Dunno if it helps.
+ // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
+ fp_max = (fp_max_orig + VKI_PAGE_SIZE - 1) & ~(VKI_PAGE_SIZE - 1);
+ fp_max -= sizeof(Addr);
+
+ if (debug)
+ VG_(printf)("n_ips=%d fp_min=%p fp_max_orig=%p, fp_max=%p ip=%p fp=%p\n",
+ n_ips, fp_min, fp_max_orig, fp_max, ip, fp);
+
+ /* Assertion broken before main() is reached in pthreaded programs; the
+ * offending stack traces only have one item. --njn, 2002-aug-16 */
+ /* vg_assert(fp_min <= fp_max);*/
+
+ if (fp_min + 4000000 <= fp_max) {
+ /* If the stack is ridiculously big, don't poke around ... but
+ don't bomb out either. Needed to make John Regehr's
+ user-space threads package work. JRS 20021001 */
+ ips[0] = ip;
+ i = 1;
+ } else {
+ /* Get whatever we safely can ... */
+ ips[0] = ip;
+ fp = FIRST_STACK_FRAME(fp);
+ for (i = 1; i < n_ips; i++) {
+ if (!(fp_min <= fp && fp <= fp_max)) {
+ if (debug)
+ VG_(printf)("... out of range %p\n", fp);
+ break; /* fp gone baaaad */
+ }
+ // NJN 2002-sep-17: monotonicity doesn't work -- gives wrong traces...
+ // if (fp >= ((UInt*)fp)[0]) {
+ // VG_(printf)("nonmonotonic\n");
+ // break; /* fp gone nonmonotonic */
+ // }
+ ips[i] = STACK_FRAME_RET(fp); /* ret addr */
+ fp = STACK_FRAME_NEXT(fp); /* old fp */
+ if (debug)
+ VG_(printf)(" ips[%d]=%08p\n", i, ips[i]);
+ }
+ }
+ n_found = i;
+
+ /* Put zeroes in the rest. */
+ for (; i < n_ips; i++) {
+ ips[i] = 0;
+ }
+ VGP_POPCC(VgpExeContext);
+
+ return n_found;
+}
+
+UInt VG_(get_StackTrace) ( ThreadId tid, StackTrace ips, UInt n_ips )
+{
+ /* thread in thread table */
+ ThreadState* tst = & VG_(threads)[ tid ];
+ Addr ip = INSTR_PTR(tst->arch);
+ Addr fp = FRAME_PTR(tst->arch);
+ Addr sp = STACK_PTR(tst->arch);
+ Addr stack_highest_word = tst->stack_highest_word;
+
+#ifdef __x86__
+ /* Nasty little hack to deal with sysinfo syscalls - if libc is
+ using the sysinfo page for syscalls (the TLS version does), then
+ ip will always appear to be in that page when doing a syscall,
+ not the actual libc function doing the syscall. This check sees
+ if IP is within the syscall code, and pops the return address
+ off the stack so that ip is placed within the library function
+ calling the syscall. This makes stack backtraces much more
+ useful. */
+ if (ip >= VG_(client_trampoline_code)+VG_(tramp_syscall_offset) &&
+ ip < VG_(client_trampoline_code)+VG_(trampoline_code_length) &&
+ VG_(is_addressable)(sp, sizeof(Addr), VKI_PROT_READ)) {
+ ip = *(Addr *)sp;
+ sp += sizeof(Addr);
+ }
+#endif
+ if (0)
+ VG_(printf)("tid %d: stack_highest=%p ip=%p sp=%p fp=%p\n",
+ tid, stack_highest_word, ip, sp, fp);
+
+ return VG_(get_StackTrace2)(ips, n_ips, ip, fp, sp, stack_highest_word);
+}
+
+static void printIpDesc(UInt n, Addr ip)
+{
+ static UChar buf[M_VG_ERRTXT];
+
+ VG_(describe_IP)(ip, buf, M_VG_ERRTXT);
+ VG_(message)(Vg_UserMsg, " %s %s", ( n == 0 ? "at" : "by" ), buf);
+}
+
+/* Print a StackTrace. */
+void VG_(pp_StackTrace) ( StackTrace ips, UInt n_ips )
+{
+ vg_assert( n_ips > 0 );
+ VG_(apply_StackTrace)( printIpDesc, ips, n_ips );
+}
+
+/* Get and immediately print a StackTrace. */
+void VG_(get_and_pp_StackTrace) ( ThreadId tid, UInt n_ips )
+{
+ Addr ips[n_ips];
+ VG_(get_StackTrace)(tid, ips, n_ips);
+ VG_(pp_StackTrace) ( ips, n_ips);
+}
+
+
+void VG_(apply_StackTrace)( void(*action)(UInt n, Addr ip),
+ StackTrace ips, UInt n_ips )
+{
+ #define MYBUF_LEN 10 // only needs to be long enough for "main"
+
+ Bool main_done = False;
+ Char mybuf[MYBUF_LEN]; // ok to stack allocate mybuf[] -- it's tiny
+ Int i = 0;
+
+ vg_assert(n_ips > 0);
+ do {
+ Addr ip = ips[i];
+ if (i > 0)
+ ip -= MIN_INSTR_SIZE; // point to calling line
+
+ // Stop after "main"; if main() is recursive, stop after last main().
+ if ( ! VG_(clo_show_below_main)) {
+ VG_(get_fnname_nodemangle)( ip, mybuf, MYBUF_LEN );
+ if ( VG_STREQ("main", mybuf) )
+ main_done = True;
+ else if (main_done)
+ break;
+ }
+
+ // Act on the ip
+ action(i, ip);
+
+ i++;
+ } while (i < n_ips && ips[i] != 0);
+
+ #undef MYBUF_LEN
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/vg_errcontext.c b/coregrind/vg_errcontext.c
index ce55550..a28e34e 100644
--- a/coregrind/vg_errcontext.c
+++ b/coregrind/vg_errcontext.c
@@ -29,6 +29,7 @@
*/
#include "core.h"
+#include "pub_core_execontext.h"
/*------------------------------------------------------------*/
/*--- Globals ---*/
@@ -326,7 +327,7 @@
err->count = 1;
err->tid = tid;
if (NULL == where)
- err->where = VG_(get_ExeContext)( tid );
+ err->where = VG_(record_ExeContext)( tid );
else
err->where = where;
@@ -381,7 +382,7 @@
}
// Print stack trace elements
- VG_(apply_ExeContext)(printSuppForIp, ec, stop_at);
+ VG_(apply_StackTrace)(printSuppForIp, VG_(extract_StackTrace)(ec), stop_at);
VG_(printf)("}\n");
}
@@ -675,9 +676,9 @@
pp_Error( p_min, False );
if ((i+1 == VG_(clo_dump_error))) {
+ StackTrace ips = VG_(extract_StackTrace)(p_min->where);
VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to debugging*/,
- p_min->where->ips[0], /*debugging*/True,
- 0xFE/*verbosity*/);
+ ips[0], /*debugging*/True, 0xFE/*verbosity*/);
}
p_min->count = 1 << 30;
@@ -982,9 +983,10 @@
{
Int i;
Char caller_name[M_VG_ERRTXT];
+ StackTrace ips = VG_(extract_StackTrace)(err->where);
for (i = 0; i < su->n_callers; i++) {
- Addr a = err->where->ips[i];
+ Addr a = ips[i];
vg_assert(su->callers[i].name != NULL);
switch (su->callers[i].ty) {
case ObjName:
diff --git a/coregrind/vg_execontext.c b/coregrind/vg_execontext.c
deleted file mode 100644
index 8683055..0000000
--- a/coregrind/vg_execontext.c
+++ /dev/null
@@ -1,411 +0,0 @@
-
-/*--------------------------------------------------------------------*/
-/*--- Storage, and equality on, execution contexts (backtraces). ---*/
-/*--- vg_execontext.c ---*/
-/*--------------------------------------------------------------------*/
-
-/*
- This file is part of Valgrind, a dynamic binary instrumentation
- framework.
-
- Copyright (C) 2000-2005 Julian Seward
- jseward@acm.org
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2 of the
- License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307, USA.
-
- The GNU General Public License is contained in the file COPYING.
-*/
-
-#include "core.h"
-
-
-/*------------------------------------------------------------*/
-/*--- Low-level ExeContext storage. ---*/
-/*------------------------------------------------------------*/
-
-/* Number of lists in which we keep track of ExeContexts. Should be
- prime. */
-#define N_EC_LISTS 4999 /* a prime number */
-
-/* The idea is only to ever store any one context once, so as to save
- space and make exact comparisons faster. */
-
-static ExeContext* ec_list[N_EC_LISTS];
-
-/* Stats only: the number of times the system was searched to locate a
- context. */
-static UInt ec_searchreqs;
-
-/* Stats only: the number of full context comparisons done. */
-static UInt ec_searchcmps;
-
-/* Stats only: total number of stored contexts. */
-static UInt ec_totstored;
-
-/* Number of 2, 4 and (fast) full cmps done. */
-static UInt ec_cmp2s;
-static UInt ec_cmp4s;
-static UInt ec_cmpAlls;
-
-
-/*------------------------------------------------------------*/
-/*--- Exported functions. ---*/
-/*------------------------------------------------------------*/
-
-
-/* Initialise this subsystem. */
-static void init_ExeContext_storage ( void )
-{
- Int i;
- static Bool init_done = False;
- if (init_done)
- return;
- ec_searchreqs = 0;
- ec_searchcmps = 0;
- ec_totstored = 0;
- ec_cmp2s = 0;
- ec_cmp4s = 0;
- ec_cmpAlls = 0;
- for (i = 0; i < N_EC_LISTS; i++)
- ec_list[i] = NULL;
- init_done = True;
-}
-
-
-/* Print stats. */
-void VG_(print_ExeContext_stats) ( void )
-{
- init_ExeContext_storage();
- VG_(message)(Vg_DebugMsg,
- " exectx: %d lists, %d contexts (avg %d per list)",
- N_EC_LISTS, ec_totstored,
- ec_totstored / N_EC_LISTS
- );
- VG_(message)(Vg_DebugMsg,
- " exectx: %d searches, %d full compares (%d per 1000)",
- ec_searchreqs, ec_searchcmps,
- ec_searchreqs == 0
- ? 0
- : (UInt)( (((ULong)ec_searchcmps) * 1000)
- / ((ULong)ec_searchreqs ))
- );
- VG_(message)(Vg_DebugMsg,
- " exectx: %d cmp2, %d cmp4, %d cmpAll",
- ec_cmp2s, ec_cmp4s, ec_cmpAlls
- );
-}
-
-
-static void printIpDesc(UInt n, Addr ip)
-{
- static UChar buf[M_VG_ERRTXT];
-
- VG_(describe_eip)(ip, buf, M_VG_ERRTXT);
- VG_(message)(Vg_UserMsg, " %s %s", ( n == 0 ? "at" : "by" ), buf);
-}
-
-/* Print an ExeContext. */
-void VG_(pp_ExeContext) ( ExeContext* ec )
-{
- vg_assert( VG_(clo_backtrace_size) > 0 );
- VG_(apply_ExeContext)( printIpDesc, ec, VG_(clo_backtrace_size) );
-}
-
-
-/* Compare two ExeContexts, comparing all callers. */
-Bool VG_(eq_ExeContext) ( VgRes res, ExeContext* e1, ExeContext* e2 )
-{
- if (e1 == NULL || e2 == NULL)
- return False;
- switch (res) {
- case Vg_LowRes:
- /* Just compare the top two callers. */
- ec_cmp2s++;
- if (e1->ips[0] != e2->ips[0]
- || e1->ips[1] != e2->ips[1]) return False;
- return True;
-
- case Vg_MedRes:
- /* Just compare the top four callers. */
- ec_cmp4s++;
- if (e1->ips[0] != e2->ips[0]) return False;
-
- if (VG_(clo_backtrace_size) < 2) return True;
- if (e1->ips[1] != e2->ips[1]) return False;
-
- if (VG_(clo_backtrace_size) < 3) return True;
- if (e1->ips[2] != e2->ips[2]) return False;
-
- if (VG_(clo_backtrace_size) < 4) return True;
- if (e1->ips[3] != e2->ips[3]) return False;
- return True;
-
- case Vg_HighRes:
- ec_cmpAlls++;
- /* Compare them all -- just do pointer comparison. */
- if (e1 != e2) return False;
- return True;
-
- default:
- VG_(core_panic)("VG_(eq_ExeContext): unrecognised VgRes");
- }
-}
-
-
-void VG_(apply_ExeContext)( void(*action)(UInt n, Addr ip),
- ExeContext* ec, UInt n_ips )
-{
- #define MYBUF_LEN 10 // only needs to be long enough for "main"
-
- Bool main_done = False;
- Char mybuf[MYBUF_LEN]; // ok to stack allocate mybuf[] -- it's tiny
- Int i = 0;
-
- vg_assert(n_ips > 0);
- do {
- Addr ip = ec->ips[i];
- if (i > 0)
- ip -= MIN_INSTR_SIZE; // point to calling line
-
- // Stop after "main"; if main() is recursive, stop after last main().
- if ( ! VG_(clo_show_below_main)) {
- VG_(get_fnname_nodemangle)( ip, mybuf, MYBUF_LEN );
- if ( VG_STREQ("main", mybuf) )
- main_done = True;
- else if (main_done)
- break;
- }
-
- // Act on the ip
- action(i, ip);
-
- i++;
- } while (i < n_ips && ec->ips[i] != 0);
-
- #undef MYBUF_LEN
-}
-
-
-/* Take a snapshot of the client's stack, putting the up to 'n_ips' IPs
- into 'ips'. In order to be thread-safe, we pass in the thread's IP
- and FP. Returns number of IPs put in 'ips'. */
-static UInt stack_snapshot2 ( Addr* ips, UInt n_ips, Addr ip, Addr fp,
- Addr fp_min, Addr fp_max_orig )
-{
- static const Bool debug = False;
- Int i;
- Addr fp_max;
- UInt n_found = 0;
-
- VGP_PUSHCC(VgpExeContext);
-
- /* First snaffle IPs from the client's stack into ips[0 .. n_ips-1],
- putting zeroes in when the trail goes cold, which we guess to be when
- FP is not a reasonable stack location. We also assert that FP
- increases down the chain. */
-
- // Gives shorter stack trace for tests/badjump.c
- // JRS 2002-aug-16: I don't think this is a big deal; looks ok for
- // most "normal" backtraces.
- // NJN 2002-sep-05: traces for pthreaded programs are particularly bad.
-
- // JRS 2002-sep-17: hack, to round up fp_max to the end of the
- // current page, at least. Dunno if it helps.
- // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
- fp_max = (fp_max_orig + VKI_PAGE_SIZE - 1) & ~(VKI_PAGE_SIZE - 1);
- fp_max -= sizeof(Addr);
-
- if (debug)
- VG_(printf)("n_ips=%d fp_min=%p fp_max_orig=%p, fp_max=%p ip=%p fp=%p\n",
- n_ips, fp_min, fp_max_orig, fp_max, ip, fp);
-
- /* Assertion broken before main() is reached in pthreaded programs; the
- * offending stack traces only have one item. --njn, 2002-aug-16 */
- /* vg_assert(fp_min <= fp_max);*/
-
- if (fp_min + 4000000 <= fp_max) {
- /* If the stack is ridiculously big, don't poke around ... but
- don't bomb out either. Needed to make John Regehr's
- user-space threads package work. JRS 20021001 */
- ips[0] = ip;
- i = 1;
- } else {
- /* Get whatever we safely can ... */
- ips[0] = ip;
- fp = FIRST_STACK_FRAME(fp);
- for (i = 1; i < n_ips; i++) {
- if (!(fp_min <= fp && fp <= fp_max)) {
- if (debug)
- VG_(printf)("... out of range %p\n", fp);
- break; /* fp gone baaaad */
- }
- // NJN 2002-sep-17: monotonicity doesn't work -- gives wrong traces...
- // if (fp >= ((UInt*)fp)[0]) {
- // VG_(printf)("nonmonotonic\n");
- // break; /* fp gone nonmonotonic */
- // }
- ips[i] = STACK_FRAME_RET(fp); /* ret addr */
- fp = STACK_FRAME_NEXT(fp); /* old fp */
- if (debug)
- VG_(printf)(" ips[%d]=%08p\n", i, ips[i]);
- }
- }
- n_found = i;
-
- /* Put zeroes in the rest. */
- for (; i < n_ips; i++) {
- ips[i] = 0;
- }
- VGP_POPCC(VgpExeContext);
-
- return n_found;
-}
-
-/* This guy is the head honcho here. Take a snapshot of the client's
- stack. Search our collection of ExeContexts to see if we already
- have it, and if not, allocate a new one. Either way, return a
- pointer to the context. If there is a matching context we
- guarantee to not allocate a new one. Thus we never store
- duplicates, and so exact equality can be quickly done as equality
- on the returned ExeContext* values themselves. Inspired by Hugs's
- Text type.
-*/
-ExeContext* VG_(get_ExeContext2) ( Addr ip, Addr fp,
- Addr fp_min, Addr fp_max_orig )
-{
- Int i;
- Addr ips[VG_DEEPEST_BACKTRACE];
- Bool same;
- UWord hash;
- ExeContext* new_ec;
- ExeContext* list;
-
- VGP_PUSHCC(VgpExeContext);
-
- init_ExeContext_storage();
- vg_assert(VG_(clo_backtrace_size) >= 1
- && VG_(clo_backtrace_size) <= VG_DEEPEST_BACKTRACE);
-
- stack_snapshot2( ips, VG_(clo_backtrace_size),
- ip, fp, fp_min, fp_max_orig );
-
- /* Now figure out if we've seen this one before. First hash it so
- as to determine the list number. */
-
- hash = 0;
- for (i = 0; i < VG_(clo_backtrace_size); i++) {
- hash ^= ips[i];
- hash = (hash << 29) | (hash >> 3);
- }
- hash = hash % N_EC_LISTS;
-
- /* And (the expensive bit) look a matching entry in the list. */
-
- ec_searchreqs++;
-
- list = ec_list[hash];
-
- while (True) {
- if (list == NULL) break;
- ec_searchcmps++;
- same = True;
- for (i = 0; i < VG_(clo_backtrace_size); i++) {
- if (list->ips[i] != ips[i]) {
- same = False;
- break;
- }
- }
- if (same) break;
- list = list->next;
- }
-
- if (list != NULL) {
- /* Yay! We found it. */
- VGP_POPCC(VgpExeContext);
- return list;
- }
-
- /* Bummer. We have to allocate a new context record. */
- ec_totstored++;
-
- new_ec = VG_(arena_malloc)( VG_AR_EXECTXT,
- sizeof(struct _ExeContext *)
- + VG_(clo_backtrace_size) * sizeof(Addr) );
-
- for (i = 0; i < VG_(clo_backtrace_size); i++)
- new_ec->ips[i] = ips[i];
-
- new_ec->next = ec_list[hash];
- ec_list[hash] = new_ec;
-
- VGP_POPCC(VgpExeContext);
- return new_ec;
-}
-
-static
-void get_needed_regs(ThreadId tid, Addr* ip, Addr* fp, Addr* sp,
- Addr* stack_highest_word)
-{
- /* thread in thread table */
- ThreadState* tst = & VG_(threads)[ tid ];
- *ip = INSTR_PTR(tst->arch);
- *fp = FRAME_PTR(tst->arch);
- *sp = STACK_PTR(tst->arch);
- *stack_highest_word = tst->stack_highest_word;
-
-#ifdef __x86__
- /* Nasty little hack to deal with sysinfo syscalls - if libc is
- using the sysinfo page for syscalls (the TLS version does), then
- ip will always appear to be in that page when doing a syscall,
- not the actual libc function doing the syscall. This check sees
- if IP is within the syscall code, and pops the return address
- off the stack so that ip is placed within the library function
- calling the syscall. This makes stack backtraces much more
- useful. */
- if (*ip >= VG_(client_trampoline_code)+VG_(tramp_syscall_offset) &&
- *ip < VG_(client_trampoline_code)+VG_(trampoline_code_length) &&
- VG_(is_addressable)(*sp, sizeof(Addr), VKI_PROT_READ)) {
- *ip = *(Addr *)*sp;
- *sp += sizeof(Addr);
- }
-#endif
- if (0)
- VG_(printf)("tid %d: stack_highest=%p ip=%p sp=%p fp=%p\n",
- tid, *stack_highest_word, *ip, *sp, *fp);
-}
-
-ExeContext* VG_(get_ExeContext) ( ThreadId tid )
-{
- Addr ip, fp, sp, stack_highest_word;
-
- get_needed_regs(tid, &ip, &fp, &sp, &stack_highest_word);
- return VG_(get_ExeContext2)(ip, fp, sp, stack_highest_word);
-}
-
-/* Take a snapshot of the client's stack, putting the up to 'n_ips'
- instruction pointers into 'ips'. In order to be thread-safe, we pass in
- the thread's IP and FP. Returns number of IPs put in 'ips'. */
-UInt VG_(stack_snapshot) ( ThreadId tid, Addr* ips, UInt n_ips )
-{
- Addr ip, fp, sp, stack_highest_word;
-
- get_needed_regs(tid, &ip, &fp, &sp, &stack_highest_word);
- return stack_snapshot2(ips, n_ips, ip, fp, sp, stack_highest_word);
-}
-
-/*--------------------------------------------------------------------*/
-/*--- end ---*/
-/*--------------------------------------------------------------------*/
diff --git a/coregrind/vg_main.c b/coregrind/vg_main.c
index eaef6d2..15962ce 100644
--- a/coregrind/vg_main.c
+++ b/coregrind/vg_main.c
@@ -32,6 +32,7 @@
#include "core.h"
#include "ume.h"
+#include "pub_core_execontext.h"
#include <dirent.h>
#include <dlfcn.h>
diff --git a/coregrind/vg_mylibc.c b/coregrind/vg_mylibc.c
index 4cd55f9..3f261fd 100644
--- a/coregrind/vg_mylibc.c
+++ b/coregrind/vg_mylibc.c
@@ -31,6 +31,7 @@
*/
#include "core.h"
+#include "pub_core_stacktrace.h"
/* ---------------------------------------------------------------------
Wrappers around system calls, and other stuff, to do with signals.
@@ -1128,9 +1129,9 @@
into the signal handler. Also, it could be somewhat risky if we
actully got the panic/exception within the execontext/stack
dump/symtab code. But it's better than nothing. */
-static inline ExeContext *get_real_execontext(Addr ret)
+static inline void get_and_pp_real_StackTrace(Addr ret)
{
- ExeContext *ec;
+ Addr ips[VG_DEEPEST_BACKTRACE];
Addr sp, fp;
Addr stacktop;
ThreadId tid = VG_(get_lwp_tid)(VG_(gettid)());
@@ -1141,18 +1142,18 @@
stacktop = (Addr)(tst->os_state.stack + tst->os_state.stacksize);
- ec = VG_(get_ExeContext2)(ret, fp, sp, stacktop);
-
- return ec;
+ VG_(get_StackTrace2)(ips, VG_(clo_backtrace_size),
+ ret, fp, sp, stacktop);
+ VG_(pp_StackTrace) (ips, VG_(clo_backtrace_size));
}
__attribute__ ((noreturn))
-static void report_and_quit ( const Char* report, ExeContext *ec )
+static void report_and_quit ( const Char* report, StackTrace ips )
{
- if (ec == NULL)
- ec = get_real_execontext((Addr)__builtin_return_address(0));
-
- VG_(pp_ExeContext)(ec);
+ if (ips == NULL)
+ get_and_pp_real_StackTrace((Addr)__builtin_return_address(0));
+ else
+ VG_(pp_StackTrace)(ips, VG_(clo_backtrace_size));
VG_(pp_sched_status)();
VG_(printf)("\n");
@@ -1191,11 +1192,11 @@
}
__attribute__ ((noreturn))
-static void panic ( Char* name, Char* report, Char* str, ExeContext *ec )
+static void panic ( Char* name, Char* report, Char* str, StackTrace ips )
{
VG_(printf)("\n%s: the `impossible' happened:\n %s\n", name, str);
VG_(printf)("Basic block ctr is approximately %llu\n", VG_(bbs_done) );
- report_and_quit(report, ec);
+ report_and_quit(report, ips);
}
void VG_(core_panic) ( Char* str )
@@ -1203,9 +1204,9 @@
panic("valgrind", VG_BUGS_TO, str, NULL);
}
-void VG_(core_panic_at) ( Char* str, ExeContext *ec )
+void VG_(core_panic_at) ( Char* str, StackTrace ips )
{
- panic("valgrind", VG_BUGS_TO, str, ec);
+ panic("valgrind", VG_BUGS_TO, str, ips);
}
void VG_(tool_panic) ( Char* str )
diff --git a/coregrind/vg_scheduler.c b/coregrind/vg_scheduler.c
index e316eb4..66eba36 100644
--- a/coregrind/vg_scheduler.c
+++ b/coregrind/vg_scheduler.c
@@ -60,6 +60,8 @@
VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "core.h"
+#include "pub_core_stacktrace.h"
+
/* ---------------------------------------------------------------------
Types and globals for the scheduler.
@@ -168,7 +170,7 @@
for (i = 1; i < VG_N_THREADS; i++) {
if (VG_(threads)[i].status == VgTs_Empty) continue;
VG_(printf)("\nThread %d: status = %s\n", i, name_of_thread_state(VG_(threads)[i].status));
- VG_(pp_ExeContext)( VG_(get_ExeContext)( i ) );
+ VG_(get_and_pp_StackTrace)( i, VG_(clo_backtrace_size) );
}
VG_(printf)("\n");
}
@@ -844,7 +846,7 @@
VG_(message)( Vg_UserMsg,
"Emulation warning: unsupported action:");
VG_(message)( Vg_UserMsg, " %s", what);
- VG_(pp_ExeContext) ( VG_(get_ExeContext) ( tid ) );
+ VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
}
break;
}
@@ -1047,10 +1049,9 @@
break; }
case VG_USERREQ__PRINTF_BACKTRACE: {
- ExeContext *e = VG_(get_ExeContext)( tid );
int count =
VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
- VG_(pp_ExeContext)(e);
+ VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
SET_CLREQ_RETVAL( tid, count );
break; }
diff --git a/coregrind/vg_signals.c b/coregrind/vg_signals.c
index 6774b39..8e814d7 100644
--- a/coregrind/vg_signals.c
+++ b/coregrind/vg_signals.c
@@ -1373,8 +1373,7 @@
}
if (tid != VG_INVALID_THREADID) {
- ExeContext *ec = VG_(get_ExeContext)(tid);
- VG_(pp_ExeContext)(ec);
+ VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
}
}
@@ -1879,6 +1878,7 @@
from the client's code, then we can jump back into the scheduler
and have it delivered. Otherwise it's a Valgrind bug. */
{
+ Addr ips[ VG_(clo_backtrace_size) ];
Addr context_ip;
Char buf[1024];
ThreadState *tst = VG_(get_ThreadState)(VG_(get_lwp_tid)(VG_(gettid)()));
@@ -1936,11 +1936,12 @@
if (tid == 0) /* could happen after everyone has exited */
tid = VG_(master_tid);
tst = VG_(get_ThreadState)(tid);
- VG_(core_panic_at)("Killed by fatal signal",
- VG_(get_ExeContext2)(UCONTEXT_INSTR_PTR(uc),
- UCONTEXT_FRAME_PTR(uc),
- UCONTEXT_STACK_PTR(uc),
- (Addr)(tst->os_state.stack + tst->os_state.stacksize)));
+ VG_(get_StackTrace2)(ips, VG_(clo_backtrace_size),
+ UCONTEXT_INSTR_PTR(uc),
+ UCONTEXT_FRAME_PTR(uc),
+ UCONTEXT_STACK_PTR(uc),
+ (Addr)(tst->os_state.stack + tst->os_state.stacksize));
+ VG_(core_panic_at)("Killed by fatal signal", ips);
}
}
diff --git a/coregrind/vg_symtab2.c b/coregrind/vg_symtab2.c
index 9cb2a12..1ec15ae 100644
--- a/coregrind/vg_symtab2.c
+++ b/coregrind/vg_symtab2.c
@@ -2170,7 +2170,7 @@
#endif /* TEST */
/* Print into buf info on code address, function name and filename */
-Char* VG_(describe_eip)(Addr eip, Char* buf, Int n_buf)
+Char* VG_(describe_IP)(Addr eip, Char* buf, Int n_buf)
{
#define APPEND(str) \
{ UChar* sss; \
diff --git a/coregrind/vg_syscalls.c b/coregrind/vg_syscalls.c
index 14e4c97..b0b1c75 100644
--- a/coregrind/vg_syscalls.c
+++ b/coregrind/vg_syscalls.c
@@ -29,6 +29,7 @@
*/
#include "core.h"
+#include "pub_core_stacktrace.h"
/* All system calls are channelled through here, doing two things:
@@ -134,8 +135,7 @@
syscallname, start, end);
if (VG_(clo_verbosity) > 1) {
- ExeContext *ec = VG_(get_ExeContext)(tid);
- VG_(pp_ExeContext)(ec);
+ VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
}
}
@@ -479,7 +479,7 @@
i->fd = fd;
i->pathname = pathname;
- i->where = (tid == -1) ? NULL : VG_(get_ExeContext)(tid);
+ i->where = (tid == -1) ? NULL : VG_(record_ExeContext)(tid);
}
static
@@ -954,8 +954,7 @@
VG_(message)(Vg_UserMsg,
" Use --log-fd=<number> to select an alternative log fd.");
if (VG_(clo_verbosity) > 1) {
- ExeContext *ec = VG_(get_ExeContext)(tid);
- VG_(pp_ExeContext)(ec);
+ VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
}
return False;
}
@@ -5907,8 +5906,7 @@
VG_(message)
(Vg_DebugMsg,"WARNING: unhandled syscall: %u", (UInt)SYSNO);
if (VG_(clo_verbosity) > 1) {
- ExeContext *ec = VG_(get_ExeContext)(tid);
- VG_(pp_ExeContext)(ec);
+ VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
}
VG_(message)
(Vg_DebugMsg,"Do not panic. You may be able to fix this easily.");
diff --git a/coregrind/vg_threadmodel.c b/coregrind/vg_threadmodel.c
index 37209b0..a1bec5d 100644
--- a/coregrind/vg_threadmodel.c
+++ b/coregrind/vg_threadmodel.c
@@ -180,7 +180,7 @@
//:: if (th->state == state)
//:: return;
//::
-//:: ec = VG_(get_ExeContext)(th->tid);
+//:: ec = VG_(record_ExeContext)(th->tid);
//::
//:: switch(state) {
//:: case TS_Alive:
@@ -607,7 +607,7 @@
//::
//:: static void mutex_setstate(ThreadId tid, struct mutex *mx, enum mutex_state st)
//:: {
-//:: ExeContext *ec = VG_(get_ExeContext)(tid);
+//:: ExeContext *ec = VG_(record_ExeContext)(tid);
//::
//:: switch(st) {
//:: case MX_Init: