Remove the "vg_" prefix from a lot of non-global variables.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3462 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/amd64/signals.c b/coregrind/amd64/signals.c
index 0d1f584..256fc08 100644
--- a/coregrind/amd64/signals.c
+++ b/coregrind/amd64/signals.c
@@ -601,7 +601,7 @@
    if (VG_(clo_trace_signals))
       VG_(message)(
          Vg_DebugMsg, 
-         "vg_pop_signal_frame (thread %d): isRT=%d valid magic; RIP=%p", 
+         "VGA_(signal_return) (thread %d): isRT=%d valid magic; RIP=%p", 
          tid, isRT, tst->arch.vex.guest_RIP);
 
    /* tell the tools */
diff --git a/coregrind/vg_errcontext.c b/coregrind/vg_errcontext.c
index 26ef33c..9a9c8fc 100644
--- a/coregrind/vg_errcontext.c
+++ b/coregrind/vg_errcontext.c
@@ -50,11 +50,11 @@
 
 /* The list of error contexts found, both suppressed and unsuppressed.
    Initially empty, and grows as errors are detected. */
-static Error* vg_errors = NULL;
+static Error* errors = NULL;
 
 /* The list of suppression directives, as read from the specified
    suppressions file. */
-static Supp* vg_suppressions = NULL;
+static Supp* suppressions = NULL;
 
 /* Running count of unsuppressed errors detected. */
 static UInt n_errs_found = 0;
@@ -425,10 +425,10 @@
           Error* p;
           Error* p_prev;
           UInt   extra_size;
-          VgRes  exe_res                = Vg_MedRes;
-   static Bool   stopping_message       = False;
-   static Bool   slowdown_message       = False;
-   static Int    vg_n_errs_shown        = 0;
+          VgRes  exe_res          = Vg_MedRes;
+   static Bool   stopping_message = False;
+   static Bool   slowdown_message = False;
+   static Int    n_errs_shown     = 0;
 
    /* After M_COLLECT_NO_ERRORS_AFTER_SHOWN different errors have
       been found, or M_COLLECT_NO_ERRORS_AFTER_FOUND total errors
@@ -437,12 +437,12 @@
       extremely buggy programs, although it does make it pretty
       pointless to continue the Valgrind run after this point. */
    if (VG_(clo_error_limit) 
-       && (vg_n_errs_shown >= M_COLLECT_NO_ERRORS_AFTER_SHOWN
+       && (n_errs_shown >= M_COLLECT_NO_ERRORS_AFTER_SHOWN
            || n_errs_found >= M_COLLECT_NO_ERRORS_AFTER_FOUND)) {
       if (!stopping_message) {
          VG_(message)(Vg_UserMsg, "");
 
-	 if (vg_n_errs_shown >= M_COLLECT_NO_ERRORS_AFTER_SHOWN) {
+	 if (n_errs_shown >= M_COLLECT_NO_ERRORS_AFTER_SHOWN) {
             VG_(message)(Vg_UserMsg, 
                "More than %d different errors detected.  "
                "I'm not reporting any more.",
@@ -471,7 +471,7 @@
    /* After M_COLLECT_ERRORS_SLOWLY_AFTER different errors have
       been found, be much more conservative about collecting new
       ones. */
-   if (vg_n_errs_shown >= M_COLLECT_ERRORS_SLOWLY_AFTER) {
+   if (n_errs_shown >= M_COLLECT_ERRORS_SLOWLY_AFTER) {
       exe_res = Vg_LowRes;
       if (!slowdown_message) {
          VG_(message)(Vg_UserMsg, "");
@@ -488,7 +488,7 @@
    construct_error ( &err, tid, ekind, a, s, extra, NULL );
 
    /* First, see if we've got an error record matching this one. */
-   p      = vg_errors;
+   p      = errors;
    p_prev = NULL;
    while (p != NULL) {
       if (eq_Error(exe_res, p, &err)) {
@@ -506,9 +506,9 @@
             for it are faster. */
          if (p_prev != NULL) {
             vg_assert(p_prev->next == p);
-            p_prev->next    = p->next;
-            p->next         = vg_errors;
-            vg_errors = p;
+            p_prev->next = p->next;
+            p->next      = errors;
+            errors       = p;
 	 }
 
          return;
@@ -558,16 +558,16 @@
       p->extra = new_extra;
    }
 
-   p->next = vg_errors;
+   p->next = errors;
    p->supp = is_suppressible_error(&err);
-   vg_errors = p;
+   errors  = p;
    if (p->supp == NULL) {
       n_errs_found++;
       if (!is_first_shown_context)
          VG_(message)(Vg_UserMsg, "");
       pp_Error(p, False);
       is_first_shown_context = False;
-      vg_n_errs_shown++;
+      n_errs_shown++;
       do_actions_on_error(p, /*allow_db_attach*/True);
    } else {
       n_errs_suppressed++;
@@ -637,13 +637,13 @@
       return;
 
    n_err_contexts = 0;
-   for (p = vg_errors; p != NULL; p = p->next) {
+   for (p = errors; p != NULL; p = p->next) {
       if (p->supp == NULL)
          n_err_contexts++;
    }
 
    n_supp_contexts = 0;
-   for (su = vg_suppressions; su != NULL; su = su->next) {
+   for (su = suppressions; su != NULL; su = su->next) {
       if (su->count > 0)
          n_supp_contexts++;
    }
@@ -660,7 +660,7 @@
    for (i = 0; i < n_err_contexts; i++) {
       n_min = (1 << 30) - 1;
       p_min = NULL;
-      for (p = vg_errors; p != NULL; p = p->next) {
+      for (p = errors; p != NULL; p = p->next) {
          if (p->supp != NULL) continue;
          if (p->count < n_min) {
             n_min = p->count;
@@ -687,7 +687,7 @@
    if (n_supp_contexts > 0) 
       VG_(message)(Vg_DebugMsg, "");
    any_supp = False;
-   for (su = vg_suppressions; su != NULL; su = su->next) {
+   for (su = suppressions; su != NULL; su = su->next) {
       if (su->count > 0) {
          any_supp = True;
          VG_(message)(Vg_DebugMsg, "supp: %4d %s", su->count, su->sname);
@@ -787,7 +787,7 @@
    return found;
 }
 
-/* Read suppressions from the file specified in vg_clo_suppressions
+/* Read suppressions from the file specified in VG_(clo_suppressions)
    and place them in the suppressions list.  If there's any difficulty
    doing this, just give up -- there's no point in trying to recover.  
 */
@@ -924,8 +924,8 @@
          supp->callers[i] = tmp_callers[i];
       }
 
-      supp->next = vg_suppressions;
-      vg_suppressions = supp;
+      supp->next = suppressions;
+      suppressions = supp;
    }
    VG_(close)(fd);
    return;
@@ -946,7 +946,7 @@
 void VG_(load_suppressions) ( void )
 {
    Int i;
-   vg_suppressions = NULL;
+   suppressions = NULL;
    for (i = 0; i < VG_(clo_n_suppressions); i++) {
       if (VG_(clo_verbosity) > 1) {
          VG_(message)(Vg_DebugMsg, "Reading suppressions file: %s", 
@@ -1015,7 +1015,7 @@
    Supp* su;
 
    /* See if the error context matches any suppression. */
-   for (su = vg_suppressions; su != NULL; su = su->next) {
+   for (su = suppressions; su != NULL; su = su->next) {
       if (supp_matches_error(su, err) &&
           supp_matches_callers(err, su))
       {
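
For readers skimming only this hunk set: the renamed `errors` list is a single file-static, singly-linked list of every distinct error record, and a re-encountered error is bumped to the head so that frequently repeated errors are found after a short pointer chase. A self-contained plain-C sketch of that search and move-to-front step follows; Error and eq_error() are simplified stand-ins for the real Error type and eq_Error(), and the real path also applies the error-limit and comparison-resolution logic shown above.

   #include <stdio.h>

   typedef struct _Error {
      struct _Error* next;
      int            kind;    /* stand-in for the real error identity */
      int            count;
   } Error;

   static Error* errors = NULL;

   static int eq_error ( const Error* a, const Error* b )
   {
      return a->kind == b->kind;
   }

   /* Return the matching record, promoted to the list head, or NULL. */
   static Error* find_and_promote ( const Error* candidate )
   {
      Error* p      = errors;
      Error* p_prev = NULL;
      while (p != NULL) {
         if (eq_error(p, candidate)) {
            p->count++;
            if (p_prev != NULL) {      /* move-to-front, as above */
               p_prev->next = p->next;
               p->next      = errors;
               errors       = p;
            }
            return p;
         }
         p_prev = p;
         p      = p->next;
      }
      return NULL;                     /* new error: caller prepends it */
   }

   int main ( void )
   {
      Error a = { NULL, 1, 1 };
      Error b = { NULL, 2, 1 };
      Error probe = { NULL, 2, 0 };
      a.next = &b;
      errors = &a;
      Error* hit = find_and_promote(&probe);
      printf("hit: kind=%d count=%d; new head kind=%d\n",
             hit->kind, hit->count, errors->kind);
      return 0;
   }
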
diff --git a/coregrind/vg_messages.c b/coregrind/vg_messages.c
index 86905dc..f5400c0 100644
--- a/coregrind/vg_messages.c
+++ b/coregrind/vg_messages.c
@@ -38,14 +38,14 @@
 /* Size of a buffer used for creating messages. */
 #define M_MSGBUF 10000
 
-static char vg_mbuf[M_MSGBUF];
-static int vg_n_mbuf;
+static char mbuf[M_MSGBUF];
+static int n_mbuf;
 
 static void add_to_buf ( Char c, void *p )
 {
-  if (vg_n_mbuf >= (M_MSGBUF-1)) return;
-  vg_mbuf[vg_n_mbuf++] = c;
-  vg_mbuf[vg_n_mbuf]   = 0;
+  if (n_mbuf >= (M_MSGBUF-1)) return;
+  mbuf[n_mbuf++] = c;
+  mbuf[n_mbuf]   = 0;
 }
 
 static void add_timestamp ( Char *buf )
@@ -81,8 +81,8 @@
    Char ts[32];
    Char c;
    static const Char pfx[] = ">>>>>>>>>>>>>>>>";
-   vg_n_mbuf = 0;
-   vg_mbuf[vg_n_mbuf] = 0;
+   n_mbuf = 0;
+   mbuf[n_mbuf] = 0;
 
    if (VG_(clo_time_stamp))
      add_timestamp(ts);
@@ -109,7 +109,7 @@
    int count = 0;
    if (VG_(clo_log_fd) >= 0) {
       add_to_buf('\n',0);
-      VG_(send_bytes_to_logging_sink) ( vg_mbuf, VG_(strlen)(vg_mbuf) );
+      VG_(send_bytes_to_logging_sink) ( mbuf, VG_(strlen)(mbuf) );
       count = 1;
    }
    return count;
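
The vg_messages.c hunks all touch one small mechanism: a message is built one character at a time into a file-static buffer that is reset per message, kept NUL-terminated after every append, silently truncated on overflow, and finally flushed whole to the logging sink. A self-contained plain-C sketch of that pattern, with stderr standing in for VG_(send_bytes_to_logging_sink):

   #include <stdio.h>
   #include <string.h>

   #define M_MSGBUF 10000

   static char mbuf[M_MSGBUF];
   static int  n_mbuf;

   static void add_to_buf ( char c )
   {
      if (n_mbuf >= (M_MSGBUF-1)) return;   /* drop, never overflow */
      mbuf[n_mbuf++] = c;
      mbuf[n_mbuf]   = 0;                   /* keep it NUL-terminated */
   }

   int main ( void )
   {
      const char* msg = "==1234== example message";
      int i;

      n_mbuf = 0;                           /* start a new message */
      mbuf[n_mbuf] = 0;
      for (i = 0; msg[i] != 0; i++)
         add_to_buf(msg[i]);
      add_to_buf('\n');

      /* stderr stands in for VG_(send_bytes_to_logging_sink) */
      fwrite(mbuf, 1, strlen(mbuf), stderr);
      return 0;
   }
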
diff --git a/coregrind/vg_signals.c b/coregrind/vg_signals.c
index 4ed1269..86a37e1 100644
--- a/coregrind/vg_signals.c
+++ b/coregrind/vg_signals.c
@@ -89,8 +89,8 @@
    Forwards decls.
    ------------------------------------------------------------------ */
 
-static void vg_sync_signalhandler  ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext * );
-static void vg_async_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext * );
+static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext * );
+static void async_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext * );
 static void sigvgkill_handler	   ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext * );
 
 static const Char *signame(Int sigNo);
@@ -152,7 +152,7 @@
       } 
       SCSS;
 
-static SCSS vg_scss;
+static SCSS scss;
 
 
 /* -----------------------------------------------------
@@ -191,13 +191,13 @@
    } 
    SKSS;
 
-static SKSS vg_skss;
+static SKSS skss;
 
 Bool VG_(is_sig_ign)(Int sigNo)
 {
    vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);
 
-   return vg_scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN;
+   return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN;
 }
 
 /* ---------------------------------------------------------------------
@@ -211,8 +211,8 @@
    VG_(printf)("\n\nSKSS:\n");
    for (sig = 1; sig <= _VKI_NSIG; sig++) {
       VG_(printf)("sig %d:  handler 0x%x,  flags 0x%x\n", sig,
-                  vg_skss.skss_per_sig[sig].skss_handler,
-                  vg_skss.skss_per_sig[sig].skss_flags );
+                  skss.skss_per_sig[sig].skss_handler,
+                  skss.skss_per_sig[sig].skss_flags );
 
    }
 }
@@ -236,8 +236,8 @@
       void *skss_handler;
       void *scss_handler;
       
-      scss_handler = vg_scss.scss_per_sig[sig].scss_handler;
-      scss_flags   = vg_scss.scss_per_sig[sig].scss_flags;
+      scss_handler = scss.scss_per_sig[sig].scss_handler;
+      scss_flags   = scss.scss_per_sig[sig].scss_flags;
 
       switch(sig) {
       case VKI_SIGSEGV:
@@ -247,7 +247,7 @@
       case VKI_SIGTRAP:
 	 /* For these, we always want to catch them and report, even
 	    if the client code doesn't. */
-	 skss_handler = vg_sync_signalhandler;
+	 skss_handler = sync_signalhandler;
 	 break;
 
       case VKI_SIGCONT:
@@ -260,12 +260,12 @@
             only set a handler if the client has set a signal handler.         
             Otherwise the kernel will interrupt a syscall which                
             wouldn't have otherwise been interrupted. */                 
-	 if (vg_scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
+	 if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
 	    skss_handler = VKI_SIG_DFL;
-	 else if (vg_scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
+	 else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
 	    skss_handler = VKI_SIG_IGN;
 	 else
-	    skss_handler = vg_async_signalhandler;
+	    skss_handler = async_signalhandler;
 	 break;
 
       default:
@@ -279,7 +279,7 @@
 	    if (scss_handler == VKI_SIG_IGN)
 	       skss_handler = VKI_SIG_IGN;
 	    else 
-	       skss_handler = vg_async_signalhandler;
+	       skss_handler = async_signalhandler;
 	 }
 	 break;
       }
@@ -339,8 +339,8 @@
    struct vki_sigaction ksa, ksa_old;
 
    /* Remember old SKSS and calculate new one. */
-   skss_old = vg_skss;
-   calculate_SKSS_from_SCSS ( &vg_skss );
+   skss_old = skss;
+   calculate_SKSS_from_SCSS ( &skss );
 
    /* Compare the new SKSS entries vs the old ones, and update kernel
       where they differ. */
@@ -353,15 +353,15 @@
 
       if (!force_update) {
          if ((skss_old.skss_per_sig[sig].skss_handler
-              == vg_skss.skss_per_sig[sig].skss_handler)
+              == skss.skss_per_sig[sig].skss_handler)
              && (skss_old.skss_per_sig[sig].skss_flags
-                 == vg_skss.skss_per_sig[sig].skss_flags))
+                 == skss.skss_per_sig[sig].skss_flags))
             /* no difference */
             continue;
       }
 
-      ksa.ksa_handler = vg_skss.skss_per_sig[sig].skss_handler;
-      ksa.sa_flags    = vg_skss.skss_per_sig[sig].skss_flags;
+      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
+      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
       ksa.sa_restorer = VG_(sigreturn);
 
       /* block all signals in handler */
@@ -508,21 +508,21 @@
    /* If the client supplied non-NULL old_act, copy the relevant SCSS
       entry into it. */
    if (old_act) {
-      old_act->ksa_handler = vg_scss.scss_per_sig[signo].scss_handler;
-      old_act->sa_flags    = vg_scss.scss_per_sig[signo].scss_flags;
-      old_act->sa_mask     = vg_scss.scss_per_sig[signo].scss_mask;
-      old_act->sa_restorer = vg_scss.scss_per_sig[signo].scss_restorer;
+      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
+      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
+      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
+      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
    }
 
    /* And now copy new SCSS entry from new_act. */
    if (new_act) {
-      vg_scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
-      vg_scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
-      vg_scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;
-      vg_scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
+      scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
+      scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
+      scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;
+      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
 
-      VG_(sigdelset)(&vg_scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
-      VG_(sigdelset)(&vg_scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
+      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
+      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
    }
 
    /* All happy bunnies ... */
@@ -765,7 +765,7 @@
 /* Set up a stack frame (VgSigContext) for the client's signal
    handler. */
 static
-void vg_push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo )
+void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo )
 {
    Addr         esp_top_of_frame;
    ThreadState* tst;
@@ -777,10 +777,10 @@
 
    if (VG_(clo_trace_signals))
       VG_(message)(Vg_DebugMsg, 
-         "vg_push_signal_frame (thread %d): signal %d", tid, sigNo);
+         "push_signal_frame (thread %d): signal %d", tid, sigNo);
 
    if (/* this signal asked to run on an alt stack */
-       (vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
+       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
        && /* there is a defined and enabled alt stack, which we're not
              already using.  Logic from get_sigframe in
              arch/i386/kernel/signal.c. */
@@ -806,17 +806,17 @@
       VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
    }
 
-   vg_assert(vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
-   vg_assert(vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
+   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
+   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
 
    /* This may fail if the client stack is busted; if that happens,
       the whole process will exit rather than simply calling the
       signal handler. */
    VGA_(push_signal_frame)(tid, esp_top_of_frame, siginfo,
-                           vg_scss.scss_per_sig[sigNo].scss_handler,
-                           vg_scss.scss_per_sig[sigNo].scss_flags,
+                           scss.scss_per_sig[sigNo].scss_handler,
+                           scss.scss_per_sig[sigNo].scss_flags,
 			  &tst->sig_mask,
-			   vg_scss.scss_per_sig[sigNo].scss_restorer);
+			   scss.scss_per_sig[sigNo].scss_restorer);
 }
 
 
@@ -1235,7 +1235,7 @@
    If we're not being quiet, then print out some more detail about
    fatal signals (esp. core dumping signals).
  */
-static void vg_default_action(const vki_siginfo_t *info, ThreadId tid)
+static void default_action(const vki_siginfo_t *info, ThreadId tid)
 {
    Int  sigNo     = info->si_signo;
    Bool terminate = False;	/* kills process         */
@@ -1414,7 +1414,7 @@
 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info )
 {
    Int			sigNo = info->si_signo;
-   SCSS_Per_Signal	*handler = &vg_scss.scss_per_sig[sigNo];
+   SCSS_Per_Signal	*handler = &scss.scss_per_sig[sigNo];
    void			*handler_fn;
    ThreadState		*tst = VG_(get_ThreadState)(tid);
 
@@ -1444,7 +1444,7 @@
    vg_assert(handler_fn != VKI_SIG_IGN);
 
    if (handler_fn == VKI_SIG_DFL) {
-      vg_default_action(info, tid);
+      default_action(info, tid);
    } else {
       /* Create a signal delivery frame, and set the client's %ESP and
 	 %EIP so that when execution continues, we will enter the
@@ -1458,7 +1458,7 @@
       */
       vg_assert(VG_(is_valid_tid)(tid));
 
-      vg_push_signal_frame ( tid, info );
+      push_signal_frame ( tid, info );
 
       if (handler->scss_flags & VKI_SA_ONESHOT) {
 	 /* Do the ONESHOT thing. */
@@ -1628,7 +1628,7 @@
    since that's the only time this set of signals is unblocked.
 */
 static 
-void vg_async_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
+void async_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
 {
    ThreadId tid = VG_(get_lwp_tid)(VG_(gettid)());
    ThreadState *tst = VG_(get_ThreadState)(tid);
@@ -1644,7 +1644,7 @@
 
    /* Update thread state properly */
    VGA_(interrupted_syscall)(tid, uc, 
-			     !!(vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART));
+			     !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART));
 
    /* Set up the thread's state to deliver a signal */
    if (!VG_(is_sig_ign)(info->si_signo))
@@ -1654,7 +1654,7 @@
       handler. */
    VG_(resume_scheduler)(tid);
 
-   VG_(core_panic)("vg_async_signalhandler: got unexpected signal while outside of scheduler");
+   VG_(core_panic)("async_signalhandler: got unexpected signal while outside of scheduler");
 }
 
 /* Extend the stack to cover addr.  maxsize is the limit the stack can grow to.
@@ -1729,7 +1729,7 @@
    Receive a sync signal from the host. 
 */
 static
-void vg_sync_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
+void sync_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
 {
    ThreadId tid = VG_(get_lwp_tid)(VG_(gettid)());
 
@@ -1758,8 +1758,8 @@
 	    client's signal mask was applied, so we can't get here
 	    unless the client wants this signal right now.  This means
 	    we can simply use the async_signalhandler. */
-	 vg_async_signalhandler(sigNo, info, uc);
-	 VG_(core_panic)("vg_async_signalhandler returned!?\n");
+	 async_signalhandler(sigNo, info, uc);
+	 VG_(core_panic)("async_signalhandler returned!?\n");
       }
 
       if (info->_sifields._kill._pid == 0) {
@@ -1971,12 +1971,12 @@
 }
 
 static __attribute((unused))
-void pp_vg_ksigaction ( struct vki_sigaction* sa )
+void pp_ksigaction ( struct vki_sigaction* sa )
 {
    Int i;
-   VG_(printf)("vg_ksigaction: handler %p, flags 0x%x, restorer %p\n", 
+   VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n", 
                sa->ksa_handler, (UInt)sa->sa_flags, sa->sa_restorer);
-   VG_(printf)("vg_ksigaction: { ");
+   VG_(printf)("pp_ksigaction: { ");
    for (i = 1; i <= VG_(max_signal); i++)
       if (VG_(sigismember(&(sa->sa_mask),i)))
          VG_(printf)("%d ", i);
@@ -2105,7 +2105,7 @@
       if (i >= VKI_SIGRTMIN) {
 	 struct vki_sigaction tsa;
 
-	 tsa.ksa_handler = (void *)vg_sync_signalhandler;
+	 tsa.ksa_handler = (void *)sync_signalhandler;
 	 tsa.sa_flags = VKI_SA_SIGINFO;
 	 tsa.sa_restorer = 0;
 	 VG_(sigfillset)(&tsa.sa_mask);
@@ -2126,23 +2126,23 @@
          VG_(printf)("snaffling handler 0x%x for signal %d\n", 
                      (Addr)(sa.ksa_handler), i );
 
-      vg_scss.scss_per_sig[i].scss_handler  = sa.ksa_handler;
-      vg_scss.scss_per_sig[i].scss_flags    = sa.sa_flags;
-      vg_scss.scss_per_sig[i].scss_mask     = sa.sa_mask;
-      vg_scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
+      scss.scss_per_sig[i].scss_handler  = sa.ksa_handler;
+      scss.scss_per_sig[i].scss_flags    = sa.sa_flags;
+      scss.scss_per_sig[i].scss_mask     = sa.sa_mask;
+      scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
    }
 
    if (VG_(clo_trace_signals))
       VG_(message)(Vg_DebugMsg, "Max kernel-supported signal is %d", VG_(max_signal));
 
    /* Our private internal signals are treated as ignored */
-   vg_scss.scss_per_sig[VKI_SIGVGCHLD].scss_handler = VKI_SIG_IGN;
-   vg_scss.scss_per_sig[VKI_SIGVGCHLD].scss_flags   = VKI_SA_SIGINFO;
-   VG_(sigfillset)(&vg_scss.scss_per_sig[VKI_SIGVGCHLD].scss_mask);
+   scss.scss_per_sig[VKI_SIGVGCHLD].scss_handler = VKI_SIG_IGN;
+   scss.scss_per_sig[VKI_SIGVGCHLD].scss_flags   = VKI_SA_SIGINFO;
+   VG_(sigfillset)(&scss.scss_per_sig[VKI_SIGVGCHLD].scss_mask);
 
-   vg_scss.scss_per_sig[VKI_SIGVGKILL].scss_handler = VKI_SIG_IGN;
-   vg_scss.scss_per_sig[VKI_SIGVGKILL].scss_flags   = VKI_SA_SIGINFO;
-   VG_(sigfillset)(&vg_scss.scss_per_sig[VKI_SIGVGKILL].scss_mask);
+   scss.scss_per_sig[VKI_SIGVGKILL].scss_handler = VKI_SIG_IGN;
+   scss.scss_per_sig[VKI_SIGVGKILL].scss_flags   = VKI_SA_SIGINFO;
+   VG_(sigfillset)(&scss.scss_per_sig[VKI_SIGVGKILL].scss_mask);
 
    /* Copy the process' signal mask into the root thread. */
    vg_assert(VG_(threads)[VG_(master_tid)].status == VgTs_Init);
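
In vg_signals.c the renamed `scss`/`skss` pair are two views of signal state: `scss` records what the client asked for with sigaction, while `skss` is what Valgrind actually installs in the kernel, recomputed by calculate_SKSS_from_SCSS and pushed to the kernel only where it differs from the previous table. Below is a simplified, self-contained sketch of the handler-selection rule; sync_h/async_h stand in for sync_signalhandler/async_signalhandler, and the real code additionally fills in skss_flags and special-cases SIGKILL, SIGSTOP and Valgrind's private signals, which are omitted here.

   #include <signal.h>
   #include <stdio.h>

   typedef void (*handler_t)(int);

   static void sync_h  ( int sig ) { (void)sig; }   /* stand-in */
   static void async_h ( int sig ) { (void)sig; }   /* stand-in */

   static handler_t choose_kernel_handler ( int sig, handler_t client )
   {
      switch (sig) {
         case SIGSEGV: case SIGBUS: case SIGFPE:
         case SIGILL:  case SIGTRAP:
            /* Faults are always caught so they can be reported, even
               if the client installed no handler. */
            return sync_h;
         case SIGCONT:  /* and, in the original, the other signals whose
                           default action is to ignore */
            /* Only intercept if the client really set a handler, so
               syscalls are not interrupted needlessly. */
            if (client == SIG_DFL) return SIG_DFL;
            if (client == SIG_IGN) return SIG_IGN;
            return async_h;
         default:
            /* Honour SIG_IGN; everything else (including SIG_DFL,
               whose action default_action() simulates) is delivered
               through the async handler. */
            if (client == SIG_IGN) return SIG_IGN;
            return async_h;
      }
   }

   int main ( void )
   {
      printf("SIGSEGV        -> %s\n",
             choose_kernel_handler(SIGSEGV, SIG_DFL) == sync_h
                ? "sync_h" : "other");
      printf("SIGUSR1, ign'd -> %s\n",
             choose_kernel_handler(SIGUSR1, SIG_IGN) == SIG_IGN
                ? "SIG_IGN" : "other");
      return 0;
   }
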
diff --git a/coregrind/vg_symtab2.c b/coregrind/vg_symtab2.c
index 5a5371d..d7c953e 100644
--- a/coregrind/vg_symtab2.c
+++ b/coregrind/vg_symtab2.c
@@ -1198,7 +1198,7 @@
 /* Read the symbols from the object/exe specified by the SegInfo into
    the tables within the supplied SegInfo.  */
 static
-Bool vg_read_lib_symbols ( SegInfo* si )
+Bool read_lib_symbols ( SegInfo* si )
 {
    Bool          res;
    ElfXX_Ehdr*   ehdr;       /* The ELF header                          */
@@ -1607,7 +1607,7 @@
    si->bss_start  = si->bss_size  = 0;
 
    /* And actually fill it up. */
-   if (!vg_read_lib_symbols ( si ) && 0) {
+   if (!read_lib_symbols ( si ) && 0) {
       /* XXX this interacts badly with the prevN optimization in
          addStr().  Since this frees the si, the si pointer value can
          be recycled, which confuses the curr_si == si test.  For now,
@@ -1879,8 +1879,8 @@
 /* The whole point of this whole big deal: map a code address to a
    plausible symbol name.  Returns False if no idea; otherwise True.
    Caller supplies buf and nbuf.  If demangle is False, don't do
-   demangling, regardless of vg_clo_demangle -- probably because the
-   call has come from vg_what_fn_or_object_is_this. */
+   demangling, regardless of VG_(clo_demangle) -- probably because the
+   call has come from VG_(get_fnname_nodemangle)(). */
 static
 Bool get_fnname ( Bool demangle, Addr a, Char* buf, Int nbuf,
                   Bool match_anywhere_in_fun, Bool show_offset)
diff --git a/coregrind/x86/signals.c b/coregrind/x86/signals.c
index 5cd8b97..2f72fe4 100644
--- a/coregrind/x86/signals.c
+++ b/coregrind/x86/signals.c
@@ -690,7 +690,7 @@
    if (VG_(clo_trace_signals))
       VG_(message)(
          Vg_DebugMsg, 
-         "vg_pop_signal_frame (thread %d): isRT=%d valid magic; EIP=%p", 
+         "VGA_(signal_return) (thread %d): isRT=%d valid magic; EIP=%p", 
          tid, isRT, tst->arch.vex.guest_EIP);
 
    /* tell the tools */
diff --git a/memcheck/mac_leakcheck.c b/memcheck/mac_leakcheck.c
index fd55422..220d151 100644
--- a/memcheck/mac_leakcheck.c
+++ b/memcheck/mac_leakcheck.c
@@ -52,7 +52,7 @@
 
 
 static
-void vg_scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
+void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
 {
    if (0)
       VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
@@ -330,7 +330,7 @@
    if (VG_DEBUG_LEAKCHECK)
       VG_(printf)("scan %p-%p\n", start, len);
    VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
-   VG_(set_fault_catcher)(vg_scan_all_valid_memory_catcher);
+   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);
 
    lc_scanned += end-ptr;
 
@@ -557,7 +557,7 @@
 
 /* Top level entry point to leak detector.  Call here, passing in
    suitable address-validating functions (see comment at top of
-   vg_scan_all_valid_memory above).  All this is to avoid duplication
+   scan_all_valid_memory above).  All this is to avoid duplication
    of the leak-detection code for Memcheck and Addrcheck.
    Also pass in a tool-specific function to extract the .where field
    for allocated blocks, an indication of the resolution wanted for
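
In mac_leakcheck.c, scan_all_valid_memory_catcher is installed through VG_(set_fault_catcher) so that a fault taken while the leak checker walks client memory does not kill the run; the catcher's body lies outside the hunks above, but the usual shape of such code is to escape back out of the probing loop. Below is a generic plain-C illustration of that "probe under a fault catcher" pattern, assuming a siglongjmp-based escape; the names and the sigaction plumbing are illustrative only, not Valgrind's internals.

   #include <setjmp.h>
   #include <signal.h>
   #include <stdio.h>
   #include <string.h>

   static sigjmp_buf scan_env;

   static void scan_fault_catcher ( int sig )
   {
      (void)sig;
      siglongjmp(scan_env, 1);
   }

   static int is_readable ( const volatile char* p )
   {
      if (sigsetjmp(scan_env, /*savesigs*/1) == 0) {
         (void)*p;              /* may fault */
         return 1;
      }
      return 0;                 /* got here via the catcher */
   }

   int main ( void )
   {
      struct sigaction sa;
      char ok = 'x';

      memset(&sa, 0, sizeof(sa));
      sa.sa_handler = scan_fault_catcher;
      sigemptyset(&sa.sa_mask);
      sigaction(SIGSEGV, &sa, NULL);

      printf("stack byte readable: %d\n", is_readable(&ok));
      printf("address 8 readable:  %d\n", is_readable((char*)8));
      return 0;
   }
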
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index f0b8b97..99f45ce 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -1631,54 +1631,54 @@
    CGenBlock;
 
 /* This subsystem is self-initialising. */
-static UInt       vg_cgb_size = 0;
-static UInt       vg_cgb_used = 0;
-static CGenBlock* vg_cgbs     = NULL;
+static UInt       cgb_size = 0;
+static UInt       cgb_used = 0;
+static CGenBlock* cgbs     = NULL;
 
 /* Stats for this subsystem. */
-static UInt vg_cgb_used_MAX = 0;   /* Max in use. */
-static UInt vg_cgb_allocs   = 0;   /* Number of allocs. */
-static UInt vg_cgb_discards = 0;   /* Number of discards. */
-static UInt vg_cgb_search   = 0;   /* Number of searches. */
+static UInt cgb_used_MAX = 0;   /* Max in use. */
+static UInt cgb_allocs   = 0;   /* Number of allocs. */
+static UInt cgb_discards = 0;   /* Number of discards. */
+static UInt cgb_search   = 0;   /* Number of searches. */
 
 
 static
-Int vg_alloc_client_block ( void )
+Int alloc_client_block ( void )
 {
    UInt       i, sz_new;
    CGenBlock* cgbs_new;
 
-   vg_cgb_allocs++;
+   cgb_allocs++;
 
-   for (i = 0; i < vg_cgb_used; i++) {
-      vg_cgb_search++;
-      if (vg_cgbs[i].start == 0 && vg_cgbs[i].size == 0)
+   for (i = 0; i < cgb_used; i++) {
+      cgb_search++;
+      if (cgbs[i].start == 0 && cgbs[i].size == 0)
          return i;
    }
 
    /* Not found.  Try to allocate one at the end. */
-   if (vg_cgb_used < vg_cgb_size) {
-      vg_cgb_used++;
-      return vg_cgb_used-1;
+   if (cgb_used < cgb_size) {
+      cgb_used++;
+      return cgb_used-1;
    }
 
    /* Ok, we have to allocate a new one. */
-   tl_assert(vg_cgb_used == vg_cgb_size);
-   sz_new = (vg_cgbs == NULL) ? 10 : (2 * vg_cgb_size);
+   tl_assert(cgb_used == cgb_size);
+   sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
 
    cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
-   for (i = 0; i < vg_cgb_used; i++) 
-      cgbs_new[i] = vg_cgbs[i];
+   for (i = 0; i < cgb_used; i++) 
+      cgbs_new[i] = cgbs[i];
 
-   if (vg_cgbs != NULL)
-      VG_(free)( vg_cgbs );
-   vg_cgbs = cgbs_new;
+   if (cgbs != NULL)
+      VG_(free)( cgbs );
+   cgbs = cgbs_new;
 
-   vg_cgb_size = sz_new;
-   vg_cgb_used++;
-   if (vg_cgb_used > vg_cgb_used_MAX)
-      vg_cgb_used_MAX = vg_cgb_used;
-   return vg_cgb_used-1;
+   cgb_size = sz_new;
+   cgb_used++;
+   if (cgb_used > cgb_used_MAX)
+      cgb_used_MAX = cgb_used;
+   return cgb_used-1;
 }
 
 
@@ -1686,7 +1686,7 @@
 {
    VG_(message)(Vg_DebugMsg, 
       "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
-      vg_cgb_allocs, vg_cgb_discards, vg_cgb_used_MAX, vg_cgb_search 
+      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search 
    );
 }
 
@@ -1704,15 +1704,15 @@
    /* VG_(printf)("try to identify %d\n", a); */
 
    /* Perhaps it's a general block ? */
-   for (i = 0; i < vg_cgb_used; i++) {
-      if (vg_cgbs[i].start == 0 && vg_cgbs[i].size == 0) 
+   for (i = 0; i < cgb_used; i++) {
+      if (cgbs[i].start == 0 && cgbs[i].size == 0) 
          continue;
-      if (VG_(addr_is_in_block)(a, vg_cgbs[i].start, vg_cgbs[i].size)) {
+      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size)) {
          MAC_Mempool **d, *mp;
 
          /* OK - maybe it's a mempool, too? */
          mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
-                                             (UWord)vg_cgbs[i].start,
+                                             (UWord)cgbs[i].start,
                                              (void*)&d);
          if(mp != NULL) {
             if(mp->chunks != NULL) {
@@ -1728,16 +1728,16 @@
                }
             }
             ai->akind = Mempool;
-            ai->blksize = vg_cgbs[i].size;
-            ai->rwoffset  = (Int)(a) - (Int)(vg_cgbs[i].start);
-            ai->lastchange = vg_cgbs[i].where;
+            ai->blksize = cgbs[i].size;
+            ai->rwoffset  = (Int)(a) - (Int)(cgbs[i].start);
+            ai->lastchange = cgbs[i].where;
             return True;
          }
          ai->akind = UserG;
-         ai->blksize = vg_cgbs[i].size;
-         ai->rwoffset  = (Int)(a) - (Int)(vg_cgbs[i].start);
-         ai->lastchange = vg_cgbs[i].where;
-	 ai->desc = vg_cgbs[i].desc;
+         ai->blksize = cgbs[i].size;
+         ai->rwoffset  = (Int)(a) - (Int)(cgbs[i].start);
+         ai->lastchange = cgbs[i].where;
+	 ai->desc = cgbs[i].desc;
          return True;
       }
    }
@@ -1803,12 +1803,12 @@
 
       case VG_USERREQ__CREATE_BLOCK: /* describe a block */
 	 if (arg[1] != 0 && arg[2] != 0) {
-	    i = vg_alloc_client_block();
-	    /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */
-	    vg_cgbs[i].start = arg[1];
-	    vg_cgbs[i].size  = arg[2];
-	    vg_cgbs[i].desc  = VG_(strdup)((Char *)arg[3]);
-	    vg_cgbs[i].where = VG_(record_ExeContext) ( tid );
+	    i = alloc_client_block();
+	    /* VG_(printf)("allocated %d %p\n", i, cgbs); */
+	    cgbs[i].start = arg[1];
+	    cgbs[i].size  = arg[2];
+	    cgbs[i].desc  = VG_(strdup)((Char *)arg[3]);
+	    cgbs[i].where = VG_(record_ExeContext) ( tid );
 
 	    *ret = i;
 	 } else
@@ -1816,15 +1816,15 @@
 	 break;
 
       case VG_USERREQ__DISCARD: /* discard */
-         if (vg_cgbs == NULL 
-             || arg[2] >= vg_cgb_used ||
-	     (vg_cgbs[arg[2]].start == 0 && vg_cgbs[arg[2]].size == 0)) {
+         if (cgbs == NULL 
+             || arg[2] >= cgb_used ||
+	     (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
             *ret = 1;
 	 } else {
-	    tl_assert(arg[2] >= 0 && arg[2] < vg_cgb_used);
-	    vg_cgbs[arg[2]].start = vg_cgbs[arg[2]].size = 0;
-	    VG_(free)(vg_cgbs[arg[2]].desc);
-	    vg_cgb_discards++;
+	    tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
+	    cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
+	    VG_(free)(cgbs[arg[2]].desc);
+	    cgb_discards++;
 	    *ret = 0;
 	 }
 	 break;
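
The VG_USERREQ__CREATE_BLOCK and VG_USERREQ__DISCARD cases above are the tool-side half of a pair of Memcheck client requests: the client describes an address range, Memcheck records it in cgbs[] so later error reports can name it, and DISCARD clears the slot again. A sketch of the client side, assuming the memcheck.h of this tree exposes the requests as VALGRIND_CREATE_BLOCK and VALGRIND_DISCARD (worth checking against the actual header); outside Valgrind the requests are no-ops.

   #include <stdlib.h>
   #include "memcheck.h"

   int main ( void )
   {
      char* arena = malloc(1024);
      if (arena == NULL) return 1;

      /* Describe a sub-range; Memcheck errors touching it can then be
         reported against "my table".  The value returned is the index
         of the cgbs[] slot filled in by CREATE_BLOCK above. */
      int blk = VALGRIND_CREATE_BLOCK(arena + 64, 128, "my table");

      /* ... use arena ... */

      VALGRIND_DISCARD(blk);    /* clears the slot; cgb_discards++ */
      free(arena);
      return 0;
   }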