A bit of tidying up:

* get rid of 'struct _EC' (a.k.a. 'struct EC_') and use ExeContext
  everywhere

* remove stacktrace_to_EC and call
  VG_(make_ExeContext_from_StackTrace) directly

* comment out some unused code



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@8749 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c
index d290c4d..da2c9b8 100644
--- a/helgrind/hg_main.c
+++ b/helgrind/hg_main.c
@@ -356,7 +356,7 @@
 /*--- Print out the primary data structures                    ---*/
 /*----------------------------------------------------------------*/
 
-static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
+//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
 
 #define PP_THREADS      (1<<1)
 #define PP_LOCKS        (1<<2)
@@ -909,130 +909,130 @@
 /*--- the core memory state machine (msm__* functions)         ---*/
 /*----------------------------------------------------------------*/
 
-static WordSetID add_BHL ( WordSetID lockset ) {
-   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
-}
-static WordSetID del_BHL ( WordSetID lockset ) {
-   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
-}
+//static WordSetID add_BHL ( WordSetID lockset ) {
+//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
+//}
+//static WordSetID del_BHL ( WordSetID lockset ) {
+//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
+//}
 
 
-/* Last-lock-lossage records.  This mechanism exists to help explain
-   to programmers why we are complaining about a race.  The idea is to
-   monitor all lockset transitions.  When a previously nonempty
-   lockset becomes empty, the lock(s) that just disappeared (the
-   "lossage") are the locks that have consistently protected the
-   location (ga_of_access) in question for the longest time.  Most of
-   the time the lossage-set is a single lock.  Because the
-   lossage-lock is the one that has survived longest, there is there
-   is a good chance that it is indeed the lock that the programmer
-   intended to use to protect the location.
-
-   Note that we cannot in general just look at the lossage set when we
-   see a transition to ShM(...,empty-set), because a transition to an
-   empty lockset can happen arbitrarily far before the point where we
-   want to report an error.  This is in the case where there are many
-   transitions ShR -> ShR, all with an empty lockset, and only later
-   is there a transition to ShM.  So what we want to do is note the
-   lossage lock at the point where a ShR -> ShR transition empties out
-   the lockset, so we can present it later if there should be a
-   transition to ShM.
-
-   So this function finds such transitions.  For each, it associates
-   in ga_to_lastlock, the guest address and the lossage lock.  In fact
-   we do not record the Lock* directly as that may disappear later,
-   but instead the ExeContext inside the Lock which says where it was
-   initialised or first locked.  ExeContexts are permanent so keeping
-   them indefinitely is safe.
-
-   A boring detail: the hardware bus lock is not interesting in this
-   respect, so we first remove that from the pre/post locksets.
-*/
-
-static UWord stats__ga_LL_adds = 0;
-
-static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
-
-static 
-void record_last_lock_lossage ( Addr ga_of_access,
-                                WordSetID lset_old, WordSetID lset_new )
-{
-   Lock* lk;
-   Int   card_old, card_new;
-
-   tl_assert(lset_old != lset_new);
-
-   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
-                      (Int)lset_old, 
-                      HG_(cardinalityWS)(univ_lsets,lset_old),
-                      (Int)lset_new, 
-                      HG_(cardinalityWS)(univ_lsets,lset_new),
-                      ga_of_access );
-
-   /* This is slow, but at least it's simple.  The bus hardware lock
-      just confuses the logic, so remove it from the locksets we're
-      considering before doing anything else. */
-   lset_new = del_BHL( lset_new );
-
-   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
-      /* The post-transition lock set is not empty.  So we are not
-         interested.  We're only interested in spotting transitions
-         that make locksets become empty. */
-      return;
-   }
-
-   /* lset_new is now empty */
-   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
-   tl_assert(card_new == 0);
-
-   lset_old = del_BHL( lset_old );
-   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
-
-   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
-                      (Int)lset_old, card_old, (Int)lset_new, card_new );
-
-   if (card_old == 0) {
-      /* The old lockset was also empty.  Not interesting. */
-      return;
-   }
-
-   tl_assert(card_old > 0);
-   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
-
-   /* Now we know we've got a transition from a nonempty lockset to an
-      empty one.  So lset_old must be the set of locks lost.  Record
-      some details.  If there is more than one element in the lossage
-      set, just choose one arbitrarily -- not the best, but at least
-      it's simple. */
-
-   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
-   if (0) VG_(printf)("lossage %ld %p\n",
-                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
-   if (lk->appeared_at) {
-      if (ga_to_lastlock == NULL)
-         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
-      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
-      stats__ga_LL_adds++;
-   }
-}
-
-/* This queries the table (ga_to_lastlock) made by
-   record_last_lock_lossage, when constructing error messages.  It
-   attempts to find the ExeContext of the allocation or initialisation
-   point for the lossage lock associated with 'ga'. */
-
-static ExeContext* maybe_get_lastlock_initpoint ( Addr ga ) 
-{
-   ExeContext* ec_hint = NULL;
-   if (ga_to_lastlock != NULL 
-       && VG_(lookupFM)(ga_to_lastlock, 
-                        NULL, (Word*)&ec_hint, ga)) {
-      tl_assert(ec_hint != NULL);
-      return ec_hint;
-   } else {
-      return NULL;
-   }
-}
+///* Last-lock-lossage records.  This mechanism exists to help explain
+//   to programmers why we are complaining about a race.  The idea is to
+//   monitor all lockset transitions.  When a previously nonempty
+//   lockset becomes empty, the lock(s) that just disappeared (the
+//   "lossage") are the locks that have consistently protected the
+//   location (ga_of_access) in question for the longest time.  Most of
+//   the time the lossage-set is a single lock.  Because the
+//   lossage-lock is the one that has survived longest, there is there
+//   is a good chance that it is indeed the lock that the programmer
+//   intended to use to protect the location.
+//
+//   Note that we cannot in general just look at the lossage set when we
+//   see a transition to ShM(...,empty-set), because a transition to an
+//   empty lockset can happen arbitrarily far before the point where we
+//   want to report an error.  This is in the case where there are many
+//   transitions ShR -> ShR, all with an empty lockset, and only later
+//   is there a transition to ShM.  So what we want to do is note the
+//   lossage lock at the point where a ShR -> ShR transition empties out
+//   the lockset, so we can present it later if there should be a
+//   transition to ShM.
+//
+//   So this function finds such transitions.  For each, it associates
+//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
+//   we do not record the Lock* directly as that may disappear later,
+//   but instead the ExeContext inside the Lock which says where it was
+//   initialised or first locked.  ExeContexts are permanent so keeping
+//   them indefinitely is safe.
+//
+//   A boring detail: the hardware bus lock is not interesting in this
+//   respect, so we first remove that from the pre/post locksets.
+//*/
+//
+//static UWord stats__ga_LL_adds = 0;
+//
+//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
+//
+//static 
+//void record_last_lock_lossage ( Addr ga_of_access,
+//                                WordSetID lset_old, WordSetID lset_new )
+//{
+//   Lock* lk;
+//   Int   card_old, card_new;
+//
+//   tl_assert(lset_old != lset_new);
+//
+//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
+//                      (Int)lset_old, 
+//                      HG_(cardinalityWS)(univ_lsets,lset_old),
+//                      (Int)lset_new, 
+//                      HG_(cardinalityWS)(univ_lsets,lset_new),
+//                      ga_of_access );
+//
+//   /* This is slow, but at least it's simple.  The bus hardware lock
+//      just confuses the logic, so remove it from the locksets we're
+//      considering before doing anything else. */
+//   lset_new = del_BHL( lset_new );
+//
+//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
+//      /* The post-transition lock set is not empty.  So we are not
+//         interested.  We're only interested in spotting transitions
+//         that make locksets become empty. */
+//      return;
+//   }
+//
+//   /* lset_new is now empty */
+//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
+//   tl_assert(card_new == 0);
+//
+//   lset_old = del_BHL( lset_old );
+//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
+//
+//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
+//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
+//
+//   if (card_old == 0) {
+//      /* The old lockset was also empty.  Not interesting. */
+//      return;
+//   }
+//
+//   tl_assert(card_old > 0);
+//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
+//
+//   /* Now we know we've got a transition from a nonempty lockset to an
+//      empty one.  So lset_old must be the set of locks lost.  Record
+//      some details.  If there is more than one element in the lossage
+//      set, just choose one arbitrarily -- not the best, but at least
+//      it's simple. */
+//
+//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
+//   if (0) VG_(printf)("lossage %ld %p\n",
+//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
+//   if (lk->appeared_at) {
+//      if (ga_to_lastlock == NULL)
+//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
+//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
+//      stats__ga_LL_adds++;
+//   }
+//}
+//
+///* This queries the table (ga_to_lastlock) made by
+//   record_last_lock_lossage, when constructing error messages.  It
+//   attempts to find the ExeContext of the allocation or initialisation
+//   point for the lossage lock associated with 'ga'. */
+//
+//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga ) 
+//{
+//   ExeContext* ec_hint = NULL;
+//   if (ga_to_lastlock != NULL 
+//       && VG_(lookupFM)(ga_to_lastlock, 
+//                        NULL, (Word*)&ec_hint, ga)) {
+//      tl_assert(ec_hint != NULL);
+//      return ec_hint;
+//   } else {
+//      return NULL;
+//   }
+//}
 
 
 /*----------------------------------------------------------------*/
@@ -1797,20 +1797,20 @@
    LIBHB_WRITE_N(hbthr, a, size);
 }
 
-static void evh__bus_lock(void) {
-   Thread* thr;
-   if (0) VG_(printf)("evh__bus_lock()\n");
-   thr = get_current_Thread();
-   tl_assert(thr); /* cannot fail - Thread* must already exist */
-   evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
-}
-static void evh__bus_unlock(void) {
-   Thread* thr;
-   if (0) VG_(printf)("evh__bus_unlock()\n");
-   thr = get_current_Thread();
-   tl_assert(thr); /* cannot fail - Thread* must already exist */
-   evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
-}
+//static void evh__bus_lock(void) {
+//   Thread* thr;
+//   if (0) VG_(printf)("evh__bus_lock()\n");
+//   thr = get_current_Thread();
+//   tl_assert(thr); /* cannot fail - Thread* must already exist */
+//   evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
+//}
+//static void evh__bus_unlock(void) {
+//   Thread* thr;
+//   if (0) VG_(printf)("evh__bus_unlock()\n");
+//   thr = get_current_Thread();
+//   tl_assert(thr); /* cannot fail - Thread* must already exist */
+//   evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
+//}
 
 
 /* -------------- events to do with mutexes -------------- */
@@ -3277,38 +3277,38 @@
 }
 
 
-static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
-{
-   switch (event) {
-      case Imbe_SnoopedStoreBegin:
-      case Imbe_SnoopedStoreEnd:
-         /* These arise from ppc stwcx. insns.  They should perhaps be
-            handled better. */
-         break;
-      case Imbe_Fence:
-         break; /* not interesting */
-      case Imbe_BusLock:
-      case Imbe_BusUnlock:
-         addStmtToIRSB(
-            bbOut,
-            IRStmt_Dirty(
-               unsafeIRDirty_0_N( 
-                  0/*regparms*/, 
-                  event == Imbe_BusLock ? "evh__bus_lock"
-                                        : "evh__bus_unlock",
-                  VG_(fnptr_to_fnentry)(
-                     event == Imbe_BusLock ? &evh__bus_lock 
-                                           : &evh__bus_unlock 
-                  ),
-                  mkIRExprVec_0() 
-               )
-            )
-         );
-         break;
-      default:
-         tl_assert(0);
-   }
-}
+//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
+//{
+//   switch (event) {
+//      case Imbe_SnoopedStoreBegin:
+//      case Imbe_SnoopedStoreEnd:
+//         /* These arise from ppc stwcx. insns.  They should perhaps be
+//            handled better. */
+//         break;
+//      case Imbe_Fence:
+//         break; /* not interesting */
+//      case Imbe_BusLock:
+//      case Imbe_BusUnlock:
+//         addStmtToIRSB(
+//            bbOut,
+//            IRStmt_Dirty(
+//               unsafeIRDirty_0_N( 
+//                  0/*regparms*/, 
+//                  event == Imbe_BusLock ? "evh__bus_lock"
+//                                        : "evh__bus_unlock",
+//                  VG_(fnptr_to_fnentry)(
+//                     event == Imbe_BusLock ? &evh__bus_lock 
+//                                           : &evh__bus_unlock 
+//                  ),
+//                  mkIRExprVec_0() 
+//               )
+//            )
+//         );
+//         break;
+//      default:
+//         tl_assert(0);
+//   }
+//}
 
 
 static
@@ -3799,9 +3799,9 @@
       VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                   (Int)HG_(cardinalityWSU)( univ_laog ));
 
-      VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
-                  stats__ga_LL_adds,
-                  (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
+      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
+      //            stats__ga_LL_adds,
+      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
 
       VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                   HG_(stats__LockN_to_P_queries),
@@ -3846,13 +3846,7 @@
 }
 
 static
-struct EC_*  for_libhb__stacktrace_to_EC ( Addr* frames, UWord nFrames )
-{
-   return VG_(make_ExeContext_from_StackTrace)( frames, (UInt)nFrames );
-}
-
-static
-struct EC_*  for_libhb__get_EC ( Thr* hbt )
+ExeContext*  for_libhb__get_EC ( Thr* hbt )
 {
    Thread*     thr;
    ThreadId    tid;
@@ -3862,7 +3856,7 @@
    tl_assert(thr);
    tid = map_threads_maybe_reverse_lookup_SLOW(thr);
    ec = VG_(record_ExeContext)( tid, 0 );
-   return (struct EC_*) ec;
+   return ec;
 }
 
 
@@ -3948,7 +3942,6 @@
 
    /////////////////////////////////////////////
    hbthr_root = libhb_init( for_libhb__get_stacktrace, 
-                            for_libhb__stacktrace_to_EC,
                             for_libhb__get_EC );
    /////////////////////////////////////////////