Get rid of baseBlock.  Now, when generated code is running, the guest
state pointer points directly at the ThreadState.arch.vex field, so the
guest state is updated in place, avoiding a lot of code (and wasted
time) that copied state back and forth to baseBlock.

Fix the zillions of other places in the system that need the current
thread id: it is now passed in explicitly wherever it is needed, rather
than each callee looking it up via VG_(get_current_or_recent_tid)().
A rough sketch of that pattern follows.
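
As a standalone illustration of the second change (made-up names, not
Valgrind's actual API): callers that already know the ThreadId hand it
down the call chain as a parameter, instead of each helper re-deriving
it from global state.

   #include <stdio.h>
   #include <stdlib.h>

   typedef unsigned int ThreadId;

   /* Old style: every helper re-derives the current thread id itself. */
   static ThreadId get_current_tid_from_global ( void ) { return 1; }

   static void record_alloc_old ( void* p, size_t size )
   {
      ThreadId tid = get_current_tid_from_global();  /* repeated lookup */
      printf("old: tid %u allocated %zu bytes at %p\n", tid, size, p);
   }

   /* New style: tid is threaded through as an explicit parameter. */
   static void record_alloc_new ( ThreadId tid, void* p, size_t size )
   {
      printf("new: tid %u allocated %zu bytes at %p\n", tid, size, p);
   }

   static void* malloc_wrapper ( ThreadId tid, size_t size )
   {
      void* p = malloc(size);
      record_alloc_new(tid, p, size);  /* no global lookup needed */
      return p;
   }

   int main ( void )
   {
      ThreadId tid = 1;   /* whoever dispatched the request knows the id */
      void* p1 = malloc(16);
      record_alloc_old(p1, 16);
      free(p1);
      void* p2 = malloc_wrapper(tid, 32);
      free(p2);
      return 0;
   }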



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3090 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/memcheck/mac_leakcheck.c b/memcheck/mac_leakcheck.c
index 6cd7e32..df2a907 100644
--- a/memcheck/mac_leakcheck.c
+++ b/memcheck/mac_leakcheck.c
@@ -375,6 +375,7 @@
    reachable blocks should be shown.
 */
 void MAC_(do_detect_memory_leaks) (
+   ThreadId tid,
    Bool is_valid_64k_chunk ( UInt ),
    Bool is_valid_address ( Addr )
 )
@@ -499,7 +500,7 @@
 
       print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
       is_suppressed = 
-         VG_(unique_error) ( VG_(get_current_tid)(), LeakErr, (UInt)i+1,
+         VG_(unique_error) ( tid, LeakErr, (UInt)i+1,
                              (Char*)n_lossrecords, (void*) p_min,
                              p_min->allocated_at, print_record,
                              /*allow_GDB_attach*/False, /*count_error*/False );
diff --git a/memcheck/mac_malloc_wrappers.c b/memcheck/mac_malloc_wrappers.c
index 1cc3575..93ca3af 100644
--- a/memcheck/mac_malloc_wrappers.c
+++ b/memcheck/mac_malloc_wrappers.c
@@ -132,7 +132,8 @@
 
 /* Allocate its shadow chunk, put it on the appropriate list. */
 static
-void add_MAC_Chunk ( Addr p, SizeT size, MAC_AllocKind kind, VgHashTable table)
+void add_MAC_Chunk ( ThreadId tid,
+                     Addr p, SizeT size, MAC_AllocKind kind, VgHashTable table)
 {
    MAC_Chunk* mc;
 
@@ -140,7 +141,7 @@
    mc->data      = p;
    mc->size      = size;
    mc->allockind = kind;
-   mc->where     = VG_(get_ExeContext)(VG_(get_current_or_recent_tid)());
+   mc->where     = VG_(get_ExeContext)(tid);
 
    /* Paranoia ... ensure this area is off-limits to the client, so
       the mc->data field isn't visible to the leak checker.  If memory
@@ -148,7 +149,7 @@
       VG_(malloc) should be noaccess as far as the client is
       concerned. */
    if (!MAC_(check_noaccess)( (Addr)mc, sizeof(MAC_Chunk), NULL )) {
-      VG_(tool_panic)("add_MAC_chunk: shadow area is accessible");
+      VG_(tool_panic)("add_MAC_Chunk: shadow area is accessible");
    } 
 
    VG_(HT_add_node)( table, (VgHashNode*)mc );
@@ -182,7 +183,8 @@
 
 /* Allocate memory and note change in memory available */
 __inline__
-void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
+void* MAC_(new_block) ( ThreadId tid,
+                        Addr p, SizeT size, SizeT align, UInt rzB,
                         Bool is_zeroed, MAC_AllocKind kind, VgHashTable table)
 {
    VGP_PUSHCC(VgpCliMalloc);
@@ -202,7 +204,7 @@
       if (is_zeroed) VG_(memset)((void*)p, 0, size);
    }
 
-   add_MAC_Chunk( p, size, kind, table );
+   add_MAC_Chunk( tid, p, size, kind, table );
 
    MAC_(ban_mem_heap)( p-rzB, rzB );
    MAC_(new_mem_heap)( p, size, is_zeroed );
@@ -213,63 +215,64 @@
    return (void*)p;
 }
 
-void* TL_(malloc) ( SizeT n )
+void* TL_(malloc) ( ThreadId tid, SizeT n )
 {
    if (complain_about_silly_args(n, "malloc")) {
       return NULL;
    } else {
-      return MAC_(new_block) ( 0, n, VG_(clo_alignment), 
+      return MAC_(new_block) ( tid, 0, n, VG_(clo_alignment), 
          VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocMalloc,
          MAC_(malloc_list));
    }
 }
 
-void* TL_(__builtin_new) ( SizeT n )
+void* TL_(__builtin_new) ( ThreadId tid, SizeT n )
 {
    if (complain_about_silly_args(n, "__builtin_new")) {
       return NULL;
    } else {
-      return MAC_(new_block) ( 0, n, VG_(clo_alignment), 
+      return MAC_(new_block) ( tid, 0, n, VG_(clo_alignment), 
          VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocNew,
          MAC_(malloc_list));
    }
 }
 
-void* TL_(__builtin_vec_new) ( SizeT n )
+void* TL_(__builtin_vec_new) ( ThreadId tid, SizeT n )
 {
    if (complain_about_silly_args(n, "__builtin_vec_new")) {
       return NULL;
    } else {
-      return MAC_(new_block) ( 0, n, VG_(clo_alignment), 
+      return MAC_(new_block) ( tid, 0, n, VG_(clo_alignment), 
          VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocNewVec,
          MAC_(malloc_list));
    }
 }
 
-void* TL_(memalign) ( SizeT align, SizeT n )
+void* TL_(memalign) ( ThreadId tid, SizeT align, SizeT n )
 {
    if (complain_about_silly_args(n, "memalign")) {
       return NULL;
    } else {
-      return MAC_(new_block) ( 0, n, align, 
+      return MAC_(new_block) ( tid, 0, n, align, 
          VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocMalloc,
          MAC_(malloc_list));
    }
 }
 
-void* TL_(calloc) ( SizeT nmemb, SizeT size1 )
+void* TL_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
 {
    if (complain_about_silly_args2(nmemb, size1)) {
       return NULL;
    } else {
-      return MAC_(new_block) ( 0, nmemb*size1, VG_(clo_alignment),
+      return MAC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
          VG_(vg_malloc_redzone_szB), /*is_zeroed*/True, MAC_AllocMalloc,
          MAC_(malloc_list));
    }
 }
 
 static
-void die_and_free_mem ( MAC_Chunk* mc,
+void die_and_free_mem ( ThreadId tid,
+                        MAC_Chunk* mc,
                         MAC_Chunk** prev_chunks_next_ptr, SizeT rzB )
 {
    /* Note: ban redzones again -- just in case user de-banned them
@@ -287,18 +290,17 @@
    /* Put it out of harm's way for a while, if not from a client request */
    if (MAC_AllocCustom != mc->allockind) {
       /* Record where freed */
-      mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );
+      mc->where = VG_(get_ExeContext) ( tid );
       add_to_freed_queue ( mc );
    } else
       VG_(free) ( mc );
 }
 
 __inline__
-void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
+void MAC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MAC_AllocKind kind )
 {
    MAC_Chunk*  mc;
    MAC_Chunk** prev_chunks_next_ptr;
-   ThreadId    tid = VG_(get_current_or_recent_tid)();
 
    VGP_PUSHCC(VgpCliMalloc);
 
@@ -317,31 +319,33 @@
       MAC_(record_freemismatch_error) ( tid, p );
    }
 
-   die_and_free_mem ( mc, prev_chunks_next_ptr, rzB );
+   die_and_free_mem ( tid, mc, prev_chunks_next_ptr, rzB );
    VGP_POPCC(VgpCliMalloc);
 }
 
-void TL_(free) ( void* p )
+void TL_(free) ( ThreadId tid, void* p )
 {
-   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc);
+   MAC_(handle_free)( 
+      tid, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc );
 }
 
-void TL_(__builtin_delete) ( void* p )
+void TL_(__builtin_delete) ( ThreadId tid, void* p )
 {
-   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
+   MAC_(handle_free)(
+      tid, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
 }
 
-void TL_(__builtin_vec_delete) ( void* p )
+void TL_(__builtin_vec_delete) ( ThreadId tid, void* p )
 {
-   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
+   MAC_(handle_free)(
+      tid, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
 }
 
-void* TL_(realloc) ( void* p, SizeT new_size )
+void* TL_(realloc) ( ThreadId tid, void* p, SizeT new_size )
 {
    MAC_Chunk  *mc;
    MAC_Chunk **prev_chunks_next_ptr;
    UInt        i;
-   ThreadId    tid = VG_(get_current_or_recent_tid)();
 
    VGP_PUSHCC(VgpCliMalloc);
 
@@ -404,13 +408,14 @@
          ((UChar*)p_new)[i] = ((UChar*)p)[i];
 
       /* Free old memory */
-      die_and_free_mem ( mc, prev_chunks_next_ptr,
+      die_and_free_mem ( tid, mc, prev_chunks_next_ptr,
                          VG_(vg_malloc_redzone_szB) );
 
       /* this has to be after die_and_free_mem, otherwise the
          former succeeds in shorting out the new block, not the
          old, in the case when both are on the same list.  */
-      add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc,  MAC_(malloc_list) );
+      add_MAC_Chunk ( tid, p_new, new_size, 
+                           MAC_AllocMalloc, MAC_(malloc_list) );
 
       VGP_POPCC(VgpCliMalloc);
       return (void*)p_new;
@@ -464,7 +469,7 @@
                                          (void*)&prev_next );
 
    if (mp == NULL) {
-      ThreadId      tid = VG_(get_current_or_recent_tid)();
+      ThreadId      tid = VG_(get_current_tid)();
 
       MAC_(record_illegal_mempool_error) ( tid, pool );
       return;
@@ -477,7 +482,7 @@
    VG_(free)(mp);
 }
 
-void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size)
+void MAC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT size)
 {
    MAC_Mempool*  mp;
    MAC_Mempool** prev_next;
@@ -486,13 +491,11 @@
                                         (void*)&prev_next );
 
    if (mp == NULL) {
-      ThreadId      tid = VG_(get_current_or_recent_tid)();
-
       MAC_(record_illegal_mempool_error) ( tid, pool );
       return;
    }
 
-   MAC_(new_block)(addr, size, /*ignored*/0, mp->rzB, mp->is_zeroed,
+   MAC_(new_block)(tid, addr, size, /*ignored*/0, mp->rzB, mp->is_zeroed,
                    MAC_AllocCustom, mp->chunks);
 }
 
@@ -502,8 +505,7 @@
    MAC_Mempool** prev_pool;
    MAC_Chunk*    mc;
    MAC_Chunk**   prev_chunk;
-   ThreadId      tid = VG_(get_current_or_recent_tid)();
-
+   ThreadId      tid = VG_(get_current_tid)();
 
    mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list), (UWord)pool,
                                        (void*)&prev_pool);
@@ -521,7 +523,7 @@
       return;
    }
 
-   die_and_free_mem(mc, prev_chunk, mp->rzB);
+   die_and_free_mem ( tid, mc, prev_chunk, mp->rzB );
 }
 
 typedef
diff --git a/memcheck/mac_needs.c b/memcheck/mac_needs.c
index 561bb87..14edf23 100644
--- a/memcheck/mac_needs.c
+++ b/memcheck/mac_needs.c
@@ -430,7 +430,10 @@
    MAC_Error err_extra;
    Bool      just_below_esp;
 
-   just_below_esp = is_just_below_ESP( VG_(get_stack_pointer)(), a );
+   just_below_esp = is_just_below_ESP( 
+                       VG_(get_stack_pointer)(tid),
+                       a 
+                    );
 
    /* If this is caused by an access immediately below %ESP, and the
       user asks nicely, we just ignore it. */
@@ -512,12 +515,11 @@
    VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &err_extra );
 }
 
-
-// This one not passed a ThreadId, so it grabs it itself.
-void MAC_(record_overlap_error) ( Char* function, OverlapExtra* ov_extra )
+void MAC_(record_overlap_error) ( ThreadId tid, 
+                                  Char* function, OverlapExtra* ov_extra )
 {
-   VG_(maybe_record_error)( VG_(get_current_or_recent_tid)(), 
-                            OverlapErr, /*addr*/0, /*s*/function, ov_extra );
+   VG_(maybe_record_error)( 
+      tid, OverlapErr, /*addr*/0, /*s*/function, ov_extra );
 }
 
 
@@ -826,7 +828,7 @@
    init_prof_mem();
 }
 
-void MAC_(common_fini)(void (*leak_check)(void))
+void MAC_(common_fini)(void (*leak_check)(ThreadId))
 {
    MAC_(print_malloc_stats)();
 
@@ -838,7 +840,8 @@
       VG_(message)(Vg_UserMsg, 
                    "For counts of detected errors, rerun with: -v");
    }
-   if (MAC_(clo_leak_check)) leak_check();
+   if (MAC_(clo_leak_check)) 
+      leak_check( 1/*bogus ThreadID*/ );
 
    done_prof_mem();
 }
@@ -855,11 +858,6 @@
       "   program to incorporate the updates in the Valgrind header files.\n"
       "   You shouldn't need to change the text of your program at all.\n"
       "   Everything should then work as before.  Sorry for the bother.\n";
-
-   // Not using 'tid' here because MAC_(new_block)() and MAC_(handle_free)()
-   // grab it themselves.  But what they grab should match 'tid', check
-   // this.
-   tl_assert(tid == VG_(get_current_or_recent_tid)());
    
    switch (arg[0]) {
    case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
@@ -883,7 +881,7 @@
       UInt rzB       =       arg[3];
       Bool is_zeroed = (Bool)arg[4];
 
-      MAC_(new_block) ( p, sizeB, /*ignored*/0, rzB, is_zeroed, 
+      MAC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed, 
                         MAC_AllocCustom, MAC_(malloc_list) );
       return True;
    }
@@ -891,7 +889,7 @@
       Addr p         = (Addr)arg[1];
       UInt rzB       =       arg[2];
 
-      MAC_(handle_free) ( p, rzB, MAC_AllocCustom );
+      MAC_(handle_free) ( tid, p, rzB, MAC_AllocCustom );
       return True;
    }
 
@@ -920,7 +918,7 @@
       Addr addr      = (Addr)arg[2];
       UInt size      =       arg[3];
 
-      MAC_(mempool_alloc) ( pool, addr, size );
+      MAC_(mempool_alloc) ( tid, pool, addr, size );
       return True;
    }
 
diff --git a/memcheck/mac_shared.h b/memcheck/mac_shared.h
index 2b57a5e..15bc80f 100644
--- a/memcheck/mac_shared.h
+++ b/memcheck/mac_shared.h
@@ -305,14 +305,21 @@
 
 extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );
 
-extern void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
+extern void* MAC_(new_block) ( ThreadId tid,
+                               Addr p, SizeT size, SizeT align, UInt rzB,
                                Bool is_zeroed, MAC_AllocKind kind,
                                VgHashTable table);
-extern void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind );
+
+extern void MAC_(handle_free) ( ThreadId tid,
+                                Addr p, UInt rzB, MAC_AllocKind kind );
 
 extern void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed);
+
 extern void MAC_(destroy_mempool)(Addr pool);
-extern void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size);
+
+extern void MAC_(mempool_alloc)(ThreadId tid, 
+                                Addr pool, Addr addr, SizeT size);
+
 extern void MAC_(mempool_free)(Addr pool, Addr addr);
 
 extern void MAC_(record_address_error)     ( ThreadId tid, Addr a,
@@ -324,7 +331,8 @@
 extern void MAC_(record_jump_error)        ( ThreadId tid, Addr a );
 extern void MAC_(record_free_error)        ( ThreadId tid, Addr a );
 extern void MAC_(record_freemismatch_error)( ThreadId tid, Addr a );
-extern void MAC_(record_overlap_error)     ( Char* function, OverlapExtra* oe );
+extern void MAC_(record_overlap_error)     ( ThreadId tid, 
+                                             Char* function, OverlapExtra* oe );
 extern void MAC_(record_illegal_mempool_error) ( ThreadId tid, Addr pool );
 
 extern void MAC_(pp_shared_Error)          ( Error* err);
@@ -332,7 +340,7 @@
 extern MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk)( Bool (*p)(MAC_Chunk*, void*), void* d );
 
 extern void MAC_(common_pre_clo_init) ( void );
-extern void MAC_(common_fini)         ( void (*leak_check)(void) );
+extern void MAC_(common_fini)         ( void (*leak_check)(ThreadId) );
 
 extern Bool MAC_(handle_common_client_requests) ( ThreadId tid, 
                                                   UWord* arg_block, UWord* ret );
@@ -344,6 +352,7 @@
                                          UInt n_total_records); 
                            
 extern void MAC_(do_detect_memory_leaks) (
+          ThreadId tid,
           Bool is_valid_64k_chunk ( UInt ),
           Bool is_valid_address   ( Addr )
        );
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 3e51e72..94667c8 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -1474,9 +1474,10 @@
 /* Leak detector for this tool.  We don't actually do anything, merely
    run the generic leak detector with suitable parameters for this
    tool. */
-static void mc_detect_memory_leaks ( void )
+static void mc_detect_memory_leaks ( ThreadId tid )
 {
-   MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
+   MAC_(do_detect_memory_leaks) ( 
+      tid, mc_is_valid_64k_chunk, mc_is_valid_address );
 }
 
 
@@ -1815,7 +1816,7 @@
       }
 
       case VG_USERREQ__DO_LEAK_CHECK:
-         mc_detect_memory_leaks();
+         mc_detect_memory_leaks(tid);
 	 *ret = 0; /* return value is meaningless */
 	 break;