A big commit size-wise, but a small one concept-wise:  removed the ThreadState
type from the skins' view, replacing all uses of it with ThreadId.  Much
cleaner.  Had to change the way VG_(get_ExeContext)() works slightly.  Bumped
the core/skin major interface version, since this change breaks the old
interface.  Also fixed a few minor related things here and there.
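
(Illustrative sketch only, not part of this patch: the standalone C program
below uses invented names -- none of them are real Valgrind declarations --
to show the shape of the new arrangement.  The core keeps its per-thread
state private and hands skins an opaque ThreadId; anything a skin needs,
e.g. an ExeContext via VG_(get_ExeContext)(), it asks the core for using
that id rather than poking at a ThreadState pointer directly.)

   /* All names below are made up for the illustration. */
   #include <stdio.h>

   typedef unsigned int ThreadId;           /* opaque handle given to skins  */

   typedef struct {                         /* core-private per-thread state */
      ThreadId      tid;
      unsigned long last_ip;                /* e.g. last instruction pointer */
   } CoreThreadState;

   static CoreThreadState core_threads[4];  /* owned by the core, not skins  */

   /* Core-provided accessor: skins pass a ThreadId, never a pointer into
      core_threads[]. */
   static unsigned long core_get_last_ip ( ThreadId tid )
   {
      return core_threads[tid].last_ip;
   }

   /* "Skin" side: before this change a callback like this would have been
      handed a pointer to the core's thread state; now it sees only the
      ThreadId and calls back into the core for what it needs. */
   static void skin_track_event ( ThreadId tid )
   {
      printf ( "event on thread %u at ip %#lx\n",
               tid, core_get_last_ip ( tid ) );
   }

   int main ( void )
   {
      core_threads[1].tid     = 1;
      core_threads[1].last_ip = 0x8048000UL;
      skin_track_event ( 1 );
      return 0;
   }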


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1782 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/vg_replace_malloc.c b/coregrind/vg_replace_malloc.c
index 1d2de8b..636e851 100644
--- a/coregrind/vg_replace_malloc.c
+++ b/coregrind/vg_replace_malloc.c
@@ -45,7 +45,7 @@
 
 /* Sidestep the normal check which disallows using valgrind.h directly. */
 #define __VALGRIND_SOMESKIN_H
-#include "valgrind.h"            /* for VALGRIND_NON_SIMD_tstCALL[12] */
+#include "valgrind.h"            /* for VALGRIND_NON_SIMD_CALL[12] */
 
 /*------------------------------------------------------------*/
 /*--- Command line options                                 ---*/
@@ -150,7 +150,7 @@
    MAYBE_SLOPPIFY(n);
 
    if (VG_(is_running_on_simd_CPU)()) {
-      v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(malloc), n );
+      v = (void*)VALGRIND_NON_SIMD_CALL1( SK_(malloc), n );
    } else if (VG_(clo_alignment) != 4) {
       v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
    } else {
@@ -169,7 +169,7 @@
    MAYBE_SLOPPIFY(n);
 
    if (VG_(is_running_on_simd_CPU)()) {
-      v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_new), n );
+      v = (void*)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_new), n );
    } else if (VG_(clo_alignment) != 4) {
       v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
    } else {
@@ -194,7 +194,7 @@
    MAYBE_SLOPPIFY(n);
 
    if (VG_(is_running_on_simd_CPU)()) {
-      v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_vec_new), n );
+      v = (void*)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_vec_new), n );
    } else if (VG_(clo_alignment) != 4) {
       v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
    } else {
@@ -217,7 +217,7 @@
    if (p == NULL) 
       return;
    if (VG_(is_running_on_simd_CPU)()) {
-      (void)VALGRIND_NON_SIMD_tstCALL1( SK_(free), p );
+      (void)VALGRIND_NON_SIMD_CALL1( SK_(free), p );
    } else {
       VG_(arena_free)(VG_AR_CLIENT, p);      
    }
@@ -230,7 +230,7 @@
    if (p == NULL) 
       return;
    if (VG_(is_running_on_simd_CPU)()) {
-      (void)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_delete), p );
+      (void)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_delete), p );
    } else {
       VG_(arena_free)(VG_AR_CLIENT, p);
    }
@@ -249,7 +249,7 @@
    if (p == NULL) 
       return;
    if (VG_(is_running_on_simd_CPU)()) {
-      (void)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_vec_delete), p );
+      (void)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_vec_delete), p );
    } else {
       VG_(arena_free)(VG_AR_CLIENT, p);
    }
@@ -270,7 +270,7 @@
    MAYBE_SLOPPIFY(size);
 
    if (VG_(is_running_on_simd_CPU)()) {
-      v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(calloc), nmemb, size );
+      v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(calloc), nmemb, size );
    } else {
       v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
    }
@@ -296,7 +296,7 @@
       return NULL;
    }   
    if (VG_(is_running_on_simd_CPU)()) {
-      v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(realloc), ptrV, new_size );
+      v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(realloc), ptrV, new_size );
    } else {
       v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
    }
@@ -314,7 +314,7 @@
    MAYBE_SLOPPIFY(n);
 
    if (VG_(is_running_on_simd_CPU)()) {
-      v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(memalign), alignment, n );
+      v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(memalign), alignment, n );
    } else {
       v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
    }