64-bit cleanness: Converted malloc() et al to use SizeT rather than Int.
This required some tricks with casting to maintain Memcheck's silly (i.e.
negative) arg checking.  The allocator was also changed accordingly. It
should now be able to allocate blocks larger than 4GB on 64-bit platforms.


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@2906 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/massif/ms_main.c b/massif/ms_main.c
index baa6fa7..ca3a28d 100644
--- a/massif/ms_main.c
+++ b/massif/ms_main.c
@@ -178,7 +178,7 @@
    struct _HP_Chunk {
       struct _HP_Chunk* next;
       Addr              data;    // Ptr to actual block
-      UInt              size;    // Size requested
+      SizeT             size;    // Size requested
       XPt*              where;   // Where allocated; bottom-XPt
    }
    HP_Chunk;
@@ -344,7 +344,7 @@
 
 // Cheap allocation for blocks that never need to be freed.  Saves about 10%
 // for Konqueror startup with --depth=40.
-static void* perm_malloc(UInt n_bytes)
+static void* perm_malloc(SizeT n_bytes)
 {
    static Addr hp     = 0;    // current heap pointer
    static Addr hp_lim = 0;    // maximum usable byte in current block
@@ -664,7 +664,7 @@
 static void hp_census(void);
 
 static
-void* new_block ( void* p, Int size, UInt align, Bool is_zeroed )
+void* new_block ( void* p, SizeT size, SizeT align, Bool is_zeroed )
 {
    HP_Chunk* hc;
    Bool custom_alloc = (NULL == p);
@@ -738,27 +738,27 @@
 }
  
 
-void* SK_(malloc) ( Int n )
+void* SK_(malloc) ( SizeT n )
 {
    return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
 }
 
-void* SK_(__builtin_new) ( Int n )
+void* SK_(__builtin_new) ( SizeT n )
 {
    return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
 }
 
-void* SK_(__builtin_vec_new) ( Int n )
+void* SK_(__builtin_vec_new) ( SizeT n )
 {
    return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
 }
 
-void* SK_(calloc) ( Int m, Int size )
+void* SK_(calloc) ( SizeT m, SizeT size )
 {
    return new_block( NULL, m*size, VG_(clo_alignment), /*is_zeroed*/True );
 }
 
-void *SK_(memalign)( Int align, Int n )
+void *SK_(memalign)( SizeT align, SizeT n )
 {
    return new_block( NULL, n, align, False );
 }
@@ -778,13 +778,13 @@
    die_block( p, /*custom_free*/False );
 }
 
-void* SK_(realloc) ( void* p_old, Int new_size )
+void* SK_(realloc) ( void* p_old, SizeT new_size )
 {
    HP_Chunk*    hc;
    HP_Chunk**   remove_handle;
    Int          i;
    void*        p_new;
-   UInt         old_size;
+   SizeT        old_size;
    XPt         *old_where, *new_where;
    
    VGP_PUSHCC(VgpCliMalloc);