Streamlined alignment checking and made it consistent.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3168 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 22ce638..5996c1f 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -226,7 +226,7 @@
    UChar   abits8;
    PROF_EVENT(24);
 #  ifdef VG_DEBUG_MEMORY
-   tl_assert(IS_ALIGNED4_ADDR(a));
+   tl_assert(IS_4_ALIGNED(a));
 #  endif
    sm     = primary_map[a >> 16];
    sm_off = a & 0xFFFF;
@@ -242,7 +242,7 @@
    UInt    sm_off = a & 0xFFFF;
    PROF_EVENT(25);
 #  ifdef VG_DEBUG_MEMORY
-   tl_assert(IS_ALIGNED4_ADDR(a));
+   tl_assert(IS_4_ALIGNED(a));
 #  endif
    return ((UInt*)(sm->vbyte))[sm_off >> 2];
 }
@@ -257,7 +257,7 @@
    sm_off = a & 0xFFFF;
    PROF_EVENT(23);
 #  ifdef VG_DEBUG_MEMORY
-   tl_assert(IS_ALIGNED4_ADDR(a));
+   tl_assert(IS_4_ALIGNED(a));
 #  endif
    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
 }
@@ -819,7 +819,7 @@
 #  ifdef VG_DEBUG_MEMORY
    return mc_rd_V8_SLOWLY(a);
 #  else
-   if (IS_ALIGNED8_ADDR(a)) {
+   if (IS_8_ALIGNED(a)) {
       UInt    sec_no = shiftRight16(a) & 0xFFFF;
       SecMap* sm     = primary_map[sec_no];
       UInt    a_off  = (a & 0xFFFF) >> 3;
@@ -836,7 +836,7 @@
       }
    }
    else
-   if (IS_ALIGNED4_ADDR(a)) {
+   if (IS_4_ALIGNED(a)) {
       /* LITTLE-ENDIAN */
       UInt vLo =  MC_(helperc_LOADV4)(a+0);
       UInt vHi =  MC_(helperc_LOADV4)(a+4);
@@ -853,7 +853,7 @@
 #  ifdef VG_DEBUG_MEMORY
    mc_wr_V8_SLOWLY(a, vbytes);
 #  else
-   if (IS_ALIGNED8_ADDR(a)) {
+   if (IS_8_ALIGNED(a)) {
       UInt    sec_no = shiftRight16(a) & 0xFFFF;
       SecMap* sm     = primary_map[sec_no];
       UInt    a_off  = (a & 0xFFFF) >> 3;
@@ -872,7 +872,7 @@
       return;
    }
    else
-   if (IS_ALIGNED4_ADDR(a)) {
+   if (IS_4_ALIGNED(a)) {
       UInt vHi = (UInt)(vbytes >> 32);
       UInt vLo = (UInt)vbytes;
       /* LITTLE-ENDIAN */
@@ -1385,7 +1385,7 @@
    UInt* vbitsP = NULL; /* ditto */
 
    /* Check alignment of args. */
-   if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
+   if (!(IS_4_ALIGNED(data) && IS_4_ALIGNED(vbits)))
       return 2;
    if ((size & 3) != 0)
       return 2;
@@ -1462,7 +1462,7 @@
 {
    UInt vbytes;
    UChar abits;
-   tl_assert(IS_ALIGNED4_ADDR(a));
+   tl_assert(IS_4_ALIGNED(a));
    abits  = get_abits4_ALIGNED(a);
    vbytes = get_vbytes4_ALIGNED(a);
    if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {