Changes made so that the "report this bug to" messages for skin errors are
distinguished from those for core errors:

  - Split up VG_(panic) into VG_(core_panic) and VG_(skin_panic)

  - Likewise, split vg_assert into vg_assert (for the core) and sk_assert
    (for skins)

  - Added a new need string: `bug_reports_to'

  - Removed VG_(skin_error), which was a previous, wussy attempt at this
    change.  This also removes the need for the hacky redeclaration of
    VG_(skin_error) in vg_profile.c, which is good.

At the moment, Julian's and Nick's email addresses are hard-coded into each
skin individually, rather than being taken from a #define in vg_skin.h,
because that didn't feel quite right to me...  jseward@acm.org is still
provided via a #define for core errors, though.


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1164 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 6df6993..4a7ff24 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -149,7 +149,7 @@
 }
 
 #define PROF_EVENT(ev)                                  \
-   do { vg_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
+   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
         event_ctr[ev]++;                                \
    } while (False);
 
@@ -386,7 +386,7 @@
    /* It just happens that a SecMap occupies exactly 18 pages --
       although this isn't important, so the following assert is
       spurious. */
-   vg_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
+   sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
    map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
 
    for (i = 0; i < 8192; i++)
@@ -463,7 +463,7 @@
    UChar   abits8;
    PROF_EVENT(24);
 #  ifdef VG_DEBUG_MEMORY
-   vg_assert(IS_ALIGNED4_ADDR(a));
+   sk_assert(IS_ALIGNED4_ADDR(a));
 #  endif
    sm     = primary_map[a >> 16];
    sm_off = a & 0xFFFF;
@@ -479,7 +479,7 @@
    UInt    sm_off = a & 0xFFFF;
    PROF_EVENT(25);
 #  ifdef VG_DEBUG_MEMORY
-   vg_assert(IS_ALIGNED4_ADDR(a));
+   sk_assert(IS_ALIGNED4_ADDR(a));
 #  endif
    return ((UInt*)(sm->vbyte))[sm_off >> 2];
 }
@@ -515,15 +515,15 @@
       indicate bugs in our machinery.  30,000,000 is arbitrary, but so
       far all legitimate requests have fallen beneath that size. */
    /* 4 Mar 02: this is just stupid; get rid of it. */
-   /* vg_assert(len < 30000000); */
+   /* sk_assert(len < 30000000); */
 
    /* Check the permissions make sense. */
-   vg_assert(example_a_bit == VGM_BIT_VALID 
+   sk_assert(example_a_bit == VGM_BIT_VALID 
              || example_a_bit == VGM_BIT_INVALID);
-   vg_assert(example_v_bit == VGM_BIT_VALID 
+   sk_assert(example_v_bit == VGM_BIT_VALID 
              || example_v_bit == VGM_BIT_INVALID);
    if (example_a_bit == VGM_BIT_INVALID)
-      vg_assert(example_v_bit == VGM_BIT_INVALID);
+      sk_assert(example_v_bit == VGM_BIT_INVALID);
 
    /* The validity bits to write. */
    vbyte = example_v_bit==VGM_BIT_VALID 
@@ -568,7 +568,7 @@
       VGP_POPCC(VgpSetMem);
       return;
    }
-   vg_assert((a % 8) == 0 && len > 0);
+   sk_assert((a % 8) == 0 && len > 0);
 
    /* Once aligned, go fast. */
    while (True) {
@@ -588,7 +588,7 @@
       VGP_POPCC(VgpSetMem);
       return;
    }
-   vg_assert((a % 8) == 0 && len > 0 && len < 8);
+   sk_assert((a % 8) == 0 && len > 0 && len < 8);
 
    /* Finish the upper fragment. */
    while (True) {
@@ -604,7 +604,7 @@
    /* Check that zero page and highest page have not been written to
       -- this could happen with buggy syscall wrappers.  Today
       (2001-04-26) had precisely such a problem with __NR_setitimer. */
-   vg_assert(SK_(cheap_sanity_check)());
+   sk_assert(SK_(cheap_sanity_check)());
    VGP_POPCC(VgpSetMem);
 }
 
@@ -736,8 +736,8 @@
 
    PROF_EVENT(50);
 #  ifdef VG_DEBUG_MEMORY
-   vg_assert(IS_ALIGNED4_ADDR(a));
-   vg_assert(IS_ALIGNED4_ADDR(len));
+   sk_assert(IS_ALIGNED4_ADDR(a));
+   sk_assert(IS_ALIGNED4_ADDR(len));
 #  endif
 
    for ( ; a < a_past_end; a += 4) {
@@ -765,8 +765,8 @@
 
    PROF_EVENT(51);
 #  ifdef VG_DEBUG_MEMORY
-   vg_assert(IS_ALIGNED4_ADDR(a));
-   vg_assert(IS_ALIGNED4_ADDR(len));
+   sk_assert(IS_ALIGNED4_ADDR(a));
+   sk_assert(IS_ALIGNED4_ADDR(len));
 #  endif
 
    for ( ; a < a_past_end; a += 4) {
@@ -808,7 +808,7 @@
          break;
 
       default:
-         VG_(panic)("check_is_writable: Unknown or unexpected CorePart");
+         VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
       }
    }
 
@@ -844,7 +844,7 @@
          break;
 
       default:
-         VG_(panic)("check_is_readable: Unknown or unexpected CorePart");
+         VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
       }
    }
    VGP_POPCC(VgpCheckMem);
@@ -860,7 +860,7 @@
 
    VGP_PUSHCC(VgpCheckMem);
 
-   vg_assert(part == Vg_CoreSysCall);
+   sk_assert(part == Vg_CoreSysCall);
    ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
    if (!ok) {
       SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
@@ -1129,7 +1129,7 @@
       (which is the default), and the address is 4-aligned.  
       If not, Case 2 will have applied.
    */
-   vg_assert(SK_(clo_partial_loads_ok));
+   sk_assert(SK_(clo_partial_loads_ok));
    {
       UInt vw = VGM_WORD_INVALID;
       vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
@@ -1362,7 +1362,7 @@
    }
 
    VG_(printf)("size is %d\n", size);
-   VG_(panic)("vgmext_fpu_read_check: unhandled size");
+   VG_(skin_panic)("vgmext_fpu_read_check: unhandled size");
 #  endif
 }
 
@@ -1450,7 +1450,7 @@
    }
 
    VG_(printf)("size is %d\n", size);
-   VG_(panic)("vgmext_fpu_write_check: unhandled size");
+   VG_(skin_panic)("vgmext_fpu_write_check: unhandled size");
 #  endif
 }
 
@@ -1560,7 +1560,7 @@
    /* VG_(printf)("freelist sanity\n"); */
    for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
       n += sc->size;
-   vg_assert(n == vg_freed_list_volume);
+   sk_assert(n == vg_freed_list_volume);
 }
 
 /* Put a shadow chunk on the freed blocks queue, possibly freeing up
@@ -1571,11 +1571,11 @@
 
    /* Put it at the end of the freed list */
    if (vg_freed_list_end == NULL) {
-      vg_assert(vg_freed_list_start == NULL);
+      sk_assert(vg_freed_list_start == NULL);
       vg_freed_list_end = vg_freed_list_start = sc;
       vg_freed_list_volume = sc->size;
    } else {    
-      vg_assert(vg_freed_list_end->next == NULL);
+      sk_assert(vg_freed_list_end->next == NULL);
       vg_freed_list_end->next = sc;
       vg_freed_list_end = sc;
       vg_freed_list_volume += sc->size;
@@ -1587,13 +1587,13 @@
    
    while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
       /* freelist_sanity(); */
-      vg_assert(vg_freed_list_start != NULL);
-      vg_assert(vg_freed_list_end != NULL);
+      sk_assert(vg_freed_list_start != NULL);
+      sk_assert(vg_freed_list_end != NULL);
 
       sc1 = vg_freed_list_start;
       vg_freed_list_volume -= sc1->size;
       /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
-      vg_assert(vg_freed_list_volume >= 0);
+      sk_assert(vg_freed_list_volume >= 0);
 
       if (vg_freed_list_start == vg_freed_list_end) {
          vg_freed_list_start = vg_freed_list_end = NULL;
@@ -1671,33 +1671,33 @@
    sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
    sigbus_new.ksa_restorer = NULL;
    res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
-   vg_assert(res == 0);
+   sk_assert(res == 0);
 
    sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
    sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
    sigsegv_new.ksa_restorer = NULL;
    res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
-   vg_assert(res == 0+0);
+   sk_assert(res == 0+0);
 
    res =  VG_(ksigemptyset)( &unblockmask_new );
    res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
    res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
    res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
-   vg_assert(res == 0+0+0);
+   sk_assert(res == 0+0+0);
 
    res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
-   vg_assert(res == 0+0+0+0);
+   sk_assert(res == 0+0+0+0);
 
    res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
-   vg_assert(res == 0+0+0+0+0);
+   sk_assert(res == 0+0+0+0+0);
 
    res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
-   vg_assert(res == 0+0+0+0+0+0);
+   sk_assert(res == 0+0+0+0+0+0);
 
    /* The signal handlers are installed.  Actually do the memory scan. */
    numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
-   vg_assert(numPages == 1048576);
-   vg_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
+   sk_assert(numPages == 1048576);
+   sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
 
    nWordsNotified = 0;
 
@@ -1734,13 +1734,13 @@
 
    /* Restore signal state to whatever it was before. */
    res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
-   vg_assert(res == 0 +0);
+   sk_assert(res == 0 +0);
 
    res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
-   vg_assert(res == 0 +0 +0);
+   sk_assert(res == 0 +0 +0);
 
    res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
-   vg_assert(res == 0 +0 +0 +0);
+   sk_assert(res == 0 +0 +0 +0);
 
    return nWordsNotified;
 }
@@ -1830,13 +1830,13 @@
          lo = mid+1;
          continue;
       }
-      vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
+      sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
       retVal = mid;
       break;
    }
 
 #  ifdef VG_DEBUG_LEAKCHECK
-   vg_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
+   sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
 #  endif
    /* VG_(printf)("%d\n", retVal); */
    return retVal;
@@ -1921,8 +1921,8 @@
       sh_no = find_shadow_for ( ptr, vglc_shadows, vglc_n_shadows );
       if (sh_no != -1) {
          /* Found a block at/into which ptr points. */
-         vg_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
-         vg_assert(ptr < vglc_shadows[sh_no]->data 
+         sk_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
+         sk_assert(ptr < vglc_shadows[sh_no]->data 
                          + vglc_shadows[sh_no]->size);
          /* Decide whether Proper-ly or Interior-ly reached. */
          if (ptr == vglc_shadows[sh_no]->data) {
@@ -1954,7 +1954,7 @@
    /* VG_(get_malloc_shadows) allocates storage for shadows */
    vglc_shadows = VG_(get_malloc_shadows)( &vglc_n_shadows );
    if (vglc_n_shadows == 0) {
-      vg_assert(vglc_shadows == NULL);
+      sk_assert(vglc_shadows == NULL);
       VG_(message)(Vg_UserMsg, 
                    "No malloc'd blocks -- no leaks are possible.\n");
       return;
@@ -1968,9 +1968,9 @@
    /* Sanity check; assert that the blocks are now in order and that
       they don't overlap. */
    for (i = 0; i < vglc_n_shadows-1; i++) {
-      vg_assert( ((Addr)vglc_shadows[i]->data)
+      sk_assert( ((Addr)vglc_shadows[i]->data)
                  < ((Addr)vglc_shadows[i+1]->data) );
-      vg_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
+      sk_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
                  < ((Addr)vglc_shadows[i+1]->data) );
    }
 
@@ -2060,7 +2060,7 @@
             p_min = p;
          }
       }
-      vg_assert(p_min != NULL);
+      sk_assert(p_min != NULL);
 
       if ( (!SK_(clo_show_reachable)) && p_min->loss_mode == Proper) {
          p_min->num_blocks = 0;
@@ -2161,7 +2161,7 @@
          str[w++] = ' ';
    }
    str[w++] = 0;
-   vg_assert(w == 36);
+   sk_assert(w == 36);
 }
 
 /* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the thread
@@ -2257,7 +2257,7 @@
       VG_(message)(Vg_DebugMsg,
                    "probable sanity check failure for syscall number %d\n",
                    syscallno );
-      VG_(panic)("aborting due to the above ... bye!");
+      VG_(skin_panic)("aborting due to the above ... bye!");
    }
 }
 
@@ -2353,6 +2353,7 @@
 {
    needs->name                    = "memcheck";
    needs->description             = "a memory error detector";
+   needs->bug_reports_to          = "jseward@acm.org";
 
    needs->core_errors             = True;
    needs->skin_errors             = True;