Callgrind: use jmpkind from VEX for side exits.

To detect calls and returns, Callgrind's heuristic
starts with the jump kind obtained from VEX for
a control-flow-changing instruction. However, for
side exits, it always assumed a (conditional) jump,
which holds true for x86, but e.g. not for ARM.

This fixes Callgrind to use the jump kind found
by VEX for all exits, which should help make
Callgrind work on ARM. It also moves the check of
whether a boring jump is actually a fall-through
to instrumentation time. This changes (fixes) the
result for indirect jumps to the next instruction,
which should not be classified as fall-through
(anyway, this case is probably very rare).

This patch introduces its own enum for jump kinds
in Callgrind. This is less confusing than misusing
the VEX jump kind type, as Callgrind wants
to distinguish BB fall-throughs from real jumps
(both of which are Ijk_Boring in VEX).
Also, setup_bbcc now stores separately whether the
jump is conditional or not.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12269 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/callgrind/bbcc.c b/callgrind/bbcc.c
index 1f76342..374ed2a 100644
--- a/callgrind/bbcc.c
+++ b/callgrind/bbcc.c
@@ -555,7 +555,9 @@
   Addr sp;
   BB* last_bb;
   ThreadId tid;
-  Int jmpkind, passed = 0, csp;
+  ClgJumpKind jmpkind;
+  Bool isConditionalJump;
+  Int passed = 0, csp;
   Bool ret_without_call = False;
   Int popcount_on_return = 1;
 
@@ -581,16 +583,8 @@
   if (last_bb) {
       passed = CLG_(current_state).jmps_passed;
       CLG_ASSERT(passed <= last_bb->cjmp_count);
-      if (passed == last_bb->cjmp_count) {
-	  jmpkind = last_bb->jmpkind;
-
-	  /* VEX always gives a Boring jump kind also when passed trough */
-	  if ((jmpkind == Ijk_Boring) &&
-	      (last_bb->offset + last_bb->instr_len == bb->offset))
-	      jmpkind = JmpNone;
-      }
-      else
-	  jmpkind = JmpCond;
+      jmpkind = last_bb->jmp[passed].jmpkind;
+      isConditionalJump = (passed < last_bb->cjmp_count);
 
       /* if we are in a function which is skipped in the call graph, we
        * do not increment the exe counter to produce cost (if simulation off),
@@ -612,7 +606,8 @@
       }
   }
   else {
-      jmpkind = JmpNone;
+      jmpkind = jk_None;
+      isConditionalJump = False;
   }
 
   /* Manipulate JmpKind if needed, only using BB specific info */
@@ -620,7 +615,7 @@
   csp = CLG_(current_call_stack).sp;
 
   /* A return not matching the top call in our callstack is a jump */
-  if ( (jmpkind == Ijk_Ret) && (csp >0)) {
+  if ( (jmpkind == jk_Return) && (csp >0)) {
       Int csp_up = csp-1;      
       call_entry* top_ce = &(CLG_(current_call_stack).entry[csp_up]);
 
@@ -650,14 +645,14 @@
 	  }
       }
       if (popcount_on_return == 0) {
-	  jmpkind = Ijk_Boring;
+	  jmpkind = jk_Jump;
 	  ret_without_call = True;
       }
   }
 
   /* Should this jump be converted to call or pop/call ? */
-  if (( jmpkind != Ijk_Ret) &&
-      ( jmpkind != Ijk_Call) && last_bb) {
+  if (( jmpkind != jk_Return) &&
+      ( jmpkind != jk_Call) && last_bb) {
 
     /* We simulate a JMP/Cont to be a CALL if
      * - jump is in another ELF object or section kind
@@ -701,30 +696,32 @@
 	    }
 	}
 
-	jmpkind = Ijk_Call;
+	jmpkind = jk_Call;
 	call_emulation = True;
     }
   }
 
-  if (jmpkind == Ijk_Call)
+  if (jmpkind == jk_Call)
     skip = CLG_(get_fn_node)(bb)->skip;
 
   CLG_DEBUGIF(1) {
-      if (jmpkind == JmpCond)
-	  VG_(printf)("Conditional");
-      else if (jmpkind == JmpNone)
-	  VG_(printf)("None");
-      else
-	  ppIRJumpKind( jmpkind );
-
-      VG_(printf)(" %08lx -> %08lx, SP %08lx\n",
-		  last_bb ? bb_jmpaddr(last_bb) : 0,
-		  bb_addr(bb), sp);
+    if (isConditionalJump)
+      VG_(printf)("Cond-");
+    switch(jmpkind) {
+    case jk_None:   VG_(printf)("Fall-through"); break;
+    case jk_Jump:   VG_(printf)("Jump"); break;
+    case jk_Call:   VG_(printf)("Call"); break;
+    case jk_Return: VG_(printf)("Return"); break;
+    default:        tl_assert(0);
+    }
+    VG_(printf)(" %08lx -> %08lx, SP %08lx\n",
+		last_bb ? bb_jmpaddr(last_bb) : 0,
+		bb_addr(bb), sp);
   }
 
   /* Handle CALL/RET and update context to get correct BBCC */
   
-  if (jmpkind == Ijk_Ret) {
+  if (jmpkind == jk_Return) {
     
     if ((csp == 0) || 
 	((CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom) &&
@@ -745,10 +742,10 @@
     Int unwind_count = CLG_(unwind_call_stack)(sp, 0);
     if (unwind_count > 0) {
       /* if unwinding was done, this actually is a return */
-      jmpkind = Ijk_Ret;
+      jmpkind = jk_Return;
     }
     
-    if (jmpkind == Ijk_Call) {
+    if (jmpkind == jk_Call) {
       delayed_push = True;
 
       csp = CLG_(current_call_stack).sp;
@@ -848,8 +845,7 @@
 			 bbcc, sp, skip);
   }
 
-  if (CLG_(clo).collect_jumps &&
-      ((jmpkind == JmpCond) || (jmpkind == Ijk_Boring))) {
+  if (CLG_(clo).collect_jumps && (jmpkind == jk_Jump)) {
     
     /* Handle conditional jumps followed, i.e. trace arcs
      * This uses JCC structures, too */
@@ -857,15 +853,15 @@
     jCC* jcc = CLG_(get_jcc)(last_bbcc, passed, bbcc);
     CLG_ASSERT(jcc != 0);
     // Change from default, and check if already changed
-    if (jcc->jmpkind == Ijk_Call)
-      jcc->jmpkind = jmpkind;
+    if (jcc->jmpkind == jk_Call)
+      jcc->jmpkind = isConditionalJump ? jk_CondJump : jk_Jump;
     else {
 	// FIXME: Why can this fail?
 	// CLG_ASSERT(jcc->jmpkind == jmpkind);
     }
     
     jcc->call_counter++;
-    if (jmpkind == JmpCond)
+    if (isConditionalJump)
       CLG_(stat).jcnd_counter++;
     else
       CLG_(stat).jump_counter++;