Files updated, added, and removed in order to turn the ERASER branch into HEAD


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1086 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/vg_dispatch.S b/coregrind/vg_dispatch.S
index bd1c5b9..7cdb209 100644
--- a/coregrind/vg_dispatch.S
+++ b/coregrind/vg_dispatch.S
@@ -1,8 +1,8 @@
 
-##--------------------------------------------------------------------##
-##--- The core dispatch loop, for jumping to a code address.       ---##
-##---                                                vg_dispatch.S ---##
-##--------------------------------------------------------------------##
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address.       ---*/
+/*---                                                vg_dispatch.S ---*/
+/*--------------------------------------------------------------------*/
 
 /*
   This file is part of Valgrind, an x86 protected-mode emulator 
@@ -26,7 +26,7 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.
 
-  The GNU General Public License is contained in the file LICENSE.
+  The GNU General Public License is contained in the file COPYING.
 */
 
 #include "vg_constants.h"
@@ -59,9 +59,9 @@
 	
 .globl VG_(run_innerloop)
 VG_(run_innerloop):
-	#OYNK(1000)
+	/* OYNK(1000) */
 
-	# ----- entry point to VG_(run_innerloop) -----
+	/* ----- entry point to VG_(run_innerloop) ----- */
 	pushl	%ebx
 	pushl	%ecx
 	pushl	%edx
@@ -69,74 +69,98 @@
 	pushl	%edi
 	pushl	%ebp
 
-	# Set up the baseBlock pointer
+	/* Set up the baseBlock pointer */
 	movl	$VG_(baseBlock), %ebp
 
-	# fetch m_eip into %eax
+	/* fetch m_eip into %eax */
 	movl	VGOFF_(m_eip), %esi
 	movl	(%ebp, %esi, 4), %eax
 	
-	# Start off dispatching paranoically, since we no longer have
-	# any indication whether or not this might be a special call/ret
-	# transfer.
-	jmp	dispatch_stkadj
-	
-	
 dispatch_main:
-	# Jump here to do a new dispatch.
-	# %eax holds destination (original) address.
-	# %ebp indicates further details of the control transfer
-	# requested to the address in %eax.
-	#
-	# If ebp == & VG_(baseBlock), just jump next to %eax.
-	# 
-	# If ebp == VG_EBP_JMP_SYSCALL, do a system call before 
-	# continuing at eax.
-	#
-	# If ebp == VG_EBP_JMP_CLIENTREQ, do a client request before 
-	# continuing at eax.
-	#
-	# If %ebp has any other value, we panic.
+	/* Jump here to do a new dispatch.
+	   %eax holds destination (original) address.
+	   %ebp indicates further details of the control transfer
+	   requested to the address in %eax.
 	
+	   If ebp == & VG_(baseBlock), just jump next to %eax.
+	 
+	   If ebp == VG_EBP_JMP_SYSCALL, do a system call before 
+	   continuing at eax.
+	
+	   If ebp == VG_EBP_JMP_CLIENTREQ, do a client request before 
+	   continuing at eax.
+	
+	   If %ebp has any other value, we panic.
+	*/
+	cmpl	$VG_(baseBlock), %ebp
+	jnz	dispatch_exceptional
+	/* fall into main loop */
+
+
+dispatch_boring:
+	/* save the jump address at VG_(baseBlock)[VGOFF_(m_eip)] */
+	movl	VGOFF_(m_eip), %esi
+	movl	%eax, (%ebp, %esi, 4)
+	/* Are we out of timeslice?  If yes, defer to scheduler. */
+	decl	VG_(dispatch_ctr)
+	jz	counter_is_zero
+	/* try a fast lookup in the translation cache */
+	movl	%eax, %ebx
+	andl	$VG_TT_FAST_MASK, %ebx	
+	/* ebx = tt_fast index */
+	movl	VG_(tt_fast)(,%ebx,4), %ebx	
+	/* ebx points at a tt entry
+	   now compare target with the tte.orig_addr field (+0) */
+	cmpl	%eax, (%ebx)
+	jnz	fast_lookup_failed
+#if 1
+	/* Found a match.  Set the tte.mru_epoch field (+8)
+	   and call the tte.trans_addr field (+4) */
+	movl	VG_(current_epoch), %ecx
+	movl	%ecx, 8(%ebx)
+#endif
+	call	*4(%ebx)
 	cmpl	$VG_(baseBlock), %ebp
 	jnz	dispatch_exceptional
 
-dispatch_boring:
-	# save the jump address at VG_(baseBlock)[VGOFF_(m_eip)],
+dispatch_boring_unroll2:
+	/* save the jump address at VG_(baseBlock)[VGOFF_(m_eip)] */
 	movl	VGOFF_(m_eip), %esi
 	movl	%eax, (%ebp, %esi, 4)
-	
-	# do a timeslice check.
-	# are we out of timeslice?  If yes, defer to scheduler.
-	#OYNK(1001)
+#if 1
+	/* Are we out of timeslice?  If yes, defer to scheduler. */
 	decl	VG_(dispatch_ctr)
 	jz	counter_is_zero
-
-	#OYNK(1002)
-	# try a fast lookup in the translation cache
+#endif
+	/* try a fast lookup in the translation cache */
 	movl	%eax, %ebx
 	andl	$VG_TT_FAST_MASK, %ebx	
-	# ebx = tt_fast index
+	/* ebx = tt_fast index */
 	movl	VG_(tt_fast)(,%ebx,4), %ebx	
-	# ebx points at a tt entry
-	# now compare target with the tte.orig_addr field (+0)
+	/* ebx points at a tt entry
+	   now compare target with the tte.orig_addr field (+0) */
 	cmpl	%eax, (%ebx)
 	jnz	fast_lookup_failed
-
-	# Found a match.  Set the tte.mru_epoch field (+8)
-	# and call the tte.trans_addr field (+4)
+#if 1
+	/* Found a match.  Set the tte.mru_epoch field (+8)
+	   and call the tte.trans_addr field (+4) */
 	movl	VG_(current_epoch), %ecx
 	movl	%ecx, 8(%ebx)
+#endif
 	call	*4(%ebx)
-	jmp	dispatch_main
+	cmpl	$VG_(baseBlock), %ebp
+	jz	dispatch_boring
+
+	jmp	dispatch_exceptional
+
 	
 fast_lookup_failed:
-	# %EIP is up to date here since dispatch_boring dominates
+	/* %EIP is up to date here since dispatch_boring dominates */
 	movl	$VG_TRC_INNER_FASTMISS, %eax
 	jmp	run_innerloop_exit
 
 counter_is_zero:
-	# %EIP is up to date here since dispatch_boring dominates
+	/* %EIP is up to date here since dispatch_boring dominates */
 	movl	$VG_TRC_INNER_COUNTERZERO, %eax
 	jmp	run_innerloop_exit
 	
@@ -155,21 +179,19 @@
    make it look cleaner. 
 */
 dispatch_exceptional:
-	# this is jumped to only, not fallen-through from above
-	cmpl	$VG_TRC_EBP_JMP_STKADJ, %ebp
-	jz	dispatch_stkadj
+	/* this is jumped to only, not fallen-through from above */
 	cmpl	$VG_TRC_EBP_JMP_SYSCALL, %ebp
 	jz	dispatch_syscall
 	cmpl	$VG_TRC_EBP_JMP_CLIENTREQ, %ebp
 	jz	dispatch_clientreq
 
-	# ebp has an invalid value ... crap out.
+	/* ebp has an invalid value ... crap out. */
 	pushl	$panic_msg_ebp
 	call	VG_(panic)
-	#	(never returns)
+	/* (never returns) */
 
 dispatch_syscall:
-	# save %eax in %EIP and defer to sched
+	/* save %eax in %EIP and defer to sched */
 	movl	$VG_(baseBlock), %ebp
 	movl	VGOFF_(m_eip), %esi
 	movl	%eax, (%ebp, %esi, 4)
@@ -177,29 +199,13 @@
 	jmp	run_innerloop_exit
 	
 dispatch_clientreq:
-	# save %eax in %EIP and defer to sched
+	/* save %eax in %EIP and defer to sched */
 	movl	$VG_(baseBlock), %ebp
 	movl	VGOFF_(m_eip), %esi
 	movl	%eax, (%ebp, %esi, 4)
 	movl	$VG_TRC_EBP_JMP_CLIENTREQ, %eax
 	jmp	run_innerloop_exit
 
-dispatch_stkadj:
-	# save %eax in %EIP
-	movl	$VG_(baseBlock), %ebp
-	movl	VGOFF_(m_eip), %esi
-	movl	%eax, (%ebp, %esi, 4)
-
-	# see if we need to mess with stack blocks
-	pushl	%eax
-	call	VG_(delete_client_stack_blocks_following_ESP_change)
-	popl	%eax
-	movl	$VG_(baseBlock), %ebp
-		
-	# ok, its not interesting.  Handle the normal way.
-	jmp	dispatch_boring
-
-
 .data
 panic_msg_ebp:
 .ascii	"vg_dispatch: %ebp has invalid value!"
@@ -207,6 +213,6 @@
 .text	
 
 
-##--------------------------------------------------------------------##
-##--- end                                            vg_dispatch.S ---##
-##--------------------------------------------------------------------##
+/*--------------------------------------------------------------------*/
+/*--- end                                            vg_dispatch.S ---*/
+/*--------------------------------------------------------------------*/