Make various other tools compile; also fix the stage2 linker script.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3068 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/Makefile.am b/Makefile.am
index 9f95c7b..95e2a15 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -18,7 +18,6 @@
memcheck \
addrcheck \
corecheck \
- massif \
lackey \
none
diff --git a/addrcheck/ac_main.c b/addrcheck/ac_main.c
index ff5a074..c0bb2c8 100644
--- a/addrcheck/ac_main.c
+++ b/addrcheck/ac_main.c
@@ -716,34 +716,34 @@
}
REGPARM(1)
-static void ac_helperc_LOAD4 ( Addr a )
+static void ach_LOAD4 ( Addr a )
{
ac_helperc_ACCESS4 ( a, /*isWrite*/False );
}
REGPARM(1)
-static void ac_helperc_STORE4 ( Addr a )
+static void ach_STORE4 ( Addr a )
{
ac_helperc_ACCESS4 ( a, /*isWrite*/True );
}
REGPARM(1)
-static void ac_helperc_LOAD2 ( Addr a )
+static void ach_LOAD2 ( Addr a )
{
ac_helperc_ACCESS2 ( a, /*isWrite*/False );
}
REGPARM(1)
-static void ac_helperc_STORE2 ( Addr a )
+static void ach_STORE2 ( Addr a )
{
ac_helperc_ACCESS2 ( a, /*isWrite*/True );
}
REGPARM(1)
-static void ac_helperc_LOAD1 ( Addr a )
+static void ach_LOAD1 ( Addr a )
{
ac_helperc_ACCESS1 ( a, /*isWrite*/False );
}
REGPARM(1)
-static void ac_helperc_STORE1 ( Addr a )
+static void ach_STORE1 ( Addr a )
{
ac_helperc_ACCESS1 ( a, /*isWrite*/True );
}
@@ -913,13 +913,13 @@
}
REGPARM(2)
-static void ac_fpu_READ_check ( Addr addr, SizeT size )
+static void ach_LOADN ( Addr addr, SizeT size )
{
ac_fpu_ACCESS_check ( addr, size, /*isWrite*/False );
}
REGPARM(2)
-static void ac_fpu_WRITE_check ( Addr addr, SizeT size )
+static void ach_STOREN ( Addr addr, SizeT size )
{
ac_fpu_ACCESS_check ( addr, size, /*isWrite*/True );
}
@@ -949,137 +949,143 @@
/*--- Our instrumenter ---*/
/*------------------------------------------------------------*/
-UCodeBlock* TL_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
+IRBB* TL_(instrument)(IRBB* bb_in, VexGuestLayout* layout, IRType hWordTy )
{
-/* Use this rather than eg. -1 because it's a UInt. */
-#define INVALID_DATA_SIZE 999999
+ Int i, hsz;
+ IRStmt* st;
+ IRExpr* data;
+ IRExpr* aexpr;
+ IRExpr* guard;
+ IRDirty* di;
+ Bool isLoad;
- UCodeBlock* cb;
- Int i;
- UInstr* u_in;
- Int t_addr, t_size;
- Addr helper;
+ /* Set up BB */
+ IRBB* bb = emptyIRBB();
+ bb->tyenv = dopyIRTypeEnv(bb_in->tyenv);
+ bb->next = dopyIRExpr(bb_in->next);
+ bb->jumpkind = bb_in->jumpkind;
- cb = VG_(setup_UCodeBlock)(cb_in);
+ /* No loads to consider in ->next. */
+ tl_assert(isAtom(bb_in->next));
- for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
+ for (i = 0; i < bb_in->stmts_used; i++) {
+ st = bb_in->stmts[i];
+ if (!st) continue;
- t_addr = t_size = INVALID_TEMPREG;
- u_in = VG_(get_instr)(cb_in, i);
+ /* Examine each stmt in turn to figure out if it needs to be
+ preceded by a memory access check. If so, collect up the
+ relevant pieces of information. */
+ hsz = 0;
+ aexpr = NULL;
+ guard = NULL;
+ isLoad = True;
- switch (u_in->opcode) {
- case NOP: case LOCK: case CALLM_E: case CALLM_S:
+ switch (st->tag) {
+
+ case Ist_Tmp:
+ data = st->Ist.Tmp.data;
+ if (data->tag == Iex_LDle) {
+ aexpr = data->Iex.LDle.addr;
+ hsz = sizeofIRType(data->Iex.LDle.ty);
+ isLoad = True;
+ }
+ break;
+
+ case Ist_STle:
+ data = st->Ist.STle.data;
+ aexpr = st->Ist.STle.addr;
+ tl_assert(isAtom(data));
+ tl_assert(isAtom(aexpr));
+ hsz = sizeofIRType(typeOfIRExpr(bb_in->tyenv, data));
+ isLoad = False;
+ break;
+
+ case Ist_Put:
+ tl_assert(isAtom(st->Ist.Put.data));
break;
- /* For memory-ref instrs, copy the data_addr into a temporary
- * to be passed to the helper at the end of the instruction.
- */
- case LOAD:
- switch (u_in->size) {
- case 4: helper = (Addr)ac_helperc_LOAD4; break;
- case 2: helper = (Addr)ac_helperc_LOAD2; break;
- case 1: helper = (Addr)ac_helperc_LOAD1; break;
- default: VG_(tool_panic)
- ("addrcheck::TL_(instrument):LOAD");
- }
- uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
- uCCall (cb, helper, 1, 1, False );
- VG_(copy_UInstr)(cb, u_in);
+ case Ist_PutI:
+ tl_assert(isAtom(st->Ist.PutI.ix));
+ tl_assert(isAtom(st->Ist.PutI.data));
break;
- case STORE:
- switch (u_in->size) {
- case 4: helper = (Addr)ac_helperc_STORE4; break;
- case 2: helper = (Addr)ac_helperc_STORE2; break;
- case 1: helper = (Addr)ac_helperc_STORE1; break;
- default: VG_(tool_panic)
- ("addrcheck::TL_(instrument):STORE");
- }
- uInstr1(cb, CCALL, 0, TempReg, u_in->val2);
- uCCall (cb, helper, 1, 1, False );
- VG_(copy_UInstr)(cb, u_in);
+ case Ist_Exit:
+ tl_assert(isAtom(st->Ist.Exit.guard));
break;
- case SSE3ag_MemRd_RegWr:
- tl_assert(u_in->size == 4 || u_in->size == 8);
- helper = (Addr)ac_fpu_READ_check;
- goto do_Access_ARG1;
- do_Access_ARG1:
- tl_assert(u_in->tag1 == TempReg);
- t_addr = u_in->val1;
- t_size = newTemp(cb);
- uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
- uLiteral(cb, u_in->size);
- uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
- uCCall(cb, helper, 2, 2, False );
- VG_(copy_UInstr)(cb, u_in);
- break;
+ case Ist_Dirty:
+ if (st->Ist.Dirty.details->mFx != Ifx_None) {
+ /* We classify Ifx_Modify as a load. */
+ isLoad = st->Ist.Dirty.details->mFx != Ifx_Write;
+ hsz = st->Ist.Dirty.details->mSize;
+ aexpr = st->Ist.Dirty.details->mAddr;
+ guard = st->Ist.Dirty.details->guard;
+ tl_assert(isAtom(aexpr));
+ }
+ break;
- case MMX2_MemRd:
- tl_assert(u_in->size == 4 || u_in->size == 8);
- helper = (Addr)ac_fpu_READ_check;
- goto do_Access_ARG2;
- case MMX2_MemWr:
- tl_assert(u_in->size == 4 || u_in->size == 8);
- helper = (Addr)ac_fpu_WRITE_check;
- goto do_Access_ARG2;
- case FPU_R:
- helper = (Addr)ac_fpu_READ_check;
- goto do_Access_ARG2;
- case FPU_W:
- helper = (Addr)ac_fpu_WRITE_check;
- goto do_Access_ARG2;
- do_Access_ARG2:
- tl_assert(u_in->tag2 == TempReg);
- t_addr = u_in->val2;
- t_size = newTemp(cb);
- uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
- uLiteral(cb, u_in->size);
- uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
- uCCall(cb, helper, 2, 2, False );
- VG_(copy_UInstr)(cb, u_in);
- break;
-
- case MMX2a1_MemRd:
- case SSE3a_MemRd:
- case SSE2a_MemRd:
- case SSE3a1_MemRd:
- case SSE2a1_MemRd:
- helper = (Addr)ac_fpu_READ_check;
- goto do_Access_ARG3;
- case SSE2a_MemWr:
- case SSE3a_MemWr:
- helper = (Addr)ac_fpu_WRITE_check;
- goto do_Access_ARG3;
- do_Access_ARG3:
- tl_assert(u_in->size == 4 || u_in->size == 8
- || u_in->size == 16 || u_in->size == 512);
- tl_assert(u_in->tag3 == TempReg);
- t_addr = u_in->val3;
- t_size = newTemp(cb);
- uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
- uLiteral(cb, u_in->size);
- uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
- uCCall(cb, helper, 2, 2, False );
- VG_(copy_UInstr)(cb, u_in);
- break;
-
- case SSE3e1_RegRd:
- case SSE3e_RegWr:
- case SSE3g1_RegWr:
- case SSE5:
- case SSE3g_RegWr:
- case SSE3e_RegRd:
- case SSE4:
- case SSE3:
default:
- VG_(copy_UInstr)(cb, u_in);
- break;
+ VG_(printf)("\n");
+ ppIRStmt(st);
+ VG_(printf)("\n");
+ VG_(tool_panic)("addrcheck: unhandled IRStmt");
}
+
+ /* If needed, add a helper call. */
+ if (aexpr) {
+ tl_assert(hsz > 0);
+ switch (hsz) {
+ case 4:
+ if (isLoad)
+ di = unsafeIRDirty_0_N( 1, "ach_LOAD4", &ach_LOAD4,
+ mkIRExprVec_1(aexpr));
+ else
+ di = unsafeIRDirty_0_N( 1, "ach_STORE4", &ach_STORE4,
+ mkIRExprVec_1(aexpr));
+ break;
+ case 2:
+ if (isLoad)
+ di = unsafeIRDirty_0_N( 1, "ach_LOAD2", &ach_LOAD2,
+ mkIRExprVec_1(aexpr));
+ else
+ di = unsafeIRDirty_0_N( 1, "ach_STORE2", &ach_STORE2,
+ mkIRExprVec_1(aexpr));
+ break;
+ case 1:
+ if (isLoad)
+ di = unsafeIRDirty_0_N( 1, "ach_LOAD1", &ach_LOAD1,
+ mkIRExprVec_1(aexpr));
+ else
+ di = unsafeIRDirty_0_N( 1, "ach_STORE1", &ach_STORE1,
+ mkIRExprVec_1(aexpr));
+ break;
+ default:
+ if (isLoad)
+ di = unsafeIRDirty_0_N(
+ 2, "ach_LOADN", &ach_LOADN,
+ mkIRExprVec_2(aexpr,mkIRExpr_HWord(hsz)));
+ else
+ di = unsafeIRDirty_0_N(
+ 2, "ach_STOREN", &ach_STOREN,
+ mkIRExprVec_2(aexpr,mkIRExpr_HWord(hsz)));
+ break;
+ }
+
+ /* If the call has arisen as a result of a dirty helper which
+ references memory, we need to inherit the guard from the
+ dirty helper. */
+ if (guard)
+ di->guard = dopyIRExpr(guard);
+
+ /* emit the helper call */
+ addStmtToIRBB( bb, IRStmt_Dirty(di) );
+
+ }
+
+ /* And finally, copy the expr itself to the output. */
+ addStmtToIRBB( bb, dopyIRStmt(st));
}
- VG_(free_UCodeBlock)(cb_in);
- return cb;
+ return bb;
}
@@ -1307,15 +1313,6 @@
VG_(init_pre_mem_write) ( & ac_check_is_writable );
VG_(init_post_mem_write) ( & ac_make_accessible );
- VG_(register_compact_helper)((Addr) & ac_helperc_LOAD4);
- VG_(register_compact_helper)((Addr) & ac_helperc_LOAD2);
- VG_(register_compact_helper)((Addr) & ac_helperc_LOAD1);
- VG_(register_compact_helper)((Addr) & ac_helperc_STORE4);
- VG_(register_compact_helper)((Addr) & ac_helperc_STORE2);
- VG_(register_compact_helper)((Addr) & ac_helperc_STORE1);
- VG_(register_noncompact_helper)((Addr) & ac_fpu_READ_check);
- VG_(register_noncompact_helper)((Addr) & ac_fpu_WRITE_check);
-
VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
diff --git a/corecheck/cc_main.c b/corecheck/cc_main.c
index 218d055..ac013c3 100644
--- a/corecheck/cc_main.c
+++ b/corecheck/cc_main.c
@@ -51,9 +51,9 @@
{
}
-UCodeBlock* TL_(instrument)(UCodeBlock* cb, Addr a)
+IRBB* TL_(instrument)(IRBB* bb_in, VexGuestLayout* layout, IRType hWordTy )
{
- return cb;
+ return bb_in;
}
void TL_(fini)(Int exitcode)
diff --git a/coregrind/valgrind.vs b/coregrind/valgrind.vs
index 30ded82..e8233ea 100644
--- a/coregrind/valgrind.vs
+++ b/coregrind/valgrind.vs
@@ -5,6 +5,17 @@
vgProf_*;
vgOff_*;
vgArch_*;
+ *IROp*;
+ *IRExpr*;
+ *IRStmt*;
+ *IRBB*;
+ *IRDirty*;
+ *IRType*;
+ *IRTemp*;
+ *IRConst*;
+ *IRCallee*;
+ *IRArray*;
+ LibVEX_Alloc;
local:
*; # default to hidden
diff --git a/lackey/lk_main.c b/lackey/lk_main.c
index 540cd09..18e6b11 100644
--- a/lackey/lk_main.c
+++ b/lackey/lk_main.c
@@ -35,7 +35,7 @@
static ULong n_dlrr_calls = 0;
static ULong n_BBs = 0;
static ULong n_UInstrs = 0;
-static ULong n_machine_instrs = 0;
+static ULong n_guest_instrs = 0;
static ULong n_Jccs = 0;
static ULong n_Jccs_untaken = 0;
@@ -49,7 +49,7 @@
static void add_one_BB(void)
{
n_BBs++;
- n_machine_instrs++;
+ n_guest_instrs++;
}
static void add_one_UInstr(void)
@@ -57,9 +57,9 @@
n_UInstrs++;
}
-static void add_one_machine_instr(void)
+static void add_one_guest_instr(void)
{
- n_machine_instrs++;
+ n_guest_instrs++;
}
static void add_one_Jcc(void)
@@ -81,13 +81,6 @@
"Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.");
VG_(details_bug_reports_to) (VG_BUGS_TO);
VG_(details_avg_translation_sizeB) ( 175 );
-
- VG_(register_compact_helper)((Addr) & add_one_dlrr_call);
- VG_(register_compact_helper)((Addr) & add_one_BB);
- VG_(register_compact_helper)((Addr) & add_one_machine_instr);
- VG_(register_compact_helper)((Addr) & add_one_UInstr);
- VG_(register_compact_helper)((Addr) & add_one_Jcc);
- VG_(register_compact_helper)((Addr) & add_one_Jcc_untaken);
}
void TL_(post_clo_init)(void)
@@ -106,27 +99,27 @@
Jcc ...
JMP ... (will not be reached if Jcc succeeds)
- If we simplemindedly added calls to add_one_machine_instr() before INCEIPs
+ If we simplemindedly added calls to add_one_guest_instr() before INCEIPs
and unconditional JMPs, we'd sometimes miss the final call (when a
preceding conditional JMP succeeds), underestimating the machine instruction
count.
<code a>
- call add_one_machine_instr()
+ call add_one_guest_instr()
INCEIP ...
<code b>
Jcc ...
- call add_one_machine_instr()
+ call add_one_guest_instr()
JMP ...
Instead we add a call before each INCEIP, and also one at the start of the
block, but not one at the end, viz:
- call add_one_machine_instr()
+ call add_one_guest_instr()
<code a>
- call add_one_machine_instr()
+ call add_one_guest_instr()
INCEIP ...
<code b>
@@ -136,8 +129,69 @@
Which gives us the right answer. And just to avoid two C calls, we fold
the basic-block-beginning call in with add_one_BB(). Phew.
*/
-UCodeBlock* TL_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
+IRBB* TL_(instrument)(IRBB* bb_in, VexGuestLayout* layout, IRType hWordTy )
{
+ IRDirty* di;
+ Int i;
+
+ /* Set up BB */
+ IRBB* bb = emptyIRBB();
+ bb->tyenv = dopyIRTypeEnv(bb_in->tyenv);
+ bb->next = dopyIRExpr(bb_in->next);
+ bb->jumpkind = bb_in->jumpkind;
+
+#if 0
+ /* We need to know the entry point for this bb to do this. In any
+ case it's pretty meaningless in the presence of bb chasing since
+ we may enter this function part way through an IRBB. */
+ /* Count call to dlrr(), if this BB is dlrr()'s entry point */
+ if (VG_(get_fnname_if_entry)(orig_addr, fnname, 100) &&
+ 0 == VG_(strcmp)(fnname, "_dl_runtime_resolve"))
+ {
+ addStmtToIRBB(
+ bb,
+ IRStmt_Dirty(
+ unsafeIRDirty_0_N( 0, "add_one_dlrr_call", &add_one_dlrr_call, mkIRExprVec_0() )
+ ));
+ }
+#endif
+
+ /* Count this basic block */
+ di = unsafeIRDirty_0_N( 0, "add_one_BB", &add_one_BB, mkIRExprVec_0() );
+ addStmtToIRBB( bb, IRStmt_Dirty(di) );
+
+ for (i = 0; i < bb_in->stmts_used; i++) {
+ IRStmt* st = bb_in->stmts[i];
+ if (!st) continue;
+
+ switch (st->tag) {
+ case Ist_Exit:
+ /* Count Jcc */
+ addStmtToIRBB(
+ bb,
+ IRStmt_Dirty(
+ unsafeIRDirty_0_N( 0, "add_one_Jcc", &add_one_Jcc,
+ mkIRExprVec_0() )
+ ));
+ addStmtToIRBB( bb, dopyIRStmt(st) );
+ /* Count non-taken Jcc */
+ addStmtToIRBB(
+ bb,
+ IRStmt_Dirty(
+ unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken", &add_one_Jcc_untaken,
+ mkIRExprVec_0() )
+ ));
+ break;
+
+ default:
+ addStmtToIRBB( bb, dopyIRStmt(st));
+ }
+ }
+
+ return bb;
+
+
+#if 0
UCodeBlock* cb;
Int i;
UInstr* u;
@@ -145,13 +199,6 @@
cb = VG_(setup_UCodeBlock)(cb_in);
- /* Count call to dlrr(), if this BB is dlrr()'s entry point */
- if (VG_(get_fnname_if_entry)(orig_addr, fnname, 100) &&
- 0 == VG_(strcmp)(fnname, "_dl_runtime_resolve"))
- {
- VG_(call_helper_0_0)(cb, (Addr) & add_one_dlrr_call);
- }
-
/* Count basic block */
VG_(call_helper_0_0)(cb, (Addr) & add_one_BB);
@@ -163,8 +210,8 @@
break;
case INCEIP:
- /* Count machine instr */
- VG_(call_helper_0_0)(cb, (Addr) & add_one_machine_instr);
+ /* Count guest instr */
+ VG_(call_helper_0_0)(cb, (Addr) & add_one_guest_instr);
VG_(copy_UInstr)(cb, u);
break;
@@ -190,6 +237,7 @@
VG_(free_UCodeBlock)(cb_in);
return cb;
+#endif
}
void TL_(fini)(Int exitcode)
@@ -199,24 +247,25 @@
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "Executed:");
- VG_(message)(Vg_UserMsg, " BBs: %llu", n_BBs);
- VG_(message)(Vg_UserMsg, " machine instrs: %llu", n_machine_instrs);
- VG_(message)(Vg_UserMsg, " UInstrs: %llu", n_UInstrs);
+ VG_(message)(Vg_UserMsg, " BBs: %llu", n_BBs);
+ VG_(message)(Vg_UserMsg, " guest instrs: %llu", n_guest_instrs);
+ VG_(message)(Vg_UserMsg, " UInstrs: %llu", n_UInstrs);
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "Jccs:");
- VG_(message)(Vg_UserMsg, " total: %llu", n_Jccs);
- VG_(message)(Vg_UserMsg, " %% taken: %llu%%",
- (n_Jccs - n_Jccs_untaken)*100 / n_Jccs);
+ VG_(message)(Vg_UserMsg, " total: %llu", n_Jccs);
+ VG_(message)(Vg_UserMsg, " %% taken: %llu%%",
+ (n_Jccs - n_Jccs_untaken)*100 /
+ (n_Jccs ? n_Jccs : 1));
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "Ratios:");
- VG_(message)(Vg_UserMsg, " machine instrs : BB = %3llu : 10",
- 10 * n_machine_instrs / n_BBs);
- VG_(message)(Vg_UserMsg, " UInstrs : BB = %3llu : 10",
+ VG_(message)(Vg_UserMsg, " guest instrs : BB = %3llu : 10",
+ 10 * n_guest_instrs / (n_BBs ? n_BBs : 1));
+ VG_(message)(Vg_UserMsg, " UInstrs : BB = %3llu : 10",
10 * n_UInstrs / n_BBs);
- VG_(message)(Vg_UserMsg, " UInstrs : machine_instr = %3llu : 10",
- 10 * n_UInstrs / n_machine_instrs);
+ VG_(message)(Vg_UserMsg, " UInstrs : guest_instr = %3llu : 10",
+ 10 * n_UInstrs / (n_guest_instrs ? n_guest_instrs : 1));
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "Exit code: %d", exitcode);
diff --git a/none/nl_main.c b/none/nl_main.c
index 1210ff4..9b6514e 100644
--- a/none/nl_main.c
+++ b/none/nl_main.c
@@ -46,9 +46,9 @@
{
}
-UCodeBlock* TL_(instrument)(UCodeBlock* cb, Addr a)
+IRBB* TL_(instrument)(IRBB* bb, VexGuestLayout* layout, IRType hWordTy)
{
- return cb;
+ return bb;
}
void TL_(fini)(Int exitcode)