/*--------------------------------------------------------------------*/
/*--- Instrument IR to perform memory checking operations.        ---*/
/*---                                               mc_translate.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Forward decls                                        ---*/
/*------------------------------------------------------------*/

struct _MCEnv;

static IRType  shadowType ( IRType ty );
static IRExpr* expr2vbits ( struct _MCEnv* mce, IRExpr* e );


/*------------------------------------------------------------*/
/*--- Memcheck running state, and tmp management.          ---*/
/*------------------------------------------------------------*/

/* Carries around state during memcheck instrumentation. */
typedef
   struct _MCEnv {
      /* MODIFIED: the bb being constructed.  IRStmts are added. */
      IRBB* bb;

      /* MODIFIED: a table [0 .. #temps_in_original_bb-1] which maps
         original temps to their current shadow temp.  Initially all
         entries are IRTemp_INVALID.  Entries are added lazily since
         many original temps are not used due to optimisation prior
         to instrumentation.  Note that floating point original tmps
         are shadowed by integer tmps of the same size, and
         Ity_I1-typed original tmps are shadowed at type Ity_I1; see
         shadowType() below.  See also the comment on shadow tmp
         management below. */
      IRTemp* tmpMap;
      Int     n_originalTmps; /* for range checking */

      /* READONLY: the guest layout.  This indicates which parts of
         the guest state should be regarded as 'always defined'. */
      VexGuestLayout* layout;
      /* READONLY: the host word type.  Needed for constructing
         arguments of type 'HWord' to be passed to helper functions.
         Ity_I32 or Ity_I64 only. */
      IRType hWordTy;
   }
   MCEnv;

/* SHADOW TMP MANAGEMENT.  Shadow tmps are allocated lazily (on
   demand), as they are encountered.  This is for two reasons.

   (1) (less important reason): Many original tmps are unused due to
   initial IR optimisation, and we do not want to waste space in
   tables tracking them.

   Shadow IRTemps are therefore allocated on demand.  mce.tmpMap is a
   table indexed [0 .. #temps_in_original_bb-1], which gives the
   current shadow for each original tmp, or IRTemp_INVALID if none is
   so far assigned.  It is necessary to support making multiple
   assignments to a shadow -- specifically, after testing a shadow
   for definedness, it needs to be made defined.  But IR's SSA
   property disallows this.

   (2) (more important reason): Therefore, when a shadow needs to get
   a new value, a new temporary is created, the value is assigned to
   that, and the tmpMap is updated to reflect the new binding.

   A corollary is that if the tmpMap maps a given tmp to
   IRTemp_INVALID and we are hoping to read that shadow tmp, it means
   there's a read-before-write error in the original tmps.  The IR
   sanity checker should catch all such anomalies, however.
*/

/* Find the tmp currently shadowing the given original tmp.  If none
   so far exists, allocate one.  */
static IRTemp findShadowTmp ( MCEnv* mce, IRTemp orig )
{
   sk_assert(orig < mce->n_originalTmps);
   if (mce->tmpMap[orig] == IRTemp_INVALID) {
      mce->tmpMap[orig]
         = newIRTemp(mce->bb->tyenv,
                     shadowType(mce->bb->tyenv->types[orig]));
   }
   return mce->tmpMap[orig];
}

/* Allocate a new shadow for the given original tmp.  This means any
   previous shadow is abandoned.  This is needed because it is
   necessary to give a new value to a shadow once it has been tested
   for undefinedness, but unfortunately IR's SSA property disallows
   this.  Instead we must abandon the old shadow, allocate a new one
   and use that instead. */
static void newShadowTmp ( MCEnv* mce, IRTemp orig )
{
   sk_assert(orig < mce->n_originalTmps);
   mce->tmpMap[orig]
      = newIRTemp(mce->bb->tyenv,
                  shadowType(mce->bb->tyenv->types[orig]));
}
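
/* For illustration only (a sketch of how the two functions above are
   meant to be used together; the real instance is in
   complainIfUndefined below): to mark the shadow of original tmp 't'
   as defined after testing it, we rebind rather than overwrite:

      newShadowTmp(mce, t);
      assign(mce->bb, findShadowTmp(mce, t), definedOfType(ty));

   The previous shadow tmp is simply abandoned, which keeps the
   instrumented IR in SSA form. */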


/*------------------------------------------------------------*/
/*--- IRAtoms -- a subset of IRExprs                       ---*/
/*------------------------------------------------------------*/

/* An atom is either an IRExpr_Const or an IRExpr_Tmp, as defined by
   isAtom() in libvex_ir.h.  Because this instrumenter expects flat
   input, most of this code deals in atoms.  Usefully, a value atom
   always has a V-value which is also an atom: constants are shadowed
   by constants, and temps are shadowed by the corresponding shadow
   temporary. */

typedef  IRExpr  IRAtom;

/* (used for sanity checks only): is this an atom which looks
   like it's from original code? */
static Bool isOriginalAtom ( MCEnv* mce, IRAtom* a1 )
{
   if (a1->tag == Iex_Const)
      return True;
   if (a1->tag == Iex_Tmp && a1->Iex.Tmp.tmp < mce->n_originalTmps)
      return True;
   return False;
}

/* (used for sanity checks only): is this an atom which looks
   like it's from shadow code? */
static Bool isShadowAtom ( MCEnv* mce, IRAtom* a1 )
{
   if (a1->tag == Iex_Const)
      return True;
   if (a1->tag == Iex_Tmp && a1->Iex.Tmp.tmp >= mce->n_originalTmps)
      return True;
   return False;
}
/* (used for sanity checks only): check that both args are atoms and
   are identically-kinded. */
static Bool sameKindedAtoms ( IRAtom* a1, IRAtom* a2 )
{
   if (a1->tag == Iex_Tmp && a2->tag == Iex_Tmp)
      return True;
   if (a1->tag == Iex_Const && a2->tag == Iex_Const)
      return True;
   return False;
}


/*------------------------------------------------------------*/
/*--- Type management                                      ---*/
/*------------------------------------------------------------*/

/* Shadow state is always accessed using integer types.  This returns
   an integer type with the same size (as per sizeofIRType) as the
   given type.  The only valid shadow types are I1, I8, I16, I32,
   I64. */

static IRType shadowType ( IRType ty )
{
   switch (ty) {
      case Ity_I1:
      case Ity_I8:
      case Ity_I16:
      case Ity_I32:
      case Ity_I64: return ty;
      case Ity_F32: return Ity_I32;
      case Ity_F64: return Ity_I64;
      default: ppIRType(ty);
               VG_(skin_panic)("memcheck:shadowType");
   }
}

/* Produce a 'defined' value of the given shadow type.  Should only be
   supplied shadow types (I1/I8/I16/I32/I64). */
static IRExpr* definedOfType ( IRType ty ) {
   switch (ty) {
      case Ity_I1:  return IRExpr_Const(IRConst_U1(False));
      case Ity_I8:  return IRExpr_Const(IRConst_U8(0));
      case Ity_I16: return IRExpr_Const(IRConst_U16(0));
      case Ity_I32: return IRExpr_Const(IRConst_U32(0));
      case Ity_I64: return IRExpr_Const(IRConst_U64(0));
      default:      VG_(skin_panic)("memcheck:definedOfType");
   }
}


/*------------------------------------------------------------*/
/*--- Constructing IR fragments                            ---*/
/*------------------------------------------------------------*/

/* assign value to tmp */
#define assign(_bb,_tmp,_expr)   \
   addStmtToIRBB((_bb), IRStmt_Tmp((_tmp),(_expr)))

/* add stmt to a bb */
#define stmt(_bb,_stmt)    \
   addStmtToIRBB((_bb), (_stmt))

/* build various kinds of expressions */
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define unop(_op, _arg)          IRExpr_Unop((_op),(_arg))
#define mkU8(_n)                 IRExpr_Const(IRConst_U8(_n))
#define mkU16(_n)                IRExpr_Const(IRConst_U16(_n))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define mkexpr(_tmp)             IRExpr_Tmp((_tmp))

/* bind the given expression to a new temporary, and return the
   temporary.  This effectively converts an arbitrary expression into
   an atom. */
static IRAtom* assignNew ( MCEnv* mce, IRType ty, IRExpr* e ) {
   IRTemp t = newIRTemp(mce->bb->tyenv, ty);
   assign(mce->bb, t, e);
   return mkexpr(t);
}
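
/* For illustration (a usage sketch only): assignNew(mce, Ity_I32,
   binop(Iop_Or32, x, y)) appends "t = Or32(x,y)" to the block under
   construction, for a fresh tmp t, and hands back Tmp(t) -- so the
   Or32 result can then be used anywhere an atom is required. */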


/*------------------------------------------------------------*/
/*--- Constructing definedness primitive ops               ---*/
/*------------------------------------------------------------*/

/* --------- Defined-if-either-defined --------- */

static IRAtom* mkDifD8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I8, binop(Iop_And8, a1, a2));
}

static IRAtom* mkDifD16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I16, binop(Iop_And16, a1, a2));
}

static IRAtom* mkDifD32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I32, binop(Iop_And32, a1, a2));
}

/* --------- Undefined-if-either-undefined --------- */

static IRAtom* mkUifU8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I8, binop(Iop_Or8, a1, a2));
}

static IRAtom* mkUifU16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I16, binop(Iop_Or16, a1, a2));
}

static IRAtom* mkUifU32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I32, binop(Iop_Or32, a1, a2));
}

static IRAtom* mkUifU64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   sk_assert(isShadowAtom(mce,a1));
   sk_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I64, binop(Iop_Or64, a1, a2));
}

static IRAtom* mkUifU ( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 ) {
   switch (vty) {
      case Ity_I16: return mkUifU16(mce, a1, a2);
      case Ity_I32: return mkUifU32(mce, a1, a2);
      case Ity_I64: return mkUifU64(mce, a1, a2);
      default:
         VG_(printf)("\n"); ppIRType(vty); VG_(printf)("\n");
         VG_(skin_panic)("memcheck:mkUifU");
   }
}
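
/* Worked example (for illustration; recall the convention: a V bit
   of 0 means "defined", 1 means "undefined").  With 8-bit shadows
   v1 = 0x0F and v2 = 0x33:

      UifU8(v1,v2) = v1 | v2 = 0x3F   -- undefined wherever either is
      DifD8(v1,v2) = v1 & v2 = 0x03   -- defined wherever either is

   UifU is the pessimistic default for most binary operations; DifD
   only becomes useful when combined with the 'improvement' terms for
   And/Or further below. */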

/* --------- The Left-family of operations. --------- */

static IRAtom* mkLeft8 ( MCEnv* mce, IRAtom* a1 ) {
   sk_assert(isShadowAtom(mce,a1));
   /* It's safe to duplicate a1 since it's only an atom */
   return assignNew(mce, Ity_I8,
                    binop(Iop_Or8, a1,
                          assignNew(mce, Ity_I8,
                                    /* unop(Iop_Neg8, a1)))); */
                                    binop(Iop_Sub8, mkU8(0), a1) )));
}

static IRAtom* mkLeft16 ( MCEnv* mce, IRAtom* a1 ) {
   sk_assert(isShadowAtom(mce,a1));
   /* It's safe to duplicate a1 since it's only an atom */
   return assignNew(mce, Ity_I16,
                    binop(Iop_Or16, a1,
                          assignNew(mce, Ity_I16,
                                    /* unop(Iop_Neg16, a1)))); */
                                    binop(Iop_Sub16, mkU16(0), a1) )));
}

static IRAtom* mkLeft32 ( MCEnv* mce, IRAtom* a1 ) {
   sk_assert(isShadowAtom(mce,a1));
   /* It's safe to duplicate a1 since it's only an atom */
   return assignNew(mce, Ity_I32,
                    binop(Iop_Or32, a1,
                          assignNew(mce, Ity_I32,
                                    /* unop(Iop_Neg32, a1)))); */
                                    binop(Iop_Sub32, mkU32(0), a1) )));
}
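
/* What 'Left' computes, for illustration: Left(v) = v | -v, where -v
   is built as 0 - v above.  This sets every bit at or above the
   lowest undefined (1) bit.  E.g. for a 32-bit shadow v = 0x00000100
   (only bit 8 undefined), -v = 0xFFFFFF00 and v | -v = 0xFFFFFF00:
   bits 0..7 stay defined, bits 8..31 become undefined.  This models
   the worst case for Add/Sub/Mul, where a carry out of an undefined
   bit can disturb any bit to its left, but never a bit to its
   right. */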

/* --------- 'Improvement' functions for AND/OR. --------- */

/* ImproveAND(data, vbits) = data OR vbits.  A data bit which is 0 and
   defined (vbit 0) gives defined (0); all other combinations give
   undefined (1).
*/
static IRAtom* mkImproveAND8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   sk_assert(isOriginalAtom(mce, data));
   sk_assert(isShadowAtom(mce, vbits));
   sk_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I8, binop(Iop_Or8, data, vbits));
}

static IRAtom* mkImproveAND16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   sk_assert(isOriginalAtom(mce, data));
   sk_assert(isShadowAtom(mce, vbits));
   sk_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I16, binop(Iop_Or16, data, vbits));
}

static IRAtom* mkImproveAND32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   sk_assert(isOriginalAtom(mce, data));
   sk_assert(isShadowAtom(mce, vbits));
   sk_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I32, binop(Iop_Or32, data, vbits));
}

/* ImproveOR(data, vbits) = ~data OR vbits.  A data bit which is 1 and
   defined (vbit 0) gives defined (0); all other combinations give
   undefined (1).
*/
static IRAtom* mkImproveOR8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   sk_assert(isOriginalAtom(mce, data));
   sk_assert(isShadowAtom(mce, vbits));
   sk_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I8,
             binop(Iop_Or8,
                   assignNew(mce, Ity_I8, unop(Iop_Not8, data)),
                   vbits) );
}

static IRAtom* mkImproveOR16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   sk_assert(isOriginalAtom(mce, data));
   sk_assert(isShadowAtom(mce, vbits));
   sk_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I16,
             binop(Iop_Or16,
                   assignNew(mce, Ity_I16, unop(Iop_Not16, data)),
                   vbits) );
}

static IRAtom* mkImproveOR32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   sk_assert(isOriginalAtom(mce, data));
   sk_assert(isShadowAtom(mce, vbits));
   sk_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I32,
             binop(Iop_Or32,
                   assignNew(mce, Ity_I32, unop(Iop_Not32, data)),
                   vbits) );
}
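
/* Why the improvement terms help, for illustration.  For And32, a
   data bit which is a defined 0 forces the corresponding result bit
   to a known 0, no matter how undefined the other operand is; for
   Or32, a defined 1 does the same.  The And/Or cases in
   expr2vbits_Binop below therefore combine things as

      DifD( UifU(v1,v2),
            DifD( ImproveAND(a1,v1), ImproveAND(a2,v2) ) )

   (and similarly with ImproveOR): start from the pessimistic
   "undefined if either input is undefined", then let either
   operand's improvement term pull individual bits back to
   defined. */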

/* --------- Pessimising casts. --------- */

static IRAtom* mkPCastTo( MCEnv* mce, IRType dst_ty, IRAtom* vbits )
{
   /* Note, dst_ty is a shadow type, not an original type. */
   /* First of all, collapse vbits down to a single bit. */
   sk_assert(isShadowAtom(mce,vbits));
   IRType  ty   = typeOfIRExpr(mce->bb->tyenv, vbits);
   IRAtom* tmp1 = NULL;
   switch (ty) {
      case Ity_I1:
         tmp1 = vbits;
         break;
      case Ity_I8:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE8, vbits, mkU8(0)));
         break;
      case Ity_I16:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE16, vbits, mkU16(0)));
         break;
      case Ity_I32:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE32, vbits, mkU32(0)));
         break;
      case Ity_I64:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE64, vbits, mkU64(0)));
         break;
      default:
         VG_(skin_panic)("mkPCastTo(1)");
   }
   sk_assert(tmp1);
   /* Now widen up to the dst type. */
   switch (dst_ty) {
      case Ity_I1:
         return tmp1;
      case Ity_I8:
         return assignNew(mce, Ity_I8, unop(Iop_1Sto8, tmp1));
      case Ity_I16:
         return assignNew(mce, Ity_I16, unop(Iop_1Sto16, tmp1));
      case Ity_I32:
         return assignNew(mce, Ity_I32, unop(Iop_1Sto32, tmp1));
      case Ity_I64:
         return assignNew(mce, Ity_I64, unop(Iop_1Sto64, tmp1));
      default:
         ppIRType(dst_ty);
         VG_(skin_panic)("mkPCastTo(2)");
   }
}
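
/* For illustration: a pessimising cast throws away all positional
   information.  Given a 32-bit shadow v = 0x00008000 (just one
   undefined bit), CmpNE32(v,0) yields 1, and 1Sto64 sign-extends
   that to 0xFFFFFFFFFFFFFFFF -- so mkPCastTo(mce, Ity_I64, v) marks
   every bit of the 64-bit result undefined.  Only a fully defined
   input (v == 0) produces a fully defined output. */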


/*------------------------------------------------------------*/
/*--- Emit a test and complaint if something is undefined. ---*/
/*------------------------------------------------------------*/

/* Set the annotations on a dirty helper to indicate that the stack
   pointer and instruction pointer might be read.  This is the
   behaviour of all 'emit-a-complaint' style functions we might
   call. */

static void setHelperAnns ( MCEnv* mce, IRDirty* di ) {
   di->nFxState = 2;
   di->fxState[0].fx     = Ifx_Read;
   di->fxState[0].offset = mce->layout->offset_SP;
   di->fxState[0].size   = mce->layout->sizeof_SP;
   di->fxState[1].fx     = Ifx_Read;
   di->fxState[1].offset = mce->layout->offset_IP;
   di->fxState[1].size   = mce->layout->sizeof_IP;
}


/* Check the supplied **original** atom for undefinedness, and emit a
   complaint if so.  Once that happens, mark it as defined.  This is
   possible because the atom is either a tmp or literal.  If it's a
   tmp, it will be shadowed by a tmp, and so we can set the shadow to
   be defined.  In fact as mentioned above, we will have to allocate a
   new tmp to carry the new 'defined' shadow value, and update the
   original->tmp mapping accordingly; we cannot simply assign a new
   value to an existing shadow tmp as this breaks SSAness -- resulting
   in the post-instrumentation sanity checker spluttering in disapproval.
*/
static void complainIfUndefined ( MCEnv* mce, IRAtom* atom )
{
   /* Since the original expression is atomic, there's no duplicated
      work generated by making multiple V-expressions for it.  So we
      don't really care about the possibility that someone else may
      also create a V-interpretation for it. */
   sk_assert(isOriginalAtom(mce, atom));
   IRAtom* vatom = expr2vbits( mce, atom );
   sk_assert(isShadowAtom(mce, vatom));
   sk_assert(sameKindedAtoms(atom, vatom));

   IRType ty = typeOfIRExpr(mce->bb->tyenv, vatom);

   /* sz is only used for constructing the error message */
   Int sz = ty==Ity_I1 ? 0 : sizeofIRType(ty);

   IRAtom* cond = mkPCastTo( mce, Ity_I1, vatom );
   /* cond will be 0 if all defined, and 1 if any not defined. */

   IRDirty* di;
   switch (sz) {
      case 0:
         di = unsafeIRDirty_0_N( 0/*regparms*/,
                                 "MC_(helperc_value_check0_fail)",
                                 &MC_(helperc_value_check0_fail),
                                 mkIRExprVec_0()
                               );
         break;
      case 1:
         di = unsafeIRDirty_0_N( 0/*regparms*/,
                                 "MC_(helperc_value_check1_fail)",
                                 &MC_(helperc_value_check1_fail),
                                 mkIRExprVec_0()
                               );
         break;
      case 4:
         di = unsafeIRDirty_0_N( 0/*regparms*/,
                                 "MC_(helperc_value_check4_fail)",
                                 &MC_(helperc_value_check4_fail),
                                 mkIRExprVec_0()
                               );
         break;
      default:
         di = unsafeIRDirty_0_N( 1/*regparms*/,
                                 "MC_(helperc_complain_undef)",
                                 &MC_(helperc_complain_undef),
                                 mkIRExprVec_1( mkIRExpr_HWord( sz ))
                               );
         break;
   }
   di->guard = cond;
   setHelperAnns( mce, di );
   stmt( mce->bb, IRStmt_Dirty(di));

   /* Set the shadow tmp to be defined.  First, update the
      orig->shadow tmp mapping to reflect the fact that this shadow is
      getting a new value. */
   sk_assert(isAtom(vatom));
   /* sameKindedAtoms ... */
   if (vatom->tag == Iex_Tmp) {
      sk_assert(atom->tag == Iex_Tmp);
      newShadowTmp(mce, atom->Iex.Tmp.tmp);
      assign(mce->bb, findShadowTmp(mce, atom->Iex.Tmp.tmp),
                      definedOfType(ty));
   }
}
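
/* Rough shape of the IR this emits, shown as pseudo-IR for a 32-bit
   tmp t with shadow t# (temp numbering and exact printing are
   illustrative only):

      t_cond = CmpNE32(t#, 0x0:I32)                 -- pessimising cast
      if (t_cond) call MC_(helperc_value_check4_fail)()
                                                    -- guarded dirty call
      t#'    = 0x0:I32                              -- fresh shadow,
                                                       marked 'defined'

   and t# is rebound to t#' in mce->tmpMap. */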


/*------------------------------------------------------------*/
/*--- Shadowing PUTs/GETs, and indexed variants thereof    ---*/
/*------------------------------------------------------------*/

/* Examine the always-defined sections declared in layout to see if
   the (offset,size) section is within one.  Note, it is an error to
   partially fall into such a region: (offset,size) should either be
   completely within such a region or completely outside it.
*/
static Bool isAlwaysDefd ( MCEnv* mce, Int offset, Int size )
{
   Int minoffD, maxoffD, i;
   Int minoff = offset;
   Int maxoff = minoff + size - 1;
   sk_assert((minoff & ~0xFFFF) == 0);
   sk_assert((maxoff & ~0xFFFF) == 0);

   for (i = 0; i < mce->layout->n_alwaysDefd; i++) {
      minoffD = mce->layout->alwaysDefd[i].offset;
      maxoffD = minoffD + mce->layout->alwaysDefd[i].size - 1;
      sk_assert((minoffD & ~0xFFFF) == 0);
      sk_assert((maxoffD & ~0xFFFF) == 0);

      if (maxoff < minoffD || maxoffD < minoff)
         continue; /* no overlap */
      if (minoff >= minoffD && maxoff <= maxoffD)
         return True; /* completely contained in an always-defd section */

      VG_(skin_panic)("memcheck:isAlwaysDefd:partial overlap");
   }
   return False; /* could not find any containing section */
}


/* Generate into bb suitable actions to shadow this Put.  If the state
   slice is marked 'always defined', do nothing.  Otherwise, write the
   supplied V bits to the shadow state.  We can pass in either an
   original atom or a V-atom, but not both.  In the former case the
   relevant V-bits are then generated from the original.
*/
static
void do_shadow_PUT ( MCEnv* mce,  Int offset,
                     IRAtom* atom, IRAtom* vatom )
{
   if (atom) {
      sk_assert(!vatom);
      sk_assert(isOriginalAtom(mce, atom));
      vatom = expr2vbits( mce, atom );
   } else {
      sk_assert(vatom);
      sk_assert(isShadowAtom(mce, vatom));
   }

   IRType ty = typeOfIRExpr(mce->bb->tyenv, vatom);
   sk_assert(ty != Ity_I1);
   if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
      /* later: no ... */
      /* emit code to emit a complaint if any of the vbits are 1. */
      /* complainIfUndefined(mce, atom); */
   } else {
      /* Do a plain shadow Put. */
      stmt( mce->bb, IRStmt_Put( offset + mce->layout->total_sizeB, vatom ) );
   }
}
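
/* Layout note, for illustration: the shadow guest state is laid out
   as a copy of the real guest state, displaced by
   mce->layout->total_sizeB bytes.  So (using a made-up number) if
   total_sizeB were 1000, a Put to guest offset 60 would be shadowed
   by a Put of the V bits to offset 1060, and shadow_GET below reads
   back from the same displaced offset. */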


/* Generate into bb suitable actions to shadow this PutI (passed in
   in pieces).
*/
static
void do_shadow_PUTI ( MCEnv* mce,
                      IRArray* descr, IRAtom* ix, Int bias, IRAtom* atom )
{
   sk_assert(isOriginalAtom(mce,atom));
   IRAtom* vatom = expr2vbits( mce, atom );
   sk_assert(sameKindedAtoms(atom, vatom));
   IRType ty   = descr->elemTy;
   IRType tyS  = shadowType(ty);
   Int arrSize = descr->nElems * sizeofIRType(ty);
   sk_assert(ty != Ity_I1);
   sk_assert(isOriginalAtom(mce,ix));
   complainIfUndefined(mce,ix);
   if (isAlwaysDefd(mce, descr->base, arrSize)) {
      /* later: no ... */
      /* emit code to emit a complaint if any of the vbits are 1. */
      /* complainIfUndefined(mce, atom); */
   } else {
      /* Do a cloned version of the Put that refers to the shadow
         area. */
      IRArray* new_descr
         = mkIRArray( descr->base + mce->layout->total_sizeB,
                      tyS, descr->nElems);
      stmt( mce->bb, IRStmt_PutI( new_descr, ix, bias, vatom ));
   }
}


/* Return an expression which contains the V bits corresponding to the
   given GET (passed in in pieces).
*/
static
IRExpr* shadow_GET ( MCEnv* mce, Int offset, IRType ty )
{
   IRType tyS = shadowType(ty);
   sk_assert(ty != Ity_I1);
   if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
      /* Always defined, return all zeroes of the relevant type */
      return definedOfType(tyS);
   } else {
      /* return a cloned version of the Get that refers to the shadow
         area. */
      return IRExpr_Get( offset + mce->layout->total_sizeB, tyS );
   }
}


/* Return an expression which contains the V bits corresponding to the
   given GETI (passed in in pieces).
*/
static
IRExpr* shadow_GETI ( MCEnv* mce, IRArray* descr, IRAtom* ix, Int bias )
{
   IRType ty   = descr->elemTy;
   IRType tyS  = shadowType(ty);
   Int arrSize = descr->nElems * sizeofIRType(ty);
   sk_assert(ty != Ity_I1);
   sk_assert(isOriginalAtom(mce,ix));
   complainIfUndefined(mce,ix);
   if (isAlwaysDefd(mce, descr->base, arrSize)) {
      /* Always defined, return all zeroes of the relevant type */
      return definedOfType(tyS);
   } else {
      /* return a cloned version of the Get that refers to the shadow
         area. */
      IRArray* new_descr
         = mkIRArray( descr->base + mce->layout->total_sizeB,
                      tyS, descr->nElems);
      return IRExpr_GetI( new_descr, ix, bias );
   }
}


/*------------------------------------------------------------*/
/*--- Generating approximations for unknown operations,    ---*/
/*--- using lazy-propagate semantics                       ---*/
/*------------------------------------------------------------*/

/* Lazy propagation of undefinedness from two values, resulting in the
   specified shadow type.
*/
static
IRAtom* mkLazy2 ( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2 )
{
   /* force everything via 32-bit intermediaries. */
   IRAtom* at;
   sk_assert(isShadowAtom(mce,va1));
   sk_assert(isShadowAtom(mce,va2));
   at = mkPCastTo(mce, Ity_I32, va1);
   at = mkUifU(mce, Ity_I32, at, mkPCastTo(mce, Ity_I32, va2));
   at = mkPCastTo(mce, finalVty, at);
   return at;
}


/* Do the lazy propagation game from a null-terminated vector of
   atoms.  This is presumably the arguments to a helper call, so the
   IRCallee info is also supplied in order that we can know which
   arguments should be ignored (via the .mcx_mask field).
*/
static
IRAtom* mkLazyN ( MCEnv* mce,
                  IRAtom** exprvec, IRType finalVtype, IRCallee* cee )
{
   Int i;
   IRAtom* here;
   IRAtom* curr = definedOfType(Ity_I32);
   for (i = 0; exprvec[i]; i++) {
      sk_assert(i < 32);
      sk_assert(isOriginalAtom(mce, exprvec[i]));
      /* Only take notice of this arg if the callee's mc-exclusion
         mask does not say it is to be excluded. */
      if (cee->mcx_mask & (1<<i)) {
         /* the arg is to be excluded from definedness checking.  Do
            nothing. */
         if (0) VG_(printf)("excluding %s(%d)\n", cee->name, i);
      } else {
         /* calculate the arg's definedness, and pessimistically merge
            it in. */
         here = mkPCastTo( mce, Ity_I32, expr2vbits(mce, exprvec[i]) );
         curr = mkUifU32(mce, here, curr);
      }
   }
   return mkPCastTo(mce, finalVtype, curr );
}
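
/* For illustration (hypothetical callee): for a clean helper call
   foo(a, b, c) whose IRCallee has mcx_mask = 0x2 (argument b
   excluded), mkLazyN computes

      curr = PCast32(a#) `UifU32` PCast32(c#)

   and then PCasts curr to the call's return type.  In other words the
   result is fully undefined if any checked argument contains any
   undefined bit, and fully defined otherwise. */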


/*------------------------------------------------------------*/
/*--- Generating expensive sequences for exact carry-chain ---*/
/*--- propagation in add/sub and related operations.       ---*/
/*------------------------------------------------------------*/

static
IRAtom* expensiveAdd32 ( MCEnv* mce, IRAtom* qaa, IRAtom* qbb,
                         IRAtom* aa,  IRAtom* bb )
{
   sk_assert(isShadowAtom(mce,qaa));
   sk_assert(isShadowAtom(mce,qbb));
   sk_assert(isOriginalAtom(mce,aa));
   sk_assert(isOriginalAtom(mce,bb));
   sk_assert(sameKindedAtoms(qaa,aa));
   sk_assert(sameKindedAtoms(qbb,bb));

   IRType ty    = Ity_I32;
   IROp   opAND = Iop_And32;
   IROp   opOR  = Iop_Or32;
   IROp   opXOR = Iop_Xor32;
   IROp   opNOT = Iop_Not32;
   IROp   opADD = Iop_Add32;

   IRAtom *a_min, *b_min, *a_max, *b_max;

   // a_min = aa & ~qaa
   a_min = assignNew(mce,ty,
                     binop(opAND, aa,
                           assignNew(mce,ty, unop(opNOT, qaa))));

   // b_min = bb & ~qbb
   b_min = assignNew(mce,ty,
                     binop(opAND, bb,
                           assignNew(mce,ty, unop(opNOT, qbb))));

   // a_max = aa | qaa
   a_max = assignNew(mce,ty, binop(opOR, aa, qaa));

   // b_max = bb | qbb
   b_max = assignNew(mce,ty, binop(opOR, bb, qbb));

   // result = (qaa | qbb) | ((a_min + b_min) ^ (a_max + b_max))
   return
   assignNew(mce,ty,
      binop( opOR,
             assignNew(mce,ty, binop(opOR, qaa, qbb)),
             assignNew(mce,ty,
                binop(opXOR, assignNew(mce,ty, binop(opADD, a_min, b_min)),
                             assignNew(mce,ty, binop(opADD, a_max, b_max))
                )
             )
      )
   );
}
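
/* Worked example, for illustration.  Take aa = 0x5 with qaa = 0x2
   (bit 1 of aa undefined) and bb = 0x3 fully defined (qbb = 0).
   Then a_min = 0x5, a_max = 0x7, b_min = b_max = 0x3, so

      (a_min + b_min) ^ (a_max + b_max) = 0x8 ^ 0xA = 0x2

   and the result V bits are (qaa|qbb) | 0x2 = 0x2: only bit 1 of the
   sum is reported undefined, which matches the two possible sums 8
   and 10.  The cheap scheme used for Iop_Add32 below,
   mkLeft32(mkUifU32(qaa,qbb)), would instead give 0xFFFFFFFE,
   i.e. every bit from bit 1 upwards undefined. */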


/*------------------------------------------------------------*/
/*--- Generate shadow values from all kinds of IRExprs.    ---*/
/*------------------------------------------------------------*/

static
IRAtom* expr2vbits_Binop ( MCEnv* mce,
                           IROp op,
                           IRAtom* atom1, IRAtom* atom2 )
{
   IRType  and_or_ty;
   IRAtom* (*uifu)    (MCEnv*, IRAtom*, IRAtom*);
   IRAtom* (*difd)    (MCEnv*, IRAtom*, IRAtom*);
   IRAtom* (*improve) (MCEnv*, IRAtom*, IRAtom*);

   IRAtom* vatom1 = expr2vbits( mce, atom1 );
   IRAtom* vatom2 = expr2vbits( mce, atom2 );

   sk_assert(isOriginalAtom(mce,atom1));
   sk_assert(isOriginalAtom(mce,atom2));
   sk_assert(isShadowAtom(mce,vatom1));
   sk_assert(isShadowAtom(mce,vatom2));
   sk_assert(sameKindedAtoms(atom1,vatom1));
   sk_assert(sameKindedAtoms(atom2,vatom2));
   switch (op) {

      case Iop_RoundF64:
      case Iop_F64toI64:
         /* First arg is I32 (rounding mode), second is F64 (data). */
         return mkLazy2(mce, Ity_I64, vatom1, vatom2);

      case Iop_PRemC3210F64: case Iop_PRem1C3210F64:
         /* Takes two F64 args. */
      case Iop_F64toI32:
         /* First arg is I32 (rounding mode), second is F64 (data). */
         return mkLazy2(mce, Ity_I32, vatom1, vatom2);

      case Iop_F64toI16:
         /* First arg is I32 (rounding mode), second is F64 (data). */
         return mkLazy2(mce, Ity_I16, vatom1, vatom2);

      case Iop_ScaleF64:
      case Iop_Yl2xF64:
      case Iop_Yl2xp1F64:
      case Iop_PRemF64:
      case Iop_AtanF64:
      case Iop_AddF64:
      case Iop_DivF64:
      case Iop_SubF64:
      case Iop_MulF64:
         return mkLazy2(mce, Ity_I64, vatom1, vatom2);

      case Iop_CmpF64:
         return mkLazy2(mce, Ity_I32, vatom1, vatom2);

      /* non-FP after here */

      case Iop_DivModU64to32:
      case Iop_DivModS64to32:
         return mkLazy2(mce, Ity_I64, vatom1, vatom2);

      case Iop_16HLto32:
         return assignNew(mce, Ity_I32,
                          binop(Iop_16HLto32, vatom1, vatom2));
      case Iop_32HLto64:
         return assignNew(mce, Ity_I64,
                          binop(Iop_32HLto64, vatom1, vatom2));

      case Iop_MullS32:
      case Iop_MullU32: {
         IRAtom* vLo32 = mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));
         IRAtom* vHi32 = mkPCastTo(mce, Ity_I32, vLo32);
         return assignNew(mce, Ity_I64, binop(Iop_32HLto64, vHi32, vLo32));
      }

      case Iop_MullS16:
      case Iop_MullU16: {
         IRAtom* vLo16 = mkLeft16(mce, mkUifU16(mce, vatom1,vatom2));
         IRAtom* vHi16 = mkPCastTo(mce, Ity_I16, vLo16);
         return assignNew(mce, Ity_I32, binop(Iop_16HLto32, vHi16, vLo16));
      }

      case Iop_MullS8:
      case Iop_MullU8: {
         IRAtom* vLo8 = mkLeft8(mce, mkUifU8(mce, vatom1,vatom2));
         IRAtom* vHi8 = mkPCastTo(mce, Ity_I8, vLo8);
         return assignNew(mce, Ity_I16, binop(Iop_8HLto16, vHi8, vLo8));
      }

      case Iop_Add32:
#        if 0
         return expensiveAdd32(mce, vatom1,vatom2, atom1,atom2);
#        endif
      case Iop_Sub32:
      case Iop_Mul32:
         return mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));

      case Iop_Mul16:
      case Iop_Add16:
      case Iop_Sub16:
         return mkLeft16(mce, mkUifU16(mce, vatom1,vatom2));

      case Iop_Sub8:
      case Iop_Add8:
         return mkLeft8(mce, mkUifU8(mce, vatom1,vatom2));

      case Iop_CmpLE32S: case Iop_CmpLE32U:
      case Iop_CmpLT32U: case Iop_CmpLT32S:
      case Iop_CmpEQ32: case Iop_CmpNE32:
         return mkPCastTo(mce, Ity_I1, mkUifU32(mce, vatom1,vatom2));

      case Iop_CmpEQ16: case Iop_CmpNE16:
         return mkPCastTo(mce, Ity_I1, mkUifU16(mce, vatom1,vatom2));

      case Iop_CmpEQ8: case Iop_CmpNE8:
         return mkPCastTo(mce, Ity_I1, mkUifU8(mce, vatom1,vatom2));

      case Iop_Shl32: case Iop_Shr32: case Iop_Sar32:
         /* Complain if the shift amount is undefined.  Then simply
            shift the first arg's V bits by the real shift amount. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I32, binop(op, vatom1, atom2));

      case Iop_Shl16: case Iop_Shr16:
         /* Same scheme as with 32-bit shifts. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I16, binop(op, vatom1, atom2));

      case Iop_Shl8: case Iop_Shr8:
         /* Same scheme as with 32-bit shifts. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I8, binop(op, vatom1, atom2));

      case Iop_Shl64: case Iop_Shr64:
         /* Same scheme as with 32-bit shifts. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I64, binop(op, vatom1, atom2));

      case Iop_And32:
         uifu = mkUifU32; difd = mkDifD32;
         and_or_ty = Ity_I32; improve = mkImproveAND32; goto do_And_Or;
      case Iop_And16:
         uifu = mkUifU16; difd = mkDifD16;
         and_or_ty = Ity_I16; improve = mkImproveAND16; goto do_And_Or;
      case Iop_And8:
         uifu = mkUifU8; difd = mkDifD8;
         and_or_ty = Ity_I8; improve = mkImproveAND8; goto do_And_Or;

      case Iop_Or32:
         uifu = mkUifU32; difd = mkDifD32;
         and_or_ty = Ity_I32; improve = mkImproveOR32; goto do_And_Or;
      case Iop_Or16:
         uifu = mkUifU16; difd = mkDifD16;
         and_or_ty = Ity_I16; improve = mkImproveOR16; goto do_And_Or;
      case Iop_Or8:
         uifu = mkUifU8; difd = mkDifD8;
         and_or_ty = Ity_I8; improve = mkImproveOR8; goto do_And_Or;

      do_And_Or:
         return
         assignNew(
            mce,
            and_or_ty,
            difd(mce, uifu(mce, vatom1, vatom2),
                      difd(mce, improve(mce, atom1, vatom1),
                                improve(mce, atom2, vatom2) ) ) );

      case Iop_Xor8:
         return mkUifU8(mce, vatom1, vatom2);
      case Iop_Xor16:
         return mkUifU16(mce, vatom1, vatom2);
      case Iop_Xor32:
         return mkUifU32(mce, vatom1, vatom2);

      default:
         ppIROp(op);
         VG_(skin_panic)("memcheck:expr2vbits_Binop");
   }
}


static
IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
{
   IRAtom* vatom = expr2vbits( mce, atom );
   sk_assert(isOriginalAtom(mce,atom));
   switch (op) {

      case Iop_F32toF64:
      case Iop_I32toF64:
      case Iop_I64toF64:
      case Iop_NegF64:
      case Iop_SinF64:
      case Iop_CosF64:
      case Iop_TanF64:
      case Iop_SqrtF64:
      case Iop_AbsF64:
      case Iop_2xm1F64:
         return mkPCastTo(mce, Ity_I64, vatom);

      case Iop_F64toF32:
      case Iop_Clz32:
      case Iop_Ctz32:
         return mkPCastTo(mce, Ity_I32, vatom);

      case Iop_32Sto64:
      case Iop_32Uto64:
         return assignNew(mce, Ity_I64, unop(op, vatom));

      case Iop_64to32:
      case Iop_64HIto32:
      case Iop_1Uto32:
      case Iop_8Uto32:
      case Iop_16Uto32:
      case Iop_16Sto32:
      case Iop_8Sto32:
         return assignNew(mce, Ity_I32, unop(op, vatom));

      case Iop_8Sto16:
      case Iop_8Uto16:
      case Iop_32to16:
      case Iop_32HIto16:
         return assignNew(mce, Ity_I16, unop(op, vatom));

      case Iop_1Uto8:
      case Iop_16to8:
      case Iop_32to8:
         return assignNew(mce, Ity_I8, unop(op, vatom));

      case Iop_32to1:
         return assignNew(mce, Ity_I1, unop(Iop_32to1, vatom));

      case Iop_ReinterpF64asI64:
      case Iop_ReinterpI64asF64:
      case Iop_Not32:
      case Iop_Not16:
      case Iop_Not8:
      case Iop_Not1:
         return vatom;
      default:
         ppIROp(op);
         VG_(skin_panic)("memcheck:expr2vbits_Unop");
   }
}


static
IRAtom* expr2vbits_LDle ( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
{
   void*    helper;
   Char*    hname;
   IRDirty* di;
   IRTemp   datavbits;
   IRAtom*  addrAct;

   sk_assert(isOriginalAtom(mce,addr));

   /* First, emit a definedness test for the address.  This also sets
      the address (shadow) to 'defined' following the test. */
   complainIfUndefined( mce, addr );

   /* Now cook up a call to the relevant helper function, to read the
      data V bits from shadow memory. */
   ty = shadowType(ty);
   switch (ty) {
      case Ity_I64: helper = &MC_(helperc_LOADV8);
                    hname = "MC_(helperc_LOADV8)";
                    break;
      case Ity_I32: helper = &MC_(helperc_LOADV4);
                    hname = "MC_(helperc_LOADV4)";
                    break;
      case Ity_I16: helper = &MC_(helperc_LOADV2);
                    hname = "MC_(helperc_LOADV2)";
                    break;
      case Ity_I8:  helper = &MC_(helperc_LOADV1);
                    hname = "MC_(helperc_LOADV1)";
                    break;
      default:      ppIRType(ty);
                    VG_(skin_panic)("memcheck:do_shadow_LDle");
   }

   /* Generate the actual address into addrAct. */
   if (bias == 0) {
      addrAct = addr;
   } else {
      IRType  tyAddr = mce->hWordTy;
      sk_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
      IROp    mkAdd  = tyAddr==Ity_I32 ? Iop_Add32 : Iop_Add64;
      IRAtom* eBias  = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
      addrAct = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias) );
   }

   /* We need to have a place to park the V bits we're just about to
      read. */
   datavbits = newIRTemp(mce->bb->tyenv, ty);
   di = unsafeIRDirty_1_N( datavbits,
                           1/*regparms*/, hname, helper,
                           mkIRExprVec_1( addrAct ));
   setHelperAnns( mce, di );
   stmt( mce->bb, IRStmt_Dirty(di) );

   return mkexpr(datavbits);
}


static
IRAtom* expr2vbits_Mux0X ( MCEnv* mce,
                           IRAtom* cond, IRAtom* expr0, IRAtom* exprX )
{
   IRAtom *vbitsC, *vbits0, *vbitsX;
   IRType ty;
   /* Given Mux0X(cond,expr0,exprX), generate
         Mux0X(cond,expr0#,exprX#) `UifU` PCast(cond#)
      That is, steer the V bits like the originals, but trash the
      result if the steering value is undefined.  This gives
      lazy propagation. */
   sk_assert(isOriginalAtom(mce, cond));
   sk_assert(isOriginalAtom(mce, expr0));
   sk_assert(isOriginalAtom(mce, exprX));

   vbitsC = expr2vbits(mce, cond);
   vbits0 = expr2vbits(mce, expr0);
   vbitsX = expr2vbits(mce, exprX);
   ty = typeOfIRExpr(mce->bb->tyenv, vbits0);

   return
      mkUifU(mce, ty, assignNew(mce, ty, IRExpr_Mux0X(cond, vbits0, vbitsX)),
                      mkPCastTo(mce, ty, vbitsC) );
}

/* --------- This is the main expression-handling function. --------- */

static
IRExpr* expr2vbits ( MCEnv* mce, IRExpr* e )
{
   switch (e->tag) {

      case Iex_Get:
         return shadow_GET( mce, e->Iex.Get.offset, e->Iex.Get.ty );

      case Iex_GetI:
         return shadow_GETI( mce, e->Iex.GetI.descr,
                                  e->Iex.GetI.ix, e->Iex.GetI.bias );

      case Iex_Tmp:
         return IRExpr_Tmp( findShadowTmp(mce, e->Iex.Tmp.tmp) );

      case Iex_Const:
         return definedOfType(shadowType(typeOfIRExpr(mce->bb->tyenv, e)));

      case Iex_Binop:
         return expr2vbits_Binop(
                   mce,
                   e->Iex.Binop.op,
                   e->Iex.Binop.arg1, e->Iex.Binop.arg2
                );

      case Iex_Unop:
         return expr2vbits_Unop( mce, e->Iex.Unop.op, e->Iex.Unop.arg );

      case Iex_LDle:
         return expr2vbits_LDle( mce, e->Iex.LDle.ty,
                                      e->Iex.LDle.addr, 0/*addr bias*/ );

      case Iex_CCall:
         return mkLazyN( mce, e->Iex.CCall.args,
                              e->Iex.CCall.retty,
                              e->Iex.CCall.cee );

      case Iex_Mux0X:
         return expr2vbits_Mux0X( mce, e->Iex.Mux0X.cond, e->Iex.Mux0X.expr0,
                                       e->Iex.Mux0X.exprX);

      default:
         VG_(printf)("\n");
         ppIRExpr(e);
         VG_(printf)("\n");
         VG_(skin_panic)("memcheck: expr2vbits");
   }
}

/*------------------------------------------------------------*/
/*--- Generate shadow stmts from all kinds of IRStmts.     ---*/
/*------------------------------------------------------------*/

/* Widen a value to the host word size. */

static
IRExpr* zwidenToHostWord ( MCEnv* mce, IRAtom* vatom )
{
   /* vatom is a vbits-value and as such can only have a shadow type. */
   sk_assert(isShadowAtom(mce,vatom));

   IRType ty  = typeOfIRExpr(mce->bb->tyenv, vatom);
   IRType tyH = mce->hWordTy;

   if (tyH == Ity_I32) {
      switch (ty) {
         case Ity_I32: return vatom;
         case Ity_I16: return assignNew(mce, tyH, unop(Iop_16Uto32, vatom));
         case Ity_I8:  return assignNew(mce, tyH, unop(Iop_8Uto32, vatom));
         default:      goto unhandled;
      }
   } else {
      goto unhandled;
   }
  unhandled:
   VG_(printf)("\nty = "); ppIRType(ty); VG_(printf)("\n");
   VG_(skin_panic)("zwidenToHostWord");
}

/* Generate a shadow store.  addr is always the original address atom.
   You can pass in either originals or V-bits for the data atom, but
   obviously not both. */

static
void do_shadow_STle ( MCEnv* mce,
                      IRAtom* addr, UInt bias,
                      IRAtom* data, IRAtom* vdata )
{
   IRType   ty;
   IRDirty* di;
   void*    helper = NULL;
   Char*    hname = NULL;
   IRAtom*  addrAct;

   if (data) {
      sk_assert(!vdata);
      sk_assert(isOriginalAtom(mce, data));
      sk_assert(bias == 0);
      vdata = expr2vbits( mce, data );
   } else {
      sk_assert(vdata);
   }

   sk_assert(isOriginalAtom(mce,addr));
   sk_assert(isShadowAtom(mce,vdata));

   ty = typeOfIRExpr(mce->bb->tyenv, vdata);

   /* First, emit a definedness test for the address.  This also sets
      the address (shadow) to 'defined' following the test. */
   complainIfUndefined( mce, addr );

   /* Now cook up a call to the relevant helper function, to write the
      data V bits into shadow memory. */
   switch (ty) {
      case Ity_I64: helper = &MC_(helperc_STOREV8);
                    hname = "MC_(helperc_STOREV8)";
                    break;
      case Ity_I32: helper = &MC_(helperc_STOREV4);
                    hname = "MC_(helperc_STOREV4)";
                    break;
      case Ity_I16: helper = &MC_(helperc_STOREV2);
                    hname = "MC_(helperc_STOREV2)";
                    break;
      case Ity_I8:  helper = &MC_(helperc_STOREV1);
                    hname = "MC_(helperc_STOREV1)";
                    break;
      default:      VG_(skin_panic)("memcheck:do_shadow_STle");
   }

   /* Generate the actual address into addrAct. */
   if (bias == 0) {
      addrAct = addr;
   } else {
      IRType  tyAddr = mce->hWordTy;
      sk_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
      IROp    mkAdd  = tyAddr==Ity_I32 ? Iop_Add32 : Iop_Add64;
      IRAtom* eBias  = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
      addrAct = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias) );
   }

   if (ty == Ity_I64) {
      /* We can't do this with regparm 2 on 32-bit platforms, since
         the back ends aren't clever enough to handle 64-bit regparm
         args.  Therefore be different. */
      di = unsafeIRDirty_0_N(
              1/*regparms*/, hname, helper,
              mkIRExprVec_2( addrAct, vdata ));
   } else {
      di = unsafeIRDirty_0_N(
              2/*regparms*/, hname, helper,
              mkIRExprVec_2( addrAct,
                             zwidenToHostWord( mce, vdata )));
   }
   setHelperAnns( mce, di );
   stmt( mce->bb, IRStmt_Dirty(di) );
}
sewardj2985e1b2004-11-06 13:51:48 +00001293
sewardj9f8abf82004-11-10 02:39:49 +00001294/* Do lazy pessimistic propagation through a dirty helper call, by
1295 looking at the annotations on it. This is the most complex part of
1296 Memcheck. */
1297
1298static IRType szToITy ( Int n )
1299{
1300 switch (n) {
1301 case 1: return Ity_I8;
1302 case 2: return Ity_I16;
1303 case 4: return Ity_I32;
1304 case 8: return Ity_I64;
1305 default: VG_(skin_panic)("szToITy(memcheck)");
1306 }
1307}
1308
1309static
1310void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
1311{
sewardjb139e822004-11-10 12:07:51 +00001312 Int i, offset, toDo;
1313 IRAtom* src;
1314 IRType tyAddr, tySrc, tyDst;
1315 IRTemp dst;
sewardj9f8abf82004-11-10 02:39:49 +00001316
1317 /* First check the guard. */
1318 complainIfUndefined(mce, d->guard);
1319
1320 /* Now round up all inputs and PCast over them. */
1321 IRAtom* here;
1322 IRAtom* curr = definedOfType(Ity_I32);
1323
1324 /* Inputs: unmasked args */
1325 for (i = 0; d->args[i]; i++) {
1326 if (d->cee->mcx_mask & (1<<i)) {
1327 /* ignore this arg */
1328 } else {
1329 here = mkPCastTo( mce, Ity_I32, expr2vbits(mce, d->args[i]) );
1330 curr = mkUifU32(mce, here, curr);
1331 }
1332 }
1333
1334 /* Inputs: guest state that we read. */
1335 for (i = 0; i < d->nFxState; i++) {
1336 sk_assert(d->fxState[i].fx != Ifx_None);
1337 if (d->fxState[i].fx == Ifx_Write)
1338 continue;
1339 /* This state element is read or modified. So we need to
1340 consider it. */
1341 tySrc = szToITy( d->fxState[i].size );
1342 src = assignNew( mce, tySrc,
1343 shadow_GET(mce, d->fxState[i].offset, tySrc ) );
1344 here = mkPCastTo( mce, Ity_I32, src );
1345 curr = mkUifU32(mce, here, curr);
1346 }
1347
1348 /* Inputs: memory. First set up some info needed regardless of
1349 whether we're doing reads or writes. */
sewardj9f8abf82004-11-10 02:39:49 +00001350 tyAddr = Ity_INVALID;
1351
1352 if (d->mFx != Ifx_None) {
1353 /* Because we may do multiple shadow loads/stores from the same
1354 base address, it's best to do a single test of its
1355 definedness right now. Post-instrumentation optimisation
1356 should remove all but this test. */
1357 sk_assert(d->mAddr);
1358 complainIfUndefined(mce, d->mAddr);
1359
1360 tyAddr = typeOfIRExpr(mce->bb->tyenv, d->mAddr);
1361 sk_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
sewardjb139e822004-11-10 12:07:51 +00001362      sk_assert(tyAddr == mce->hWordTy); /* assumes guest word size == host word size; not right in general */
sewardj9f8abf82004-11-10 02:39:49 +00001363 }
1364
1365 /* Deal with memory inputs (reads or modifies) */
1366 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
1367 offset = 0;
1368 toDo = d->mSize;
sewardj9f8abf82004-11-10 02:39:49 +00001369 /* chew off 32-bit chunks */
1370 while (toDo >= 4) {
1371 here = mkPCastTo(
1372 mce, Ity_I32,
sewardjb139e822004-11-10 12:07:51 +00001373 expr2vbits_LDle ( mce, Ity_I32,
1374 d->mAddr, d->mSize - toDo )
sewardj9f8abf82004-11-10 02:39:49 +00001375 );
1376 curr = mkUifU32(mce, here, curr);
sewardj9f8abf82004-11-10 02:39:49 +00001377 toDo -= 4;
1378 }
1379 /* chew off 16-bit chunks */
1380 while (toDo >= 2) {
1381 here = mkPCastTo(
1382 mce, Ity_I32,
sewardjb139e822004-11-10 12:07:51 +00001383 expr2vbits_LDle ( mce, Ity_I16,
1384 d->mAddr, d->mSize - toDo )
sewardj9f8abf82004-11-10 02:39:49 +00001385 );
1386 curr = mkUifU32(mce, here, curr);
sewardj9f8abf82004-11-10 02:39:49 +00001387 toDo -= 2;
1388 }
1389 sk_assert(toDo == 0); /* also need to handle 1-byte excess */
1390 }
1391
1392 /* Whew! So curr is a 32-bit V-value summarising pessimistically
1393 all the inputs to the helper. Now we need to re-distribute the
1394 results to all destinations. */
1395
1396 /* Outputs: the destination temporary, if there is one. */
sewardj92d168d2004-11-15 14:22:12 +00001397 if (d->tmp != IRTemp_INVALID) {
sewardj9f8abf82004-11-10 02:39:49 +00001398 dst = findShadowTmp(mce, d->tmp);
1399 tyDst = typeOfIRTemp(mce->bb->tyenv, d->tmp);
1400 assign( mce->bb, dst, mkPCastTo( mce, tyDst, curr) );
1401 }
1402
1403 /* Outputs: guest state that we write or modify. */
1404 for (i = 0; i < d->nFxState; i++) {
1405 sk_assert(d->fxState[i].fx != Ifx_None);
1406 if (d->fxState[i].fx == Ifx_Read)
1407 continue;
1408 /* this state element is written or modified. So we need to
1409 consider it. */
1410 tyDst = szToITy( d->fxState[i].size );
1411 do_shadow_PUT( mce, d->fxState[i].offset,
1412 NULL, /* original atom */
1413 mkPCastTo( mce, tyDst, curr ) );
1414 }
1415
1416 /* Outputs: memory that we write or modify. */
1417 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
1418 offset = 0;
1419 toDo = d->mSize;
sewardj9f8abf82004-11-10 02:39:49 +00001420 /* chew off 32-bit chunks */
1421 while (toDo >= 4) {
sewardjb139e822004-11-10 12:07:51 +00001422 do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
1423 NULL, /* original data */
sewardj9f8abf82004-11-10 02:39:49 +00001424 mkPCastTo( mce, Ity_I32, curr ) );
sewardj9f8abf82004-11-10 02:39:49 +00001425 toDo -= 4;
1426 }
1427 /* chew off 16-bit chunks */
1428 while (toDo >= 2) {
sewardjb139e822004-11-10 12:07:51 +00001429 do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
1430 NULL, /* original data */
1431 mkPCastTo( mce, Ity_I16, curr ) );
sewardj9f8abf82004-11-10 02:39:49 +00001432 toDo -= 2;
1433 }
1434 sk_assert(toDo == 0); /* also need to handle 1-byte excess */
1435 }
1436
1437}
1438
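/* A minimal standalone sketch of the pessimistic-summary scheme used
   by do_shadow_Dirty above, expressed on V-bit values rather than as
   IR.  Each input's V-bits are first PCast-ed down to a single
   32-bit summary (any undefined bit -- a 1 bit, in Memcheck's
   convention -- makes the whole summary undefined), the per-input
   summaries are folded together with UifU (bitwise OR), and the one
   resulting summary is then PCast-ed back out to every destination.
   The 'toy_' names are hypothetical; the real thing is built as IR
   via mkPCastTo and mkUifU32. */
#if 0 /* ILLUSTRATIVE SKETCH */
static UInt toy_pcast_to_I32 ( ULong vbits )
{
   /* All-defined stays all-defined; anything else becomes
      all-undefined. */
   return vbits == 0 ? 0x00000000 : 0xFFFFFFFF;
}

static UInt toy_uifu32 ( UInt va, UInt vb )
{
   /* Undefined-if-either-Undefined: OR the undefinedness together. */
   return va | vb;
}

static UInt toy_summarise ( ULong* input_vbits, Int n_inputs )
{
   Int  i;
   UInt curr = 0x00000000;   /* start fully defined, cf definedOfType */
   for (i = 0; i < n_inputs; i++)
      curr = toy_uifu32( curr, toy_pcast_to_I32( input_vbits[i] ) );
   return curr;
}
#endif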
1439
sewardj2985e1b2004-11-06 13:51:48 +00001440/*------------------------------------------------------------*/
1441/*--- Memcheck main ---*/
1442/*------------------------------------------------------------*/
sewardj1dbd19d2004-11-03 09:10:30 +00001443
sewardj67cc0db2004-11-07 00:57:46 +00001444#if 0 /* UNUSED */
1445static Bool isBogusAtom ( IRAtom* at )
1446{
1447 ULong n = 0;
1448 IRConst* con;
1449 sk_assert(isAtom(at));
1450 if (at->tag == Iex_Tmp)
1451 return False;
1452 sk_assert(at->tag == Iex_Const);
1453 con = at->Iex.Const.con;
1454 switch (con->tag) {
1455 case Ico_U8: n = (ULong)con->Ico.U8; break;
1456 case Ico_U16: n = (ULong)con->Ico.U16; break;
1457 case Ico_U32: n = (ULong)con->Ico.U32; break;
1458 case Ico_U64: n = (ULong)con->Ico.U64; break;
1459 default: ppIRExpr(at); sk_assert(0);
1460 }
1461 /* VG_(printf)("%llx\n", n); */
1462 return (n == 0xFEFEFEFF
1463 || n == 0x80808080
1464 || n == 0x1010101
1465             || n == 0x1010100);
1466}
1467
1468static Bool checkForBogusLiterals ( /*FLAT*/ IRStmt* st )
1469{
1470 Int i;
1471 IRExpr* e;
1472 switch (st->tag) {
1473 case Ist_Tmp:
1474 e = st->Ist.Tmp.data;
1475 switch (e->tag) {
1476 case Iex_Get:
1477 case Iex_Tmp:
1478 return False;
1479 case Iex_Unop:
1480 return isBogusAtom(e->Iex.Unop.arg);
1481 case Iex_Binop:
1482 return isBogusAtom(e->Iex.Binop.arg1)
1483 || isBogusAtom(e->Iex.Binop.arg2);
1484 case Iex_Mux0X:
1485 return isBogusAtom(e->Iex.Mux0X.cond)
1486 || isBogusAtom(e->Iex.Mux0X.expr0)
1487 || isBogusAtom(e->Iex.Mux0X.exprX);
1488 case Iex_LDle:
1489 return isBogusAtom(e->Iex.LDle.addr);
1490 case Iex_CCall:
1491 for (i = 0; e->Iex.CCall.args[i]; i++)
1492 if (isBogusAtom(e->Iex.CCall.args[i]))
1493 return True;
1494 return False;
1495 default:
1496 goto unhandled;
1497 }
1498 case Ist_Put:
1499 return isBogusAtom(st->Ist.Put.data);
1500 case Ist_STle:
1501 return isBogusAtom(st->Ist.STle.addr)
1502 || isBogusAtom(st->Ist.STle.data);
1503 case Ist_Exit:
1504 return isBogusAtom(st->Ist.Exit.cond);
1505 default:
1506 unhandled:
1507 ppIRStmt(st);
1508      VG_(skin_panic)("checkForBogusLiterals");
1509 }
1510}
1511#endif /* UNUSED */
1512
1513
sewardj1dbd19d2004-11-03 09:10:30 +00001514IRBB* SK_(instrument) ( IRBB* bb_in, VexGuestLayout* layout, IRType hWordTy )
1515{
sewardj9f8abf82004-11-10 02:39:49 +00001516 Bool verboze = False; //True;
sewardj67cc0db2004-11-07 00:57:46 +00001517
1518 /* Bool hasBogusLiterals = False; */
sewardj1dbd19d2004-11-03 09:10:30 +00001519
sewardj2985e1b2004-11-06 13:51:48 +00001520 Int i, j, first_stmt;
sewardj1dbd19d2004-11-03 09:10:30 +00001521 IRStmt* st;
1522 MCEnv mce;
1523
1524 /* Set up BB */
1525 IRBB* bb = emptyIRBB();
1526 bb->tyenv = dopyIRTypeEnv(bb_in->tyenv);
1527 bb->next = dopyIRExpr(bb_in->next);
1528 bb->jumpkind = bb_in->jumpkind;
1529
sewardj1dbd19d2004-11-03 09:10:30 +00001530 /* Set up the running environment. Only .bb is modified as we go
1531 along. */
1532 mce.bb = bb;
1533 mce.layout = layout;
sewardj2985e1b2004-11-06 13:51:48 +00001534 mce.n_originalTmps = bb->tyenv->types_used;
sewardj1dbd19d2004-11-03 09:10:30 +00001535 mce.hWordTy = hWordTy;
sewardj2985e1b2004-11-06 13:51:48 +00001536 mce.tmpMap = LibVEX_Alloc(mce.n_originalTmps * sizeof(IRTemp));
1537 for (i = 0; i < mce.n_originalTmps; i++)
sewardj92d168d2004-11-15 14:22:12 +00001538 mce.tmpMap[i] = IRTemp_INVALID;
sewardj1dbd19d2004-11-03 09:10:30 +00001539
sewardj2985e1b2004-11-06 13:51:48 +00001540 /* Iterate over the stmts. */
1541
sewardj1dbd19d2004-11-03 09:10:30 +00001542 for (i = 0; i < bb_in->stmts_used; i++) {
1543 st = bb_in->stmts[i];
1544 if (!st) continue;
1545
1546 sk_assert(isFlatIRStmt(st));
sewardj67cc0db2004-11-07 00:57:46 +00001547
1548 /*
1549 if (!hasBogusLiterals) {
1550 hasBogusLiterals = checkForBogusLiterals(st);
1551 if (hasBogusLiterals) {
1552 VG_(printf)("bogus: ");
1553 ppIRStmt(st);
1554 VG_(printf)("\n");
1555 }
1556 }
1557 */
sewardj1dbd19d2004-11-03 09:10:30 +00001558 first_stmt = bb->stmts_used;
1559
sewardj3861f462004-11-06 14:22:03 +00001560 if (verboze) {
sewardj1dbd19d2004-11-03 09:10:30 +00001561 ppIRStmt(st);
1562 VG_(printf)("\n\n");
1563 }
1564
1565 switch (st->tag) {
1566
1567 case Ist_Tmp:
1568 assign( bb, findShadowTmp(&mce, st->Ist.Tmp.tmp),
1569 expr2vbits( &mce, st->Ist.Tmp.data) );
1570 break;
1571
1572 case Ist_Put:
1573 do_shadow_PUT( &mce,
1574 st->Ist.Put.offset,
sewardj9f8abf82004-11-10 02:39:49 +00001575 st->Ist.Put.data,
1576 NULL /* shadow atom */ );
sewardj1dbd19d2004-11-03 09:10:30 +00001577 break;
1578
sewardj9da6e242004-11-05 01:56:14 +00001579 case Ist_PutI:
1580 do_shadow_PUTI( &mce,
1581 st->Ist.PutI.descr,
1582 st->Ist.PutI.ix,
1583 st->Ist.PutI.bias,
1584 st->Ist.PutI.data );
1585 break;
1586
sewardj1dbd19d2004-11-03 09:10:30 +00001587 case Ist_STle:
sewardjb139e822004-11-10 12:07:51 +00001588 do_shadow_STle( &mce, st->Ist.STle.addr, 0/* addr bias */,
1589 st->Ist.STle.data,
1590 NULL /* shadow data */ );
sewardj1dbd19d2004-11-03 09:10:30 +00001591 break;
1592
sewardja6929da2004-11-03 15:22:25 +00001593 case Ist_Exit:
sewardj67cc0db2004-11-07 00:57:46 +00001594 /* if (!hasBogusLiterals) */
1595 complainIfUndefined( &mce, st->Ist.Exit.cond );
sewardja6929da2004-11-03 15:22:25 +00001596 break;
1597
sewardj9f8abf82004-11-10 02:39:49 +00001598 case Ist_Dirty:
1599 do_shadow_Dirty( &mce, st->Ist.Dirty.details );
1600 break;
1601
sewardj1dbd19d2004-11-03 09:10:30 +00001602 default:
1603 VG_(printf)("\n");
1604 ppIRStmt(st);
1605 VG_(printf)("\n");
1606 VG_(skin_panic)("memcheck: unhandled IRStmt");
1607
1608 } /* switch (st->tag) */
1609
sewardj3861f462004-11-06 14:22:03 +00001610 if (verboze) {
sewardj1dbd19d2004-11-03 09:10:30 +00001611 for (j = first_stmt; j < bb->stmts_used; j++) {
1612 VG_(printf)(" ");
1613 ppIRStmt(bb->stmts[j]);
1614 VG_(printf)("\n");
1615 }
1616 VG_(printf)("\n");
1617 }
1618
1619 addStmtToIRBB(bb, st);
1620
1621 }
1622
sewardj2985e1b2004-11-06 13:51:48 +00001623 /* Now we need to complain if the jump target is undefined. */
sewardja6929da2004-11-03 15:22:25 +00001624 first_stmt = bb->stmts_used;
1625
sewardj3861f462004-11-06 14:22:03 +00001626 if (verboze) {
sewardja6929da2004-11-03 15:22:25 +00001627 VG_(printf)("bb->next = ");
1628 ppIRExpr(bb->next);
1629 VG_(printf)("\n\n");
1630 }
1631
1632 complainIfUndefined( &mce, bb->next );
1633
sewardj3861f462004-11-06 14:22:03 +00001634 if (verboze) {
sewardja6929da2004-11-03 15:22:25 +00001635 for (j = first_stmt; j < bb->stmts_used; j++) {
1636 VG_(printf)(" ");
1637 ppIRStmt(bb->stmts[j]);
1638 VG_(printf)("\n");
1639 }
1640 VG_(printf)("\n");
1641 }
sewardj1dbd19d2004-11-03 09:10:30 +00001642
1643 return bb;
sewardj1dbd19d2004-11-03 09:10:30 +00001644}
1645
sewardj21082ff2004-10-19 13:11:35 +00001646/*--------------------------------------------------------------------*/
1647/*--- end mc_translate.c ---*/
1648/*--------------------------------------------------------------------*/