blob: 470f3cf1d07d682474e93b85f25c5dc93e3bead4 [file] [log] [blame]
nethercotebb1c9912004-01-04 16:43:23 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
sewardj95448072004-11-22 20:19:51 +00003/*--- Instrument IR to perform memory checking operations. ---*/
njn25cac76cb2002-09-23 11:21:57 +00004/*--- mc_translate.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00005/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00006
njn25e49d8e72002-09-23 09:36:25 +00007/*
nethercote137bc552003-11-14 17:47:54 +00008 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +00009 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000010
nethercotebb1c9912004-01-04 16:43:23 +000011 Copyright (C) 2000-2004 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
njn25cac76cb2002-09-23 11:21:57 +000032#include "mc_include.h"
njn25e49d8e72002-09-23 09:36:25 +000033
njn25e49d8e72002-09-23 09:36:25 +000034
sewardj95448072004-11-22 20:19:51 +000035/*------------------------------------------------------------*/
36/*--- Forward decls ---*/
37/*------------------------------------------------------------*/
38
39struct _MCEnv;
40
41static IRType shadowType ( IRType ty );
42static IRExpr* expr2vbits ( struct _MCEnv* mce, IRExpr* e );
43
44
45/*------------------------------------------------------------*/
46/*--- Memcheck running state, and tmp management. ---*/
47/*------------------------------------------------------------*/
48
49/* Carries around state during memcheck instrumentation. */
50typedef
51 struct _MCEnv {
52 /* MODIFIED: the bb being constructed. IRStmts are added. */
53 IRBB* bb;
54
55 /* MODIFIED: a table [0 .. #temps_in_original_bb-1] which maps
56 original temps to their current their current shadow temp.
57 Initially all entries are IRTemp_INVALID. Entries are added
58 lazily since many original temps are not used due to
59 optimisation prior to instrumentation. Note that floating
60 point original tmps are shadowed by integer tmps of the same
61 size, and Bit-typed original tmps are shadowed by the type
62 Ity_I8. See comment below. */
63 IRTemp* tmpMap;
64 Int n_originalTmps; /* for range checking */
65
66 /* READONLY: the guest layout. This indicates which parts of
67 the guest state should be regarded as 'always defined'. */
68 VexGuestLayout* layout;
69 /* READONLY: the host word type. Needed for constructing
70 arguments of type 'HWord' to be passed to helper functions.
71 Ity_I32 or Ity_I64 only. */
72 IRType hWordTy;
73 }
74 MCEnv;
75
76/* SHADOW TMP MANAGEMENT. Shadow tmps are allocated lazily (on
77 demand), as they are encountered. This is for two reasons.
78
   (1) (less important reason): Many original tmps are unused due to
   initial IR optimisation, and we do not want to waste space in
   tables tracking them.
82
83 Shadow IRTemps are therefore allocated on demand. mce.tmpMap is a
84 table indexed [0 .. n_types-1], which gives the current shadow for
85 each original tmp, or INVALID_IRTEMP if none is so far assigned.
86 It is necessary to support making multiple assignments to a shadow
87 -- specifically, after testing a shadow for definedness, it needs
88 to be made defined. But IR's SSA property disallows this.
89
90 (2) (more important reason): Therefore, when a shadow needs to get
91 a new value, a new temporary is created, the value is assigned to
92 that, and the tmpMap is updated to reflect the new binding.
93
94 A corollary is that if the tmpMap maps a given tmp to
95 INVALID_IRTEMP and we are hoping to read that shadow tmp, it means
96 there's a read-before-write error in the original tmps. The IR
97 sanity checker should catch all such anomalies, however.
njn25e49d8e72002-09-23 09:36:25 +000098*/
sewardj95448072004-11-22 20:19:51 +000099
100/* Find the tmp currently shadowing the given original tmp. If none
101 so far exists, allocate one. */
102static IRTemp findShadowTmp ( MCEnv* mce, IRTemp orig )
njn25e49d8e72002-09-23 09:36:25 +0000103{
sewardj95448072004-11-22 20:19:51 +0000104 tl_assert(orig < mce->n_originalTmps);
105 if (mce->tmpMap[orig] == IRTemp_INVALID) {
106 mce->tmpMap[orig]
107 = newIRTemp(mce->bb->tyenv,
108 shadowType(mce->bb->tyenv->types[orig]));
njn25e49d8e72002-09-23 09:36:25 +0000109 }
sewardj95448072004-11-22 20:19:51 +0000110 return mce->tmpMap[orig];
njn25e49d8e72002-09-23 09:36:25 +0000111}
112
sewardj95448072004-11-22 20:19:51 +0000113/* Allocate a new shadow for the given original tmp. This means any
114 previous shadow is abandoned. This is needed because it is
115 necessary to give a new value to a shadow once it has been tested
116 for undefinedness, but unfortunately IR's SSA property disallows
117 this. Instead we must abandon the old shadow, allocate a new one
118 and use that instead. */
119static void newShadowTmp ( MCEnv* mce, IRTemp orig )
njn25e49d8e72002-09-23 09:36:25 +0000120{
sewardj95448072004-11-22 20:19:51 +0000121 tl_assert(orig < mce->n_originalTmps);
122 mce->tmpMap[orig]
123 = newIRTemp(mce->bb->tyenv,
124 shadowType(mce->bb->tyenv->types[orig]));
125}
126
127
128/*------------------------------------------------------------*/
129/*--- IRAtoms -- a subset of IRExprs ---*/
130/*------------------------------------------------------------*/
131
/* An atom is either an IRExpr_Const or an IRExpr_Tmp, as defined by
   isAtom() in libvex_ir.h.  Because this instrumenter expects flat
   input, most of this code deals in atoms.  Usefully, a value atom
   always has a V-value which is also an atom: constants are shadowed
   by constants, and temps are shadowed by the corresponding shadow
   temporary. */

/* Purely a documentation alias: an IRAtom is an IRExpr which the
   code below promises is atomic (Const or Tmp only). */
typedef IRExpr IRAtom;
140
141/* (used for sanity checks only): is this an atom which looks
142 like it's from original code? */
143static Bool isOriginalAtom ( MCEnv* mce, IRAtom* a1 )
144{
145 if (a1->tag == Iex_Const)
146 return True;
147 if (a1->tag == Iex_Tmp && a1->Iex.Tmp.tmp < mce->n_originalTmps)
148 return True;
149 return False;
150}
151
152/* (used for sanity checks only): is this an atom which looks
153 like it's from shadow code? */
154static Bool isShadowAtom ( MCEnv* mce, IRAtom* a1 )
155{
156 if (a1->tag == Iex_Const)
157 return True;
158 if (a1->tag == Iex_Tmp && a1->Iex.Tmp.tmp >= mce->n_originalTmps)
159 return True;
160 return False;
161}
162
163/* (used for sanity checks only): check that both args are atoms and
164 are identically-kinded. */
165static Bool sameKindedAtoms ( IRAtom* a1, IRAtom* a2 )
166{
167 if (a1->tag == Iex_Tmp && a1->tag == Iex_Tmp)
168 return True;
169 if (a1->tag == Iex_Const && a1->tag == Iex_Const)
170 return True;
171 return False;
172}
173
174
175/*------------------------------------------------------------*/
176/*--- Type management ---*/
177/*------------------------------------------------------------*/
178
179/* Shadow state is always accessed using integer types. This returns
180 an integer type with the same size (as per sizeofIRType) as the
181 given type. The only valid shadow types are Bit, I8, I16, I32,
182 I64. */
183
184static IRType shadowType ( IRType ty )
185{
186 switch (ty) {
187 case Ity_I1:
188 case Ity_I8:
189 case Ity_I16:
190 case Ity_I32:
191 case Ity_I64: return ty;
192 case Ity_F32: return Ity_I32;
193 case Ity_F64: return Ity_I64;
194 default: ppIRType(ty);
195 VG_(tool_panic)("memcheck:shadowType");
196 }
197}
198
199/* Produce a 'defined' value of the given shadow type. Should only be
200 supplied shadow types (Bit/I8/I16/I32/UI64). */
201static IRExpr* definedOfType ( IRType ty ) {
202 switch (ty) {
203 case Ity_I1: return IRExpr_Const(IRConst_U1(False));
204 case Ity_I8: return IRExpr_Const(IRConst_U8(0));
205 case Ity_I16: return IRExpr_Const(IRConst_U16(0));
206 case Ity_I32: return IRExpr_Const(IRConst_U32(0));
207 case Ity_I64: return IRExpr_Const(IRConst_U64(0));
208 default: VG_(tool_panic)("memcheck:definedOfType");
njn25e49d8e72002-09-23 09:36:25 +0000209 }
210}
211
212
sewardj95448072004-11-22 20:19:51 +0000213/*------------------------------------------------------------*/
214/*--- Constructing IR fragments ---*/
215/*------------------------------------------------------------*/
216
/* assign value to tmp.  NB: each macro argument is expanded exactly
   once, so there is no multiple-evaluation hazard here. */
#define assign(_bb,_tmp,_expr)   \
   addStmtToIRBB((_bb), IRStmt_Tmp((_tmp),(_expr)))

/* add stmt to a bb */
#define stmt(_bb,_stmt)    \
   addStmtToIRBB((_bb), (_stmt))

/* build various kinds of expressions -- thin wrappers over the
   IRExpr_* constructors, purely to keep the code below readable */
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define unop(_op, _arg)          IRExpr_Unop((_op),(_arg))
#define mkU8(_n)                 IRExpr_Const(IRConst_U8(_n))
#define mkU16(_n)                IRExpr_Const(IRConst_U16(_n))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define mkexpr(_tmp)             IRExpr_Tmp((_tmp))
233
234/* bind the given expression to a new temporary, and return the
235 temporary. This effectively converts an arbitrary expression into
236 an atom. */
237static IRAtom* assignNew ( MCEnv* mce, IRType ty, IRExpr* e ) {
238 IRTemp t = newIRTemp(mce->bb->tyenv, ty);
239 assign(mce->bb, t, e);
240 return mkexpr(t);
241}
242
243
244/*------------------------------------------------------------*/
245/*--- Constructing definedness primitive ops ---*/
246/*------------------------------------------------------------*/
247
248/* --------- Defined-if-either-defined --------- */
249
250static IRAtom* mkDifD8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
251 tl_assert(isShadowAtom(mce,a1));
252 tl_assert(isShadowAtom(mce,a2));
253 return assignNew(mce, Ity_I8, binop(Iop_And8, a1, a2));
254}
255
256static IRAtom* mkDifD16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
257 tl_assert(isShadowAtom(mce,a1));
258 tl_assert(isShadowAtom(mce,a2));
259 return assignNew(mce, Ity_I16, binop(Iop_And16, a1, a2));
260}
261
262static IRAtom* mkDifD32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
263 tl_assert(isShadowAtom(mce,a1));
264 tl_assert(isShadowAtom(mce,a2));
265 return assignNew(mce, Ity_I32, binop(Iop_And32, a1, a2));
266}
267
sewardj7010f6e2004-12-10 13:35:22 +0000268static IRAtom* mkDifD64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
269 tl_assert(isShadowAtom(mce,a1));
270 tl_assert(isShadowAtom(mce,a2));
271 return assignNew(mce, Ity_I64, binop(Iop_And64, a1, a2));
272}
273
sewardj95448072004-11-22 20:19:51 +0000274/* --------- Undefined-if-either-undefined --------- */
275
276static IRAtom* mkUifU8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
277 tl_assert(isShadowAtom(mce,a1));
278 tl_assert(isShadowAtom(mce,a2));
279 return assignNew(mce, Ity_I8, binop(Iop_Or8, a1, a2));
280}
281
282static IRAtom* mkUifU16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
283 tl_assert(isShadowAtom(mce,a1));
284 tl_assert(isShadowAtom(mce,a2));
285 return assignNew(mce, Ity_I16, binop(Iop_Or16, a1, a2));
286}
287
288static IRAtom* mkUifU32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
289 tl_assert(isShadowAtom(mce,a1));
290 tl_assert(isShadowAtom(mce,a2));
291 return assignNew(mce, Ity_I32, binop(Iop_Or32, a1, a2));
292}
293
294static IRAtom* mkUifU64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
295 tl_assert(isShadowAtom(mce,a1));
296 tl_assert(isShadowAtom(mce,a2));
297 return assignNew(mce, Ity_I64, binop(Iop_Or64, a1, a2));
298}
299
300static IRAtom* mkUifU ( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 ) {
301 switch (vty) {
302 case Ity_I16: return mkUifU16(mce, a1, a2);
303 case Ity_I32: return mkUifU32(mce, a1, a2);
304 case Ity_I64: return mkUifU64(mce, a1, a2);
305 default:
306 VG_(printf)("\n"); ppIRType(vty); VG_(printf)("\n");
307 VG_(tool_panic)("memcheck:mkUifU");
njn25e49d8e72002-09-23 09:36:25 +0000308 }
309}
310
sewardj95448072004-11-22 20:19:51 +0000311/* --------- The Left-family of operations. --------- */
njn25e49d8e72002-09-23 09:36:25 +0000312
sewardj95448072004-11-22 20:19:51 +0000313static IRAtom* mkLeft8 ( MCEnv* mce, IRAtom* a1 ) {
314 tl_assert(isShadowAtom(mce,a1));
315 /* It's safe to duplicate a1 since it's only an atom */
316 return assignNew(mce, Ity_I8,
317 binop(Iop_Or8, a1,
318 assignNew(mce, Ity_I8,
319 /* unop(Iop_Neg8, a1)))); */
320 binop(Iop_Sub8, mkU8(0), a1) )));
321}
322
323static IRAtom* mkLeft16 ( MCEnv* mce, IRAtom* a1 ) {
324 tl_assert(isShadowAtom(mce,a1));
325 /* It's safe to duplicate a1 since it's only an atom */
326 return assignNew(mce, Ity_I16,
327 binop(Iop_Or16, a1,
328 assignNew(mce, Ity_I16,
329 /* unop(Iop_Neg16, a1)))); */
330 binop(Iop_Sub16, mkU16(0), a1) )));
331}
332
333static IRAtom* mkLeft32 ( MCEnv* mce, IRAtom* a1 ) {
334 tl_assert(isShadowAtom(mce,a1));
335 /* It's safe to duplicate a1 since it's only an atom */
336 return assignNew(mce, Ity_I32,
337 binop(Iop_Or32, a1,
338 assignNew(mce, Ity_I32,
339 /* unop(Iop_Neg32, a1)))); */
340 binop(Iop_Sub32, mkU32(0), a1) )));
341}
342
343/* --------- 'Improvement' functions for AND/OR. --------- */
344
345/* ImproveAND(data, vbits) = data OR vbits. Defined (0) data 0s give
346 defined (0); all other -> undefined (1).
347*/
348static IRAtom* mkImproveAND8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
njn25e49d8e72002-09-23 09:36:25 +0000349{
sewardj95448072004-11-22 20:19:51 +0000350 tl_assert(isOriginalAtom(mce, data));
351 tl_assert(isShadowAtom(mce, vbits));
352 tl_assert(sameKindedAtoms(data, vbits));
353 return assignNew(mce, Ity_I8, binop(Iop_Or8, data, vbits));
354}
njn25e49d8e72002-09-23 09:36:25 +0000355
sewardj95448072004-11-22 20:19:51 +0000356static IRAtom* mkImproveAND16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
357{
358 tl_assert(isOriginalAtom(mce, data));
359 tl_assert(isShadowAtom(mce, vbits));
360 tl_assert(sameKindedAtoms(data, vbits));
361 return assignNew(mce, Ity_I16, binop(Iop_Or16, data, vbits));
362}
njn25e49d8e72002-09-23 09:36:25 +0000363
sewardj95448072004-11-22 20:19:51 +0000364static IRAtom* mkImproveAND32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
365{
366 tl_assert(isOriginalAtom(mce, data));
367 tl_assert(isShadowAtom(mce, vbits));
368 tl_assert(sameKindedAtoms(data, vbits));
369 return assignNew(mce, Ity_I32, binop(Iop_Or32, data, vbits));
370}
njn25e49d8e72002-09-23 09:36:25 +0000371
sewardj7010f6e2004-12-10 13:35:22 +0000372static IRAtom* mkImproveAND64 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
373{
374 tl_assert(isOriginalAtom(mce, data));
375 tl_assert(isShadowAtom(mce, vbits));
376 tl_assert(sameKindedAtoms(data, vbits));
377 return assignNew(mce, Ity_I64, binop(Iop_Or64, data, vbits));
378}
379
sewardj95448072004-11-22 20:19:51 +0000380/* ImproveOR(data, vbits) = ~data OR vbits. Defined (0) data 1s give
381 defined (0); all other -> undefined (1).
382*/
383static IRAtom* mkImproveOR8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
384{
385 tl_assert(isOriginalAtom(mce, data));
386 tl_assert(isShadowAtom(mce, vbits));
387 tl_assert(sameKindedAtoms(data, vbits));
388 return assignNew(
389 mce, Ity_I8,
390 binop(Iop_Or8,
391 assignNew(mce, Ity_I8, unop(Iop_Not8, data)),
392 vbits) );
393}
njn25e49d8e72002-09-23 09:36:25 +0000394
sewardj95448072004-11-22 20:19:51 +0000395static IRAtom* mkImproveOR16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
396{
397 tl_assert(isOriginalAtom(mce, data));
398 tl_assert(isShadowAtom(mce, vbits));
399 tl_assert(sameKindedAtoms(data, vbits));
400 return assignNew(
401 mce, Ity_I16,
402 binop(Iop_Or16,
403 assignNew(mce, Ity_I16, unop(Iop_Not16, data)),
404 vbits) );
405}
njn25e49d8e72002-09-23 09:36:25 +0000406
sewardj95448072004-11-22 20:19:51 +0000407static IRAtom* mkImproveOR32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
408{
409 tl_assert(isOriginalAtom(mce, data));
410 tl_assert(isShadowAtom(mce, vbits));
411 tl_assert(sameKindedAtoms(data, vbits));
412 return assignNew(
413 mce, Ity_I32,
414 binop(Iop_Or32,
415 assignNew(mce, Ity_I32, unop(Iop_Not32, data)),
416 vbits) );
417}
418
sewardj7010f6e2004-12-10 13:35:22 +0000419static IRAtom* mkImproveOR64 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
420{
421 tl_assert(isOriginalAtom(mce, data));
422 tl_assert(isShadowAtom(mce, vbits));
423 tl_assert(sameKindedAtoms(data, vbits));
424 return assignNew(
425 mce, Ity_I64,
426 binop(Iop_Or64,
427 assignNew(mce, Ity_I64, unop(Iop_Not64, data)),
428 vbits) );
429}
430
sewardj95448072004-11-22 20:19:51 +0000431/* --------- Pessimising casts. --------- */
432
/* Pessimising cast: squash vbits to a single "any bit undefined?"
   bit (via CmpNE against zero), then widen that bit to dst_ty by
   sign-extension, giving all-zeroes (defined) or all-ones
   (undefined).  dst_ty is a shadow type, not an original type. */
static IRAtom* mkPCastTo( MCEnv* mce, IRType dst_ty, IRAtom* vbits )
{
   IRType  ty;
   IRAtom* tmp1;
   /* Note, dst_ty is a shadow type, not an original type. */
   /* First of all, collapse vbits down to a single bit. */
   tl_assert(isShadowAtom(mce,vbits));
   ty   = typeOfIRExpr(mce->bb->tyenv, vbits);
   tmp1 = NULL;
   switch (ty) {
      case Ity_I1:
         tmp1 = vbits;   /* already a single bit */
         break;
      case Ity_I8:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE8, vbits, mkU8(0)));
         break;
      case Ity_I16:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE16, vbits, mkU16(0)));
         break;
      case Ity_I32:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE32, vbits, mkU32(0)));
         break;
      case Ity_I64:
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE64, vbits, mkU64(0)));
         break;
      default:
         VG_(tool_panic)("mkPCastTo(1)");
   }
   tl_assert(tmp1);
   /* Now widen up to the dst type: 1Sto* replicates the single bit
      across the whole destination width. */
   switch (dst_ty) {
      case Ity_I1:
         return tmp1;
      case Ity_I8:
         return assignNew(mce, Ity_I8, unop(Iop_1Sto8, tmp1));
      case Ity_I16:
         return assignNew(mce, Ity_I16, unop(Iop_1Sto16, tmp1));
      case Ity_I32:
         return assignNew(mce, Ity_I32, unop(Iop_1Sto32, tmp1));
      case Ity_I64:
         return assignNew(mce, Ity_I64, unop(Iop_1Sto64, tmp1));
      default:
         ppIRType(dst_ty);
         VG_(tool_panic)("mkPCastTo(2)");
   }
}
479
480
481/*------------------------------------------------------------*/
482/*--- Emit a test and complaint if something is undefined. ---*/
483/*------------------------------------------------------------*/
484
485/* Set the annotations on a dirty helper to indicate that the stack
486 pointer and instruction pointers might be read. This is the
487 behaviour of all 'emit-a-complaint' style functions we might
488 call. */
489
490static void setHelperAnns ( MCEnv* mce, IRDirty* di ) {
491 di->nFxState = 2;
492 di->fxState[0].fx = Ifx_Read;
493 di->fxState[0].offset = mce->layout->offset_SP;
494 di->fxState[0].size = mce->layout->sizeof_SP;
495 di->fxState[1].fx = Ifx_Read;
496 di->fxState[1].offset = mce->layout->offset_IP;
497 di->fxState[1].size = mce->layout->sizeof_IP;
498}
499
500
/* Check the supplied **original** atom for undefinedness, and emit a
   complaint if so.  Once that happens, mark it as defined.  This is
   possible because the atom is either a tmp or literal.  If it's a
   tmp, it will be shadowed by a tmp, and so we can set the shadow to
   be defined.  In fact as mentioned above, we will have to allocate a
   new tmp to carry the new 'defined' shadow value, and update the
   original->tmp mapping accordingly; we cannot simply assign a new
   value to an existing shadow tmp as this breaks SSAness -- resulting
   in the post-instrumentation sanity checker spluttering in
   disapproval.
*/
static void complainIfUndefined ( MCEnv* mce, IRAtom* atom )
{
   IRAtom*  vatom;
   IRType   ty;
   Int      sz;
   IRDirty* di;
   IRAtom*  cond;

   /* Since the original expression is atomic, there's no duplicated
      work generated by making multiple V-expressions for it.  So we
      don't really care about the possibility that someone else may
      also create a V-interpretation for it. */
   tl_assert(isOriginalAtom(mce, atom));
   vatom = expr2vbits( mce, atom );
   tl_assert(isShadowAtom(mce, vatom));
   tl_assert(sameKindedAtoms(atom, vatom));

   ty = typeOfIRExpr(mce->bb->tyenv, vatom);

   /* sz is only used for constructing the error message */
   sz = ty==Ity_I1 ? 0 : sizeofIRType(ty);

   cond = mkPCastTo( mce, Ity_I1, vatom );
   /* cond will be 0 if all defined, and 1 if any not defined. */

   /* Select the complaint helper by operand size; sizes without a
      dedicated helper fall through to the generic one, which takes
      the size as an argument. */
   switch (sz) {
      case 0:   /* Bit-typed value */
         di = unsafeIRDirty_0_N( 0/*regparms*/,
                                 "MC_(helperc_value_check0_fail)",
                                 &MC_(helperc_value_check0_fail),
                                 mkIRExprVec_0()
                               );
         break;
      case 1:
         di = unsafeIRDirty_0_N( 0/*regparms*/,
                                 "MC_(helperc_value_check1_fail)",
                                 &MC_(helperc_value_check1_fail),
                                 mkIRExprVec_0()
                               );
         break;
      case 4:
         di = unsafeIRDirty_0_N( 0/*regparms*/,
                                 "MC_(helperc_value_check4_fail)",
                                 &MC_(helperc_value_check4_fail),
                                 mkIRExprVec_0()
                               );
         break;
      default:
         di = unsafeIRDirty_0_N( 1/*regparms*/,
                                 "MC_(helperc_complain_undef)",
                                 &MC_(helperc_complain_undef),
                                 mkIRExprVec_1( mkIRExpr_HWord( sz ))
                               );
         break;
   }
   /* The helper fires only when cond is 1 (some bit undefined). */
   di->guard = cond;
   setHelperAnns( mce, di );
   stmt( mce->bb, IRStmt_Dirty(di));

   /* Set the shadow tmp to be defined.  First, update the
      orig->shadow tmp mapping to reflect the fact that this shadow is
      getting a new value. */
   tl_assert(isAtom(vatom));
   /* sameKindedAtoms ... */
   if (vatom->tag == Iex_Tmp) {
      tl_assert(atom->tag == Iex_Tmp);
      newShadowTmp(mce, atom->Iex.Tmp.tmp);
      assign(mce->bb, findShadowTmp(mce, atom->Iex.Tmp.tmp),
                      definedOfType(ty));
   }
}
582
583
584/*------------------------------------------------------------*/
585/*--- Shadowing PUTs/GETs, and indexed variants thereof ---*/
586/*------------------------------------------------------------*/
587
588/* Examine the always-defined sections declared in layout to see if
   the (offset,size) section is within one.  Note, it is an error to
590 partially fall into such a region: (offset,size) should either be
591 completely in such a region or completely not-in such a region.
592*/
593static Bool isAlwaysDefd ( MCEnv* mce, Int offset, Int size )
594{
595 Int minoffD, maxoffD, i;
596 Int minoff = offset;
597 Int maxoff = minoff + size - 1;
598 tl_assert((minoff & ~0xFFFF) == 0);
599 tl_assert((maxoff & ~0xFFFF) == 0);
600
601 for (i = 0; i < mce->layout->n_alwaysDefd; i++) {
602 minoffD = mce->layout->alwaysDefd[i].offset;
603 maxoffD = minoffD + mce->layout->alwaysDefd[i].size - 1;
604 tl_assert((minoffD & ~0xFFFF) == 0);
605 tl_assert((maxoffD & ~0xFFFF) == 0);
606
607 if (maxoff < minoffD || maxoffD < minoff)
608 continue; /* no overlap */
609 if (minoff >= minoffD && maxoff <= maxoffD)
610 return True; /* completely contained in an always-defd section */
611
612 VG_(tool_panic)("memcheck:isAlwaysDefd:partial overlap");
613 }
614 return False; /* could not find any containing section */
615}
616
617
/* Generate into bb suitable actions to shadow this Put.  If the state
   slice is marked 'always defined', do nothing.  Otherwise, write the
   supplied V bits to the shadow state.  We can pass in either an
   original atom or a V-atom, but not both.  In the former case the
   relevant V-bits are then generated from the original.
*/
static
void do_shadow_PUT ( MCEnv* mce,  Int offset,
                     IRAtom* atom, IRAtom* vatom )
{
   IRType ty;
   /* Exactly one of atom / vatom must be supplied. */
   if (atom) {
      tl_assert(!vatom);
      tl_assert(isOriginalAtom(mce, atom));
      vatom = expr2vbits( mce, atom );
   } else {
      tl_assert(vatom);
      tl_assert(isShadowAtom(mce, vatom));
   }

   ty = typeOfIRExpr(mce->bb->tyenv, vatom);
   tl_assert(ty != Ity_I1);
   if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
      /* later: no ... */
      /* emit code to emit a complaint if any of the vbits are 1. */
      /* complainIfUndefined(mce, atom); */
   } else {
      /* Do a plain shadow Put: same offset, displaced into the
         shadow half of the guest state. */
      stmt( mce->bb, IRStmt_Put( offset + mce->layout->total_sizeB, vatom ) );
   }
}
649
650
651/* Return an expression which contains the V bits corresponding to the
652 given GETI (passed in in pieces).
653*/
654static
655void do_shadow_PUTI ( MCEnv* mce,
656 IRArray* descr, IRAtom* ix, Int bias, IRAtom* atom )
657{
sewardj7cf97ee2004-11-28 14:25:01 +0000658 IRAtom* vatom;
659 IRType ty, tyS;
660 Int arrSize;;
661
sewardj95448072004-11-22 20:19:51 +0000662 tl_assert(isOriginalAtom(mce,atom));
sewardj7cf97ee2004-11-28 14:25:01 +0000663 vatom = expr2vbits( mce, atom );
sewardj95448072004-11-22 20:19:51 +0000664 tl_assert(sameKindedAtoms(atom, vatom));
sewardj7cf97ee2004-11-28 14:25:01 +0000665 ty = descr->elemTy;
666 tyS = shadowType(ty);
667 arrSize = descr->nElems * sizeofIRType(ty);
sewardj95448072004-11-22 20:19:51 +0000668 tl_assert(ty != Ity_I1);
669 tl_assert(isOriginalAtom(mce,ix));
670 complainIfUndefined(mce,ix);
671 if (isAlwaysDefd(mce, descr->base, arrSize)) {
672 /* later: no ... */
673 /* emit code to emit a complaint if any of the vbits are 1. */
674 /* complainIfUndefined(mce, atom); */
675 } else {
676 /* Do a cloned version of the Put that refers to the shadow
677 area. */
678 IRArray* new_descr
679 = mkIRArray( descr->base + mce->layout->total_sizeB,
680 tyS, descr->nElems);
681 stmt( mce->bb, IRStmt_PutI( new_descr, ix, bias, vatom ));
682 }
683}
684
685
686/* Return an expression which contains the V bits corresponding to the
687 given GET (passed in in pieces).
688*/
689static
690IRExpr* shadow_GET ( MCEnv* mce, Int offset, IRType ty )
691{
692 IRType tyS = shadowType(ty);
693 tl_assert(ty != Ity_I1);
694 if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
695 /* Always defined, return all zeroes of the relevant type */
696 return definedOfType(tyS);
697 } else {
698 /* return a cloned version of the Get that refers to the shadow
699 area. */
700 return IRExpr_Get( offset + mce->layout->total_sizeB, tyS );
701 }
702}
703
704
705/* Return an expression which contains the V bits corresponding to the
706 given GETI (passed in in pieces).
707*/
708static
709IRExpr* shadow_GETI ( MCEnv* mce, IRArray* descr, IRAtom* ix, Int bias )
710{
711 IRType ty = descr->elemTy;
712 IRType tyS = shadowType(ty);
713 Int arrSize = descr->nElems * sizeofIRType(ty);
714 tl_assert(ty != Ity_I1);
715 tl_assert(isOriginalAtom(mce,ix));
716 complainIfUndefined(mce,ix);
717 if (isAlwaysDefd(mce, descr->base, arrSize)) {
718 /* Always defined, return all zeroes of the relevant type */
719 return definedOfType(tyS);
720 } else {
721 /* return a cloned version of the Get that refers to the shadow
722 area. */
723 IRArray* new_descr
724 = mkIRArray( descr->base + mce->layout->total_sizeB,
725 tyS, descr->nElems);
726 return IRExpr_GetI( new_descr, ix, bias );
727 }
728}
729
730
731/*------------------------------------------------------------*/
732/*--- Generating approximations for unknown operations, ---*/
733/*--- using lazy-propagate semantics ---*/
734/*------------------------------------------------------------*/
735
736/* Lazy propagation of undefinedness from two values, resulting in the
737 specified shadow type.
738*/
739static
740IRAtom* mkLazy2 ( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2 )
741{
742 /* force everything via 32-bit intermediaries. */
743 IRAtom* at;
744 tl_assert(isShadowAtom(mce,va1));
745 tl_assert(isShadowAtom(mce,va2));
746 at = mkPCastTo(mce, Ity_I32, va1);
747 at = mkUifU(mce, Ity_I32, at, mkPCastTo(mce, Ity_I32, va2));
748 at = mkPCastTo(mce, finalVty, at);
749 return at;
750}
751
752
/* Do the lazy propagation game from a null-terminated vector of
   atoms.  This is presumably the arguments to a helper call, so the
   IRCallee info is also supplied in order that we can know which
   arguments should be ignored (via the .mcx_mask field).
*/
static
IRAtom* mkLazyN ( MCEnv* mce,
                  IRAtom** exprvec, IRType finalVtype, IRCallee* cee )
{
   Int i;
   IRAtom* here;
   /* Accumulator starts 'all defined'; each considered arg is
      pessimistically merged in. */
   IRAtom* curr = definedOfType(Ity_I32);
   for (i = 0; exprvec[i]; i++) {
      tl_assert(i < 32);   /* guard the (1<<i) shift below */
      tl_assert(isOriginalAtom(mce, exprvec[i]));
      /* Only take notice of this arg if the callee's mc-exclusion
         mask does not say it is to be excluded. */
      if (cee->mcx_mask & (1<<i)) {
         /* the arg is to be excluded from definedness checking.  Do
            nothing. */
         if (0) VG_(printf)("excluding %s(%d)\n", cee->name, i);
      } else {
         /* calculate the arg's definedness, and pessimistically merge
            it in. */
         here = mkPCastTo( mce, Ity_I32, expr2vbits(mce, exprvec[i]) );
         curr = mkUifU32(mce, here, curr);
      }
   }
   return mkPCastTo(mce, finalVtype, curr );
}
783
784
785/*------------------------------------------------------------*/
786/*--- Generating expensive sequences for exact carry-chain ---*/
787/*--- propagation in add/sub and related operations. ---*/
788/*------------------------------------------------------------*/
789
/* Exact (carry-chain-aware) V-bit propagation for 32-bit add.
   Evaluate the sum at both extremes of the undefined input bits:
   *_min forces undefined bits to 0, *_max forces them to 1.  Result
   bits which differ between (a_min + b_min) and (a_max + b_max) are
   influenced by some undefined input via the carry chain, so are
   undefined; bits undefined in either input (qaa | qbb) are
   undefined too.  qaa/qbb are the shadow (V-bit) atoms for the
   original operands aa/bb. */
static
IRAtom* expensiveAdd32 ( MCEnv* mce, IRAtom* qaa, IRAtom* qbb,
                         IRAtom* aa,  IRAtom* bb )
{
   IRAtom *a_min, *b_min, *a_max, *b_max;
   IRType  ty;
   IROp    opAND, opOR, opXOR, opNOT, opADD;

   tl_assert(isShadowAtom(mce,qaa));
   tl_assert(isShadowAtom(mce,qbb));
   tl_assert(isOriginalAtom(mce,aa));
   tl_assert(isOriginalAtom(mce,bb));
   tl_assert(sameKindedAtoms(qaa,aa));
   tl_assert(sameKindedAtoms(qbb,bb));

   ty    = Ity_I32;
   opAND = Iop_And32;
   opOR  = Iop_Or32;
   opXOR = Iop_Xor32;
   opNOT = Iop_Not32;
   opADD = Iop_Add32;

   // a_min = aa & ~qaa
   a_min = assignNew(mce,ty,
                     binop(opAND, aa,
                               assignNew(mce,ty, unop(opNOT, qaa))));

   // b_min = bb & ~qbb
   b_min = assignNew(mce,ty,
                     binop(opAND, bb,
                               assignNew(mce,ty, unop(opNOT, qbb))));

   // a_max = aa | qaa
   a_max = assignNew(mce,ty, binop(opOR, aa, qaa));

   // b_max = bb | qbb
   b_max = assignNew(mce,ty, binop(opOR, bb, qbb));

   // result = (qaa | qbb) | ((a_min + b_min) ^ (a_max + b_max))
   return
   assignNew(mce,ty,
      binop( opOR,
             assignNew(mce,ty, binop(opOR, qaa, qbb)),
             assignNew(mce,ty,
                binop(opXOR, assignNew(mce,ty, binop(opADD, a_min, b_min)),
                             assignNew(mce,ty, binop(opADD, a_max, b_max))
                )
             )
      )
   );
}
841
842
843/*------------------------------------------------------------*/
844/*--- Generate shadow values from all kinds of IRExprs. ---*/
845/*------------------------------------------------------------*/
846
/* Compute the shadow (V-bit) value for binary operation 'op' applied
   to original operands atom1/atom2.  The operands' shadows are
   computed first; how they are combined depends on the op:
     - mkLazy2: pessimistic -- any undefinedness anywhere in either
       operand taints the entire result (used for FP and div/mod);
     - mkLeft*: integer add/sub/mul -- undefinedness propagates only
       towards more-significant bits;
     - And/Or: precise scheme exploiting the fact that a defined 0
       (for And) or a defined 1 (for Or) forces the result bit
       regardless of the other operand;
     - shifts: the shift amount must be fully defined, then the V bits
       are shifted by the same original amount.
   Unhandled ops panic, flagging missing instrumentation at once. */
static 
IRAtom* expr2vbits_Binop ( MCEnv* mce,
                           IROp op,
                           IRAtom* atom1, IRAtom* atom2 )
{
   IRType  and_or_ty;
   /* Strategy functions selected per operand size, used only by the
      shared do_And_Or tail below. */
   IRAtom* (*uifu)    (MCEnv*, IRAtom*, IRAtom*);
   IRAtom* (*difd)    (MCEnv*, IRAtom*, IRAtom*);
   IRAtom* (*improve) (MCEnv*, IRAtom*, IRAtom*);

   IRAtom* vatom1 = expr2vbits( mce, atom1 );
   IRAtom* vatom2 = expr2vbits( mce, atom2 );

   tl_assert(isOriginalAtom(mce,atom1));
   tl_assert(isOriginalAtom(mce,atom2));
   tl_assert(isShadowAtom(mce,vatom1));
   tl_assert(isShadowAtom(mce,vatom2));
   tl_assert(sameKindedAtoms(atom1,vatom1));
   tl_assert(sameKindedAtoms(atom2,vatom2));
   switch (op) {

      case Iop_RoundF64:
      case Iop_F64toI64:
      case Iop_I64toF64:
         /* First arg is I32 (rounding mode), second is F64 or I64
            (data). */
         return mkLazy2(mce, Ity_I64, vatom1, vatom2);

      case Iop_PRemC3210F64: case Iop_PRem1C3210F64:
         /* Takes two F64 args. */
      case Iop_F64toI32:
      case Iop_F64toF32:
         /* First arg is I32 (rounding mode), second is F64 (data). */
         return mkLazy2(mce, Ity_I32, vatom1, vatom2);

      case Iop_F64toI16:
         /* First arg is I32 (rounding mode), second is F64 (data). */
         return mkLazy2(mce, Ity_I16, vatom1, vatom2);

      case Iop_ScaleF64:
      case Iop_Yl2xF64:
      case Iop_Yl2xp1F64:
      case Iop_PRemF64:
      case Iop_AtanF64:
      case Iop_AddF64:
      case Iop_DivF64:
      case Iop_SubF64:
      case Iop_MulF64:
         return mkLazy2(mce, Ity_I64, vatom1, vatom2);

      case Iop_CmpF64:
         return mkLazy2(mce, Ity_I32, vatom1, vatom2);

      /* non-FP after here */

      case Iop_DivModU64to32:
      case Iop_DivModS64to32:
         return mkLazy2(mce, Ity_I64, vatom1, vatom2);

      /* Concatenations: result V bits are the operand V bits
         concatenated the same way. */
      case Iop_16HLto32:
         return assignNew(mce, Ity_I32,
                          binop(Iop_16HLto32, vatom1, vatom2));
      case Iop_32HLto64:
         return assignNew(mce, Ity_I64,
                          binop(Iop_32HLto64, vatom1, vatom2));

      /* Widening multiplies: compute the narrow result's shadow via
         the cheap scheme, then pessimistically PCast it to fill the
         high half. */
      case Iop_MullS32:
      case Iop_MullU32: {
         IRAtom* vLo32 = mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));
         IRAtom* vHi32 = mkPCastTo(mce, Ity_I32, vLo32);
         return assignNew(mce, Ity_I64, binop(Iop_32HLto64, vHi32, vLo32));
      }

      case Iop_MullS16:
      case Iop_MullU16: {
         IRAtom* vLo16 = mkLeft16(mce, mkUifU16(mce, vatom1,vatom2));
         IRAtom* vHi16 = mkPCastTo(mce, Ity_I16, vLo16);
         return assignNew(mce, Ity_I32, binop(Iop_16HLto32, vHi16, vLo16));
      }

      case Iop_MullS8:
      case Iop_MullU8: {
         IRAtom* vLo8 = mkLeft8(mce, mkUifU8(mce, vatom1,vatom2));
         IRAtom* vHi8 = mkPCastTo(mce, Ity_I8, vLo8);
         return assignNew(mce, Ity_I16, binop(Iop_8HLto16, vHi8, vLo8));
      }

      case Iop_Add32:
         /* Precise-but-expensive Add32 scheme, currently disabled;
            when off, Add32 deliberately falls through to the cheap
            mkLeft32 scheme below. */
#        if 0
         return expensiveAdd32(mce, vatom1,vatom2, atom1,atom2);
#        endif
         /* fallthrough */
      case Iop_Sub32:
      case Iop_Mul32:
         return mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));

      case Iop_Mul16:
      case Iop_Add16:
      case Iop_Sub16:
         return mkLeft16(mce, mkUifU16(mce, vatom1,vatom2));

      case Iop_Sub8:
      case Iop_Add8:
         return mkLeft8(mce, mkUifU8(mce, vatom1,vatom2));

      /* Comparisons: single-bit result, undefined if any bit of
         either operand is undefined. */
      case Iop_CmpLE32S: case Iop_CmpLE32U: 
      case Iop_CmpLT32U: case Iop_CmpLT32S:
      case Iop_CmpEQ32: case Iop_CmpNE32:
         return mkPCastTo(mce, Ity_I1, mkUifU32(mce, vatom1,vatom2));

      case Iop_CmpEQ16: case Iop_CmpNE16:
         return mkPCastTo(mce, Ity_I1, mkUifU16(mce, vatom1,vatom2));

      case Iop_CmpEQ8: case Iop_CmpNE8:
         return mkPCastTo(mce, Ity_I1, mkUifU8(mce, vatom1,vatom2));

      case Iop_Shl32: case Iop_Shr32: case Iop_Sar32:
         /* Complain if the shift amount is undefined. Then simply
            shift the first arg's V bits by the real shift amount. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I32, binop(op, vatom1, atom2));

      case Iop_Shl16: case Iop_Shr16:
         /* Same scheme as with 32-bit shifts. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I16, binop(op, vatom1, atom2));

      case Iop_Shl8: case Iop_Shr8:
         /* Same scheme as with 32-bit shifts. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I8, binop(op, vatom1, atom2));

      case Iop_Shl64: case Iop_Shr64:
         /* Same scheme as with 32-bit shifts. */
         complainIfUndefined(mce, atom2);
         return assignNew(mce, Ity_I64, binop(op, vatom1, atom2));

      /* And/Or: select per-size strategy functions, then share the
         do_And_Or tail. */
      case Iop_And64:
         uifu = mkUifU64; difd = mkDifD64; 
         and_or_ty = Ity_I64; improve = mkImproveAND64; goto do_And_Or;
      case Iop_And32:
         uifu = mkUifU32; difd = mkDifD32; 
         and_or_ty = Ity_I32; improve = mkImproveAND32; goto do_And_Or;
      case Iop_And16:
         uifu = mkUifU16; difd = mkDifD16; 
         and_or_ty = Ity_I16; improve = mkImproveAND16; goto do_And_Or;
      case Iop_And8:
         uifu = mkUifU8; difd = mkDifD8; 
         and_or_ty = Ity_I8; improve = mkImproveAND8; goto do_And_Or;

      case Iop_Or64:
         uifu = mkUifU64; difd = mkDifD64; 
         and_or_ty = Ity_I64; improve = mkImproveOR64; goto do_And_Or;
      case Iop_Or32:
         uifu = mkUifU32; difd = mkDifD32; 
         and_or_ty = Ity_I32; improve = mkImproveOR32; goto do_And_Or;
      case Iop_Or16:
         uifu = mkUifU16; difd = mkDifD16; 
         and_or_ty = Ity_I16; improve = mkImproveOR16; goto do_And_Or;
      case Iop_Or8:
         uifu = mkUifU8; difd = mkDifD8; 
         and_or_ty = Ity_I8; improve = mkImproveOR8; goto do_And_Or;

      do_And_Or:
         /* Start from the worst-case union of undefinedness, then
            knock out (DifD) bits whose result value is forced by a
            defined operand bit, as computed by 'improve' from each
            original operand and its shadow. */
         return
         assignNew(
            mce, 
            and_or_ty,
            difd(mce, uifu(mce, vatom1, vatom2),
                      difd(mce, improve(mce, atom1, vatom1),
                                improve(mce, atom2, vatom2) ) ) );

      /* Xor: a defined operand bit cannot force the result, so the
         plain union is already exact. */
      case Iop_Xor8:
         return mkUifU8(mce, vatom1, vatom2);
      case Iop_Xor16:
         return mkUifU16(mce, vatom1, vatom2);
      case Iop_Xor32:
         return mkUifU32(mce, vatom1, vatom2);
      case Iop_Xor64:
         return mkUifU64(mce, vatom1, vatom2);

      default:
         ppIROp(op);
         VG_(tool_panic)("memcheck:expr2vbits_Binop");
   }
}
1032
njn25e49d8e72002-09-23 09:36:25 +00001033
sewardj95448072004-11-22 20:19:51 +00001034static
1035IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
1036{
1037 IRAtom* vatom = expr2vbits( mce, atom );
1038 tl_assert(isOriginalAtom(mce,atom));
1039 switch (op) {
1040
1041 case Iop_F32toF64:
1042 case Iop_I32toF64:
sewardj95448072004-11-22 20:19:51 +00001043 case Iop_NegF64:
1044 case Iop_SinF64:
1045 case Iop_CosF64:
1046 case Iop_TanF64:
1047 case Iop_SqrtF64:
1048 case Iop_AbsF64:
1049 case Iop_2xm1F64:
1050 return mkPCastTo(mce, Ity_I64, vatom);
1051
sewardj95448072004-11-22 20:19:51 +00001052 case Iop_Clz32:
1053 case Iop_Ctz32:
1054 return mkPCastTo(mce, Ity_I32, vatom);
1055
1056 case Iop_32Sto64:
1057 case Iop_32Uto64:
1058 return assignNew(mce, Ity_I64, unop(op, vatom));
1059
1060 case Iop_64to32:
1061 case Iop_64HIto32:
1062 case Iop_1Uto32:
1063 case Iop_8Uto32:
1064 case Iop_16Uto32:
1065 case Iop_16Sto32:
1066 case Iop_8Sto32:
1067 return assignNew(mce, Ity_I32, unop(op, vatom));
1068
1069 case Iop_8Sto16:
1070 case Iop_8Uto16:
1071 case Iop_32to16:
1072 case Iop_32HIto16:
1073 return assignNew(mce, Ity_I16, unop(op, vatom));
1074
1075 case Iop_1Uto8:
1076 case Iop_16to8:
1077 case Iop_32to8:
1078 return assignNew(mce, Ity_I8, unop(op, vatom));
1079
1080 case Iop_32to1:
1081 return assignNew(mce, Ity_I1, unop(Iop_32to1, vatom));
1082
1083 case Iop_ReinterpF64asI64:
1084 case Iop_ReinterpI64asF64:
sewardj7010f6e2004-12-10 13:35:22 +00001085 case Iop_Not64:
sewardj95448072004-11-22 20:19:51 +00001086 case Iop_Not32:
1087 case Iop_Not16:
1088 case Iop_Not8:
1089 case Iop_Not1:
1090 return vatom;
sewardj7010f6e2004-12-10 13:35:22 +00001091
sewardj95448072004-11-22 20:19:51 +00001092 default:
1093 ppIROp(op);
1094 VG_(tool_panic)("memcheck:expr2vbits_Unop");
1095 }
1096}
1097
1098
/* Generate IR to fetch the V bits for a little-endian load of type
   'ty' from address addr+bias.  The address is first checked for
   definedness; the V bits are then read from shadow memory via a
   dirty helper call, and an expression reading the result temp is
   returned. */
static
IRAtom* expr2vbits_LDle ( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
{
   void*    helper;
   Char*    hname;
   IRDirty* di;
   IRTemp   datavbits;
   IRAtom*  addrAct;

   tl_assert(isOriginalAtom(mce,addr));

   /* First, emit a definedness test for the address. This also sets
      the address (shadow) to 'defined' following the test. */
   complainIfUndefined( mce, addr );

   /* Now cook up a call to the relevant helper function, to read the
      data V bits from shadow memory.  Helper choice is by load width
      (8/4/2/1 bytes). */
   ty = shadowType(ty);
   switch (ty) {
      case Ity_I64: helper = &MC_(helperc_LOADV8);
                    hname = "MC_(helperc_LOADV8)";
                    break;
      case Ity_I32: helper = &MC_(helperc_LOADV4);
                    hname = "MC_(helperc_LOADV4)";
                    break;
      case Ity_I16: helper = &MC_(helperc_LOADV2);
                    hname = "MC_(helperc_LOADV2)";
                    break;
      case Ity_I8:  helper = &MC_(helperc_LOADV1);
                    hname = "MC_(helperc_LOADV1)";
                    break;
      default:      ppIRType(ty);
                    VG_(tool_panic)("memcheck:do_shadow_LDle");
   }

   /* Generate the actual address into addrAct.  A nonzero bias is
      folded in with a host-word-sized add. */
   if (bias == 0) {
      addrAct = addr;
   } else {
      IROp    mkAdd;
      IRAtom* eBias;
      IRType  tyAddr  = mce->hWordTy;
      tl_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
      mkAdd = tyAddr==Ity_I32 ? Iop_Add32 : Iop_Add64;
      eBias = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
      addrAct = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias) );
   }

   /* We need to have a place to park the V bits we're just about to
      read. */
   datavbits = newIRTemp(mce->bb->tyenv, ty);
   di = unsafeIRDirty_1_N( datavbits, 
                           1/*regparms*/, hname, helper, 
                           mkIRExprVec_1( addrAct ));
   setHelperAnns( mce, di );
   stmt( mce->bb, IRStmt_Dirty(di) );

   return mkexpr(datavbits);
}
1158
1159
1160static
1161IRAtom* expr2vbits_Mux0X ( MCEnv* mce,
1162 IRAtom* cond, IRAtom* expr0, IRAtom* exprX )
1163{
1164 IRAtom *vbitsC, *vbits0, *vbitsX;
1165 IRType ty;
1166 /* Given Mux0X(cond,expr0,exprX), generate
1167 Mux0X(cond,expr0#,exprX#) `UifU` PCast(cond#)
1168 That is, steer the V bits like the originals, but trash the
1169 result if the steering value is undefined. This gives
1170 lazy propagation. */
1171 tl_assert(isOriginalAtom(mce, cond));
1172 tl_assert(isOriginalAtom(mce, expr0));
1173 tl_assert(isOriginalAtom(mce, exprX));
1174
1175 vbitsC = expr2vbits(mce, cond);
1176 vbits0 = expr2vbits(mce, expr0);
1177 vbitsX = expr2vbits(mce, exprX);
1178 ty = typeOfIRExpr(mce->bb->tyenv, vbits0);
1179
1180 return
1181 mkUifU(mce, ty, assignNew(mce, ty, IRExpr_Mux0X(cond, vbits0, vbitsX)),
1182 mkPCastTo(mce, ty, vbitsC) );
1183}
1184
1185/* --------- This is the main expression-handling function. --------- */
1186
1187static
1188IRExpr* expr2vbits ( MCEnv* mce, IRExpr* e )
1189{
1190 switch (e->tag) {
1191
1192 case Iex_Get:
1193 return shadow_GET( mce, e->Iex.Get.offset, e->Iex.Get.ty );
1194
1195 case Iex_GetI:
1196 return shadow_GETI( mce, e->Iex.GetI.descr,
1197 e->Iex.GetI.ix, e->Iex.GetI.bias );
1198
1199 case Iex_Tmp:
1200 return IRExpr_Tmp( findShadowTmp(mce, e->Iex.Tmp.tmp) );
1201
1202 case Iex_Const:
1203 return definedOfType(shadowType(typeOfIRExpr(mce->bb->tyenv, e)));
1204
1205 case Iex_Binop:
1206 return expr2vbits_Binop(
1207 mce,
1208 e->Iex.Binop.op,
1209 e->Iex.Binop.arg1, e->Iex.Binop.arg2
1210 );
1211
1212 case Iex_Unop:
1213 return expr2vbits_Unop( mce, e->Iex.Unop.op, e->Iex.Unop.arg );
1214
1215 case Iex_LDle:
1216 return expr2vbits_LDle( mce, e->Iex.LDle.ty,
1217 e->Iex.LDle.addr, 0/*addr bias*/ );
1218
1219 case Iex_CCall:
1220 return mkLazyN( mce, e->Iex.CCall.args,
1221 e->Iex.CCall.retty,
1222 e->Iex.CCall.cee );
1223
1224 case Iex_Mux0X:
1225 return expr2vbits_Mux0X( mce, e->Iex.Mux0X.cond, e->Iex.Mux0X.expr0,
1226 e->Iex.Mux0X.exprX);
njn25e49d8e72002-09-23 09:36:25 +00001227
1228 default:
sewardj95448072004-11-22 20:19:51 +00001229 VG_(printf)("\n");
1230 ppIRExpr(e);
1231 VG_(printf)("\n");
1232 VG_(tool_panic)("memcheck: expr2vbits");
njn25e49d8e72002-09-23 09:36:25 +00001233 }
njn25e49d8e72002-09-23 09:36:25 +00001234}
1235
1236/*------------------------------------------------------------*/
sewardj95448072004-11-22 20:19:51 +00001237/*--- Generate shadow stmts from all kinds of IRStmts. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001238/*------------------------------------------------------------*/
1239
sewardj95448072004-11-22 20:19:51 +00001240/* Widen a value to the host word size. */
njn25e49d8e72002-09-23 09:36:25 +00001241
1242static
sewardj95448072004-11-22 20:19:51 +00001243IRExpr* zwidenToHostWord ( MCEnv* mce, IRAtom* vatom )
njn25e49d8e72002-09-23 09:36:25 +00001244{
sewardj7cf97ee2004-11-28 14:25:01 +00001245 IRType ty, tyH;
1246
sewardj95448072004-11-22 20:19:51 +00001247 /* vatom is vbits-value and as such can only have a shadow type. */
1248 tl_assert(isShadowAtom(mce,vatom));
njn25e49d8e72002-09-23 09:36:25 +00001249
sewardj7cf97ee2004-11-28 14:25:01 +00001250 ty = typeOfIRExpr(mce->bb->tyenv, vatom);
1251 tyH = mce->hWordTy;
njn25e49d8e72002-09-23 09:36:25 +00001252
sewardj95448072004-11-22 20:19:51 +00001253 if (tyH == Ity_I32) {
1254 switch (ty) {
1255 case Ity_I32: return vatom;
1256 case Ity_I16: return assignNew(mce, tyH, unop(Iop_16Uto32, vatom));
1257 case Ity_I8: return assignNew(mce, tyH, unop(Iop_8Uto32, vatom));
1258 default: goto unhandled;
sewardj8ec2cfc2002-10-13 00:57:26 +00001259 }
sewardj95448072004-11-22 20:19:51 +00001260 } else {
1261 goto unhandled;
sewardj8ec2cfc2002-10-13 00:57:26 +00001262 }
sewardj95448072004-11-22 20:19:51 +00001263 unhandled:
1264 VG_(printf)("\nty = "); ppIRType(ty); VG_(printf)("\n");
1265 VG_(tool_panic)("zwidenToHostWord");
njn25e49d8e72002-09-23 09:36:25 +00001266}
1267
njn25e49d8e72002-09-23 09:36:25 +00001268
sewardj95448072004-11-22 20:19:51 +00001269/* Generate a shadow store. addr is always the original address atom.
1270 You can pass in either originals or V-bits for the data atom, but
1271 obviously not both. */
njn25e49d8e72002-09-23 09:36:25 +00001272
sewardj95448072004-11-22 20:19:51 +00001273static
1274void do_shadow_STle ( MCEnv* mce,
1275 IRAtom* addr, UInt bias,
1276 IRAtom* data, IRAtom* vdata )
njn25e49d8e72002-09-23 09:36:25 +00001277{
sewardj95448072004-11-22 20:19:51 +00001278 IRType ty;
1279 IRDirty* di;
1280 void* helper = NULL;
1281 Char* hname = NULL;
1282 IRAtom* addrAct;
njn25e49d8e72002-09-23 09:36:25 +00001283
sewardj95448072004-11-22 20:19:51 +00001284 if (data) {
1285 tl_assert(!vdata);
1286 tl_assert(isOriginalAtom(mce, data));
1287 tl_assert(bias == 0);
1288 vdata = expr2vbits( mce, data );
1289 } else {
1290 tl_assert(vdata);
1291 }
njn25e49d8e72002-09-23 09:36:25 +00001292
sewardj95448072004-11-22 20:19:51 +00001293 tl_assert(isOriginalAtom(mce,addr));
1294 tl_assert(isShadowAtom(mce,vdata));
njn25e49d8e72002-09-23 09:36:25 +00001295
sewardj95448072004-11-22 20:19:51 +00001296 ty = typeOfIRExpr(mce->bb->tyenv, vdata);
njn25e49d8e72002-09-23 09:36:25 +00001297
sewardj95448072004-11-22 20:19:51 +00001298 /* First, emit a definedness test for the address. This also sets
1299 the address (shadow) to 'defined' following the test. */
1300 complainIfUndefined( mce, addr );
njn25e49d8e72002-09-23 09:36:25 +00001301
sewardj95448072004-11-22 20:19:51 +00001302 /* Now cook up a call to the relevant helper function, to write the
1303 data V bits into shadow memory. */
1304 switch (ty) {
1305 case Ity_I64: helper = &MC_(helperc_STOREV8);
1306 hname = "MC_(helperc_STOREV8)";
1307 break;
1308 case Ity_I32: helper = &MC_(helperc_STOREV4);
1309 hname = "MC_(helperc_STOREV4)";
1310 break;
1311 case Ity_I16: helper = &MC_(helperc_STOREV2);
1312 hname = "MC_(helperc_STOREV2)";
1313 break;
1314 case Ity_I8: helper = &MC_(helperc_STOREV1);
1315 hname = "MC_(helperc_STOREV1)";
1316 break;
1317 default: VG_(tool_panic)("memcheck:do_shadow_STle");
1318 }
njn25e49d8e72002-09-23 09:36:25 +00001319
sewardj95448072004-11-22 20:19:51 +00001320 /* Generate the actual address into addrAct. */
1321 if (bias == 0) {
1322 addrAct = addr;
1323 } else {
sewardj7cf97ee2004-11-28 14:25:01 +00001324 IROp mkAdd;
1325 IRAtom* eBias;
sewardj95448072004-11-22 20:19:51 +00001326 IRType tyAddr = mce->hWordTy;
1327 tl_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
sewardj7cf97ee2004-11-28 14:25:01 +00001328 mkAdd = tyAddr==Ity_I32 ? Iop_Add32 : Iop_Add64;
1329 eBias = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
sewardj95448072004-11-22 20:19:51 +00001330 addrAct = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias) );
1331 }
njn25e49d8e72002-09-23 09:36:25 +00001332
sewardj95448072004-11-22 20:19:51 +00001333 if (ty == Ity_I64) {
1334 /* We can't do this with regparm 2 on 32-bit platforms, since
1335 the back ends aren't clever enough to handle 64-bit regparm
1336 args. Therefore be different. */
1337 di = unsafeIRDirty_0_N(
1338 1/*regparms*/, hname, helper,
1339 mkIRExprVec_2( addrAct, vdata ));
1340 } else {
1341 di = unsafeIRDirty_0_N(
1342 2/*regparms*/, hname, helper,
1343 mkIRExprVec_2( addrAct,
1344 zwidenToHostWord( mce, vdata )));
1345 }
1346 setHelperAnns( mce, di );
1347 stmt( mce->bb, IRStmt_Dirty(di) );
1348}
njn25e49d8e72002-09-23 09:36:25 +00001349
njn25e49d8e72002-09-23 09:36:25 +00001350
sewardj95448072004-11-22 20:19:51 +00001351/* Do lazy pessimistic propagation through a dirty helper call, by
1352 looking at the annotations on it. This is the most complex part of
1353 Memcheck. */
njn25e49d8e72002-09-23 09:36:25 +00001354
sewardj95448072004-11-22 20:19:51 +00001355static IRType szToITy ( Int n )
1356{
1357 switch (n) {
1358 case 1: return Ity_I8;
1359 case 2: return Ity_I16;
1360 case 4: return Ity_I32;
1361 case 8: return Ity_I64;
1362 default: VG_(tool_panic)("szToITy(memcheck)");
1363 }
1364}
njn25e49d8e72002-09-23 09:36:25 +00001365
sewardj95448072004-11-22 20:19:51 +00001366static
1367void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
1368{
sewardje9e16d32004-12-10 13:17:55 +00001369 Int i, n, offset, toDo, gSz, gOff;
sewardj7cf97ee2004-11-28 14:25:01 +00001370 IRAtom *src, *here, *curr;
sewardj95448072004-11-22 20:19:51 +00001371 IRType tyAddr, tySrc, tyDst;
1372 IRTemp dst;
njn25e49d8e72002-09-23 09:36:25 +00001373
sewardj95448072004-11-22 20:19:51 +00001374 /* First check the guard. */
1375 complainIfUndefined(mce, d->guard);
1376
1377 /* Now round up all inputs and PCast over them. */
sewardj7cf97ee2004-11-28 14:25:01 +00001378 curr = definedOfType(Ity_I32);
sewardj95448072004-11-22 20:19:51 +00001379
1380 /* Inputs: unmasked args */
1381 for (i = 0; d->args[i]; i++) {
1382 if (d->cee->mcx_mask & (1<<i)) {
1383 /* ignore this arg */
njn25e49d8e72002-09-23 09:36:25 +00001384 } else {
sewardj95448072004-11-22 20:19:51 +00001385 here = mkPCastTo( mce, Ity_I32, expr2vbits(mce, d->args[i]) );
1386 curr = mkUifU32(mce, here, curr);
njn25e49d8e72002-09-23 09:36:25 +00001387 }
1388 }
sewardj95448072004-11-22 20:19:51 +00001389
1390 /* Inputs: guest state that we read. */
1391 for (i = 0; i < d->nFxState; i++) {
1392 tl_assert(d->fxState[i].fx != Ifx_None);
1393 if (d->fxState[i].fx == Ifx_Write)
1394 continue;
sewardja7203252004-11-26 19:17:47 +00001395
1396 /* Ignore any sections marked as 'always defined'. */
1397 if (isAlwaysDefd(mce, d->fxState[i].offset, d->fxState[i].size )) {
sewardje9e16d32004-12-10 13:17:55 +00001398 if (0)
sewardja7203252004-11-26 19:17:47 +00001399 VG_(printf)("memcheck: Dirty gst: ignored off %d, sz %d\n",
1400 d->fxState[i].offset, d->fxState[i].size );
1401 continue;
1402 }
1403
sewardj95448072004-11-22 20:19:51 +00001404 /* This state element is read or modified. So we need to
sewardje9e16d32004-12-10 13:17:55 +00001405 consider it. If larger than 8 bytes, deal with it in 8-byte
1406 chunks. */
1407 gSz = d->fxState[i].size;
1408 gOff = d->fxState[i].offset;
1409 tl_assert(gSz > 0);
1410 while (True) {
1411 if (gSz == 0) break;
1412 n = gSz <= 8 ? gSz : 8;
1413 /* update 'curr' with UifU of the state slice
1414 gOff .. gOff+n-1 */
1415 tySrc = szToITy( n );
1416 src = assignNew( mce, tySrc,
1417 shadow_GET(mce, gOff, tySrc ) );
1418 here = mkPCastTo( mce, Ity_I32, src );
1419 curr = mkUifU32(mce, here, curr);
1420 gSz -= n;
1421 gOff += n;
1422 }
1423
sewardj95448072004-11-22 20:19:51 +00001424 }
1425
1426 /* Inputs: memory. First set up some info needed regardless of
1427 whether we're doing reads or writes. */
1428 tyAddr = Ity_INVALID;
1429
1430 if (d->mFx != Ifx_None) {
1431 /* Because we may do multiple shadow loads/stores from the same
1432 base address, it's best to do a single test of its
1433 definedness right now. Post-instrumentation optimisation
1434 should remove all but this test. */
1435 tl_assert(d->mAddr);
1436 complainIfUndefined(mce, d->mAddr);
1437
1438 tyAddr = typeOfIRExpr(mce->bb->tyenv, d->mAddr);
1439 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
1440 tl_assert(tyAddr == mce->hWordTy); /* not really right */
1441 }
1442
1443 /* Deal with memory inputs (reads or modifies) */
1444 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
1445 offset = 0;
1446 toDo = d->mSize;
1447 /* chew off 32-bit chunks */
1448 while (toDo >= 4) {
1449 here = mkPCastTo(
1450 mce, Ity_I32,
1451 expr2vbits_LDle ( mce, Ity_I32,
1452 d->mAddr, d->mSize - toDo )
1453 );
1454 curr = mkUifU32(mce, here, curr);
1455 toDo -= 4;
1456 }
1457 /* chew off 16-bit chunks */
1458 while (toDo >= 2) {
1459 here = mkPCastTo(
1460 mce, Ity_I32,
1461 expr2vbits_LDle ( mce, Ity_I16,
1462 d->mAddr, d->mSize - toDo )
1463 );
1464 curr = mkUifU32(mce, here, curr);
1465 toDo -= 2;
1466 }
1467 tl_assert(toDo == 0); /* also need to handle 1-byte excess */
1468 }
1469
1470 /* Whew! So curr is a 32-bit V-value summarising pessimistically
1471 all the inputs to the helper. Now we need to re-distribute the
1472 results to all destinations. */
1473
1474 /* Outputs: the destination temporary, if there is one. */
1475 if (d->tmp != IRTemp_INVALID) {
1476 dst = findShadowTmp(mce, d->tmp);
1477 tyDst = typeOfIRTemp(mce->bb->tyenv, d->tmp);
1478 assign( mce->bb, dst, mkPCastTo( mce, tyDst, curr) );
1479 }
1480
1481 /* Outputs: guest state that we write or modify. */
1482 for (i = 0; i < d->nFxState; i++) {
1483 tl_assert(d->fxState[i].fx != Ifx_None);
1484 if (d->fxState[i].fx == Ifx_Read)
1485 continue;
sewardja7203252004-11-26 19:17:47 +00001486 /* Ignore any sections marked as 'always defined'. */
1487 if (isAlwaysDefd(mce, d->fxState[i].offset, d->fxState[i].size ))
1488 continue;
sewardje9e16d32004-12-10 13:17:55 +00001489 /* This state element is written or modified. So we need to
1490 consider it. If larger than 8 bytes, deal with it in 8-byte
1491 chunks. */
1492 gSz = d->fxState[i].size;
1493 gOff = d->fxState[i].offset;
1494 tl_assert(gSz > 0);
1495 while (True) {
1496 if (gSz == 0) break;
1497 n = gSz <= 8 ? gSz : 8;
1498 /* Write suitably-casted 'curr' to the state slice
1499 gOff .. gOff+n-1 */
1500 tyDst = szToITy( n );
1501 do_shadow_PUT( mce, gOff,
1502 NULL, /* original atom */
1503 mkPCastTo( mce, tyDst, curr ) );
1504 gSz -= n;
1505 gOff += n;
1506 }
sewardj95448072004-11-22 20:19:51 +00001507 }
1508
1509 /* Outputs: memory that we write or modify. */
1510 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
1511 offset = 0;
1512 toDo = d->mSize;
1513 /* chew off 32-bit chunks */
1514 while (toDo >= 4) {
1515 do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
1516 NULL, /* original data */
1517 mkPCastTo( mce, Ity_I32, curr ) );
1518 toDo -= 4;
1519 }
1520 /* chew off 16-bit chunks */
1521 while (toDo >= 2) {
1522 do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
1523 NULL, /* original data */
1524 mkPCastTo( mce, Ity_I16, curr ) );
1525 toDo -= 2;
1526 }
1527 tl_assert(toDo == 0); /* also need to handle 1-byte excess */
1528 }
1529
njn25e49d8e72002-09-23 09:36:25 +00001530}
1531
1532
sewardj95448072004-11-22 20:19:51 +00001533/*------------------------------------------------------------*/
1534/*--- Memcheck main ---*/
1535/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001536
sewardj95448072004-11-22 20:19:51 +00001537#if 0 /* UNUSED */
/* (Disabled debug aid, inside #if 0.)  Return True if 'at' is a
   constant equal to one of a small set of suspicious "poison"
   literals; temps are never considered bogus. */
static Bool isBogusAtom ( IRAtom* at )
{
   ULong n = 0;
   IRConst* con;
   tl_assert(isAtom(at));
   if (at->tag == Iex_Tmp)
      return False;
   tl_assert(at->tag == Iex_Const);
   con = at->Iex.Const.con;
   /* Widen the constant to 64 bits for a uniform comparison. */
   switch (con->tag) {
      case Ico_U8:  n = (ULong)con->Ico.U8; break;
      case Ico_U16: n = (ULong)con->Ico.U16; break;
      case Ico_U32: n = (ULong)con->Ico.U32; break;
      case Ico_U64: n = (ULong)con->Ico.U64; break;
      default: ppIRExpr(at); tl_assert(0);
   }
   /* VG_(printf)("%llx\n", n); */
   /* NOTE(review): the final constant is decimal 1010100, unlike the
      hex literals above -- possibly a missing 0x prefix (0x1010100)?
      Harmless while this code is #if 0'd; verify before enabling. */
   return (n == 0xFEFEFEFF
           || n == 0x80808080
           || n == 0x1010101
           || n == 1010100);
}
njn25e49d8e72002-09-23 09:36:25 +00001560
sewardj95448072004-11-22 20:19:51 +00001561static Bool checkForBogusLiterals ( /*FLAT*/ IRStmt* st )
1562{
1563 Int i;
1564 IRExpr* e;
1565 switch (st->tag) {
1566 case Ist_Tmp:
1567 e = st->Ist.Tmp.data;
1568 switch (e->tag) {
1569 case Iex_Get:
1570 case Iex_Tmp:
1571 return False;
1572 case Iex_Unop:
1573 return isBogusAtom(e->Iex.Unop.arg);
1574 case Iex_Binop:
1575 return isBogusAtom(e->Iex.Binop.arg1)
1576 || isBogusAtom(e->Iex.Binop.arg2);
1577 case Iex_Mux0X:
1578 return isBogusAtom(e->Iex.Mux0X.cond)
1579 || isBogusAtom(e->Iex.Mux0X.expr0)
1580 || isBogusAtom(e->Iex.Mux0X.exprX);
1581 case Iex_LDle:
1582 return isBogusAtom(e->Iex.LDle.addr);
1583 case Iex_CCall:
1584 for (i = 0; e->Iex.CCall.args[i]; i++)
1585 if (isBogusAtom(e->Iex.CCall.args[i]))
1586 return True;
1587 return False;
1588 default:
1589 goto unhandled;
1590 }
1591 case Ist_Put:
1592 return isBogusAtom(st->Ist.Put.data);
1593 case Ist_STle:
1594 return isBogusAtom(st->Ist.STle.addr)
1595 || isBogusAtom(st->Ist.STle.data);
1596 case Ist_Exit:
1597 return isBogusAtom(st->Ist.Exit.cond);
1598 default:
1599 unhandled:
1600 ppIRStmt(st);
1601 VG_(tool_panic)("hasBogusLiterals");
1602 }
1603}
1604#endif /* UNUSED */
njn25e49d8e72002-09-23 09:36:25 +00001605
njn25e49d8e72002-09-23 09:36:25 +00001606
sewardj95448072004-11-22 20:19:51 +00001607IRBB* TL_(instrument) ( IRBB* bb_in, VexGuestLayout* layout, IRType hWordTy )
1608{
1609 Bool verboze = False; //True;
njn25e49d8e72002-09-23 09:36:25 +00001610
sewardj95448072004-11-22 20:19:51 +00001611 /* Bool hasBogusLiterals = False; */
njn25e49d8e72002-09-23 09:36:25 +00001612
sewardj95448072004-11-22 20:19:51 +00001613 Int i, j, first_stmt;
1614 IRStmt* st;
1615 MCEnv mce;
njn25e49d8e72002-09-23 09:36:25 +00001616
sewardj95448072004-11-22 20:19:51 +00001617 /* Set up BB */
1618 IRBB* bb = emptyIRBB();
1619 bb->tyenv = dopyIRTypeEnv(bb_in->tyenv);
1620 bb->next = dopyIRExpr(bb_in->next);
1621 bb->jumpkind = bb_in->jumpkind;
njn25e49d8e72002-09-23 09:36:25 +00001622
sewardj95448072004-11-22 20:19:51 +00001623 /* Set up the running environment. Only .bb is modified as we go
1624 along. */
1625 mce.bb = bb;
1626 mce.layout = layout;
1627 mce.n_originalTmps = bb->tyenv->types_used;
1628 mce.hWordTy = hWordTy;
1629 mce.tmpMap = LibVEX_Alloc(mce.n_originalTmps * sizeof(IRTemp));
1630 for (i = 0; i < mce.n_originalTmps; i++)
1631 mce.tmpMap[i] = IRTemp_INVALID;
1632
1633 /* Iterate over the stmts. */
1634
1635 for (i = 0; i < bb_in->stmts_used; i++) {
1636 st = bb_in->stmts[i];
1637 if (!st) continue;
1638
1639 tl_assert(isFlatIRStmt(st));
1640
1641 /*
1642 if (!hasBogusLiterals) {
1643 hasBogusLiterals = checkForBogusLiterals(st);
1644 if (hasBogusLiterals) {
1645 VG_(printf)("bogus: ");
1646 ppIRStmt(st);
1647 VG_(printf)("\n");
1648 }
1649 }
1650 */
1651 first_stmt = bb->stmts_used;
1652
1653 if (verboze) {
1654 ppIRStmt(st);
1655 VG_(printf)("\n\n");
1656 }
1657
1658 switch (st->tag) {
1659
1660 case Ist_Tmp:
1661 assign( bb, findShadowTmp(&mce, st->Ist.Tmp.tmp),
1662 expr2vbits( &mce, st->Ist.Tmp.data) );
njn25e49d8e72002-09-23 09:36:25 +00001663 break;
1664
sewardj95448072004-11-22 20:19:51 +00001665 case Ist_Put:
1666 do_shadow_PUT( &mce,
1667 st->Ist.Put.offset,
1668 st->Ist.Put.data,
1669 NULL /* shadow atom */ );
njn25e49d8e72002-09-23 09:36:25 +00001670 break;
1671
sewardj95448072004-11-22 20:19:51 +00001672 case Ist_PutI:
1673 do_shadow_PUTI( &mce,
1674 st->Ist.PutI.descr,
1675 st->Ist.PutI.ix,
1676 st->Ist.PutI.bias,
1677 st->Ist.PutI.data );
njn25e49d8e72002-09-23 09:36:25 +00001678 break;
1679
sewardj95448072004-11-22 20:19:51 +00001680 case Ist_STle:
1681 do_shadow_STle( &mce, st->Ist.STle.addr, 0/* addr bias */,
1682 st->Ist.STle.data,
1683 NULL /* shadow data */ );
njn25e49d8e72002-09-23 09:36:25 +00001684 break;
1685
sewardj95448072004-11-22 20:19:51 +00001686 case Ist_Exit:
1687 /* if (!hasBogusLiterals) */
1688 complainIfUndefined( &mce, st->Ist.Exit.guard );
njn25e49d8e72002-09-23 09:36:25 +00001689 break;
1690
sewardj95448072004-11-22 20:19:51 +00001691 case Ist_Dirty:
1692 do_shadow_Dirty( &mce, st->Ist.Dirty.details );
njn25e49d8e72002-09-23 09:36:25 +00001693 break;
1694
1695 default:
sewardj95448072004-11-22 20:19:51 +00001696 VG_(printf)("\n");
1697 ppIRStmt(st);
1698 VG_(printf)("\n");
1699 VG_(tool_panic)("memcheck: unhandled IRStmt");
1700
1701 } /* switch (st->tag) */
1702
1703 if (verboze) {
1704 for (j = first_stmt; j < bb->stmts_used; j++) {
1705 VG_(printf)(" ");
1706 ppIRStmt(bb->stmts[j]);
1707 VG_(printf)("\n");
1708 }
1709 VG_(printf)("\n");
njn25e49d8e72002-09-23 09:36:25 +00001710 }
sewardj95448072004-11-22 20:19:51 +00001711
1712 addStmtToIRBB(bb, st);
1713
njn25e49d8e72002-09-23 09:36:25 +00001714 }
njn25e49d8e72002-09-23 09:36:25 +00001715
sewardj95448072004-11-22 20:19:51 +00001716 /* Now we need to complain if the jump target is undefined. */
1717 first_stmt = bb->stmts_used;
njn25e49d8e72002-09-23 09:36:25 +00001718
sewardj95448072004-11-22 20:19:51 +00001719 if (verboze) {
1720 VG_(printf)("bb->next = ");
1721 ppIRExpr(bb->next);
1722 VG_(printf)("\n\n");
1723 }
njn25e49d8e72002-09-23 09:36:25 +00001724
sewardj95448072004-11-22 20:19:51 +00001725 complainIfUndefined( &mce, bb->next );
njn25e49d8e72002-09-23 09:36:25 +00001726
sewardj95448072004-11-22 20:19:51 +00001727 if (verboze) {
1728 for (j = first_stmt; j < bb->stmts_used; j++) {
1729 VG_(printf)(" ");
1730 ppIRStmt(bb->stmts[j]);
1731 VG_(printf)("\n");
njn25e49d8e72002-09-23 09:36:25 +00001732 }
sewardj95448072004-11-22 20:19:51 +00001733 VG_(printf)("\n");
njn25e49d8e72002-09-23 09:36:25 +00001734 }
njn25e49d8e72002-09-23 09:36:25 +00001735
sewardj95448072004-11-22 20:19:51 +00001736 return bb;
1737}
njn25e49d8e72002-09-23 09:36:25 +00001738
1739/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001740/*--- end mc_translate.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001741/*--------------------------------------------------------------------*/