//===- InstCombineCasts.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
/// expression.  If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
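/// For illustration (not an exhaustive list of the handled forms), given an
/// i32 allocation size such as:
///   %size = shl i32 %n, 2
/// the decomposition returns X = %n with Scale = 4 and Offset = 0, while a
/// plain constant like i32 10 decomposes to X = 0, Scale = 0, Offset = 10.
/// The value names here are illustrative only.
///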
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        int &Offset) {
  assert(Val->getType()->isInteger(32) && "Unexpected allocation size type!");
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale  = 0;
    return ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = 1U << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
          DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
/// try to eliminate the cast by moving the type information into the alloc.
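///
/// As an illustrative sketch of the intent (the exact result depends on
/// TargetData; the value names are made up):
///   %buf = alloca i16, i32 4
///   %p   = bitcast i16* %buf to i32*
/// can be rewritten to allocate the cast-to type directly:
///   %buf = alloca i32, i32 2
/// with any remaining users of the old alloca routed through a bitcast.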
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocaInst &AI) {
  // This requires TargetData to get the alloca alignment and size information.
  if (!TD) return 0;

  const PointerType *PTy = cast<PointerType>(CI.getType());

  BuilderTy AllocaBuilder(*Builder);
  AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);

  // Get the type really allocated and the type casted to.
  const Type *AllocElTy = AI.getAllocatedType();
  const Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;

  unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return 0;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.  (A reference
  // from a dbg.declare doesn't count as a use for this purpose.)
  if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
      CastElTyAlign == AllocElTyAlign) return 0;

  uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return 0;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  int ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return 0;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = 0;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(Type::getInt32Ty(CI.getContext()), Scale);
    // Insert before the alloca, not before the cast.
    Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
  }

  if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(Type::getInt32Ty(CI.getContext()),
                                  Offset, true);
    Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
  }

  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlignment());
  New->takeName(&AI);

  // If the allocation has one real use plus a dbg.declare, just remove the
  // declare.
  if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
    EraseInstFromFunction(*(Instruction*)DI);
  }
  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  else if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
    AI.replaceAllUsesWith(NewCast);
  }
  return ReplaceInstUsesWith(CI, New);
}

/// CanEvaluateInDifferentType - Return true if we can take the specified value
/// and return it as type Ty without inserting any new casts and without
/// changing the computed value.  This is used by code that tries to decide
/// whether promoting or shrinking integer operations to wider or smaller types
/// will allow us to eliminate a truncate or extend.
///
/// This is a truncation operation if Ty is smaller than V->getType(), or an
/// extension operation if Ty is larger.
///
/// If CastOpc is a truncation, then Ty will be a type smaller than V.  We
/// should return true if trunc(V) can be computed by computing V in the smaller
/// type.  If V is an instruction, then trunc(inst(x,y)) can be computed as
/// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
/// efficiently truncated.
///
/// If CastOpc is a sext or zext, we are asking if the low bits of the value can
/// be computed in a larger type, which is then and'd or sext_in_reg'd to get
/// the final result.
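///
/// A representative truncation case (value names are illustrative, and each
/// intermediate value is assumed to have a single use):
///   %a = zext i8 %s to i32
///   %x = add i32 %a, 7
///   %t = trunc i32 %x to i8
/// Here the whole expression can be evaluated directly in i8 (the zext
/// disappears and the constant is truncated), so this returns true when asked
/// whether %x can be evaluated as i8.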
bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty,
                                              unsigned CastOpc,
                                              int &NumCastsRemoved){
  // We can always evaluate constants in another type.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  const Type *OrigTy = V->getType();

  // If this is an extension or truncate, we can often eliminate it.
  if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
    // If this is a cast from the destination type, we can trivially eliminate
    // it, and this will remove a cast overall.
    if (I->getOperand(0)->getType() == Ty) {
      // If the first operand is itself a cast, and is eliminable, do not count
      // this as an eliminable cast.  We would prefer to eliminate those two
      // casts first.
      if (!isa<CastInst>(I->getOperand(0)) && I->hasOneUse())
        ++NumCastsRemoved;
      return true;
    }
  }

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                      NumCastsRemoved) &&
           CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
                                      NumCastsRemoved);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    if (BitWidth < OrigBitWidth) {
      APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
      if (MaskedValueIsZero(I->getOperand(0), Mask) &&
          MaskedValueIsZero(I->getOperand(1), Mask)) {
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved) &&
               CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
                                          NumCastsRemoved);
      }
    }
    break;
  }
  case Instruction::Shl:
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (BitWidth < OrigTy->getScalarSizeInBits() &&
          CI->getLimitedValue(BitWidth) < BitWidth)
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved);
    }
    break;
  case Instruction::LShr:
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (BitWidth < OrigBitWidth &&
          MaskedValueIsZero(I->getOperand(0),
            APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
          CI->getLimitedValue(BitWidth) < BitWidth) {
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved);
      }
    }
    break;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    // If this is the same kind of case as our original (e.g. zext+zext), we
    // can safely replace it.  Note that replacing it does not reduce the number
    // of casts in the input.
    if (Opc == CastOpc)
      return true;

    // sext (zext ty1), ty2 -> zext ty2
    if (CastOpc == Instruction::SExt && Opc == Instruction::ZExt)
      return true;
    break;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return CanEvaluateInDifferentType(SI->getTrueValue(), Ty, CastOpc,
                                      NumCastsRemoved) &&
           CanEvaluateInDifferentType(SI->getFalseValue(), Ty, CastOpc,
                                      NumCastsRemoved);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!CanEvaluateInDifferentType(PN->getIncomingValue(i), Ty, CastOpc,
                                      NumCastsRemoved))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// EvaluateInDifferentType - Given an expression that
/// CanEvaluateInDifferentType returns true for, actually insert the code to
/// evaluate the expression.
Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
                                             bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = 0;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    Res = CastInst::Create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),Ty);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty);
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
    break;
  }

  Res->takeName(I);
  return InsertNewInstBefore(Res, *I);
}


/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
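///
/// For example, the pair
///   %m = zext i8 %x to i16
///   %r = zext i16 %m to i32
/// folds to a single zext from i8 to i32, whereas a trunc followed by a wider
/// zext does not fold into one cast.  (Illustrative only; the full case matrix
/// lives in CastInst::isEliminableCastPair.)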
static Instruction::CastOps
isEliminableCastPair(
  const CastInst *CI, ///< The first cast instruction
  unsigned opcode,       ///< The opcode of the second cast instruction
  const Type *DstTy,     ///< The target type for the second cast instruction
  TargetData *TD         ///< The target data for pointer size
) {

  const Type *SrcTy = CI->getOperand(0)->getType();   // A from above
  const Type *MidTy = CI->getType();                  // B from above

  // Get the opcodes of the two Cast instructions
  Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
  Instruction::CastOps secondOp = Instruction::CastOps(opcode);

  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy,
                                  TD ? TD->getIntPtrType(CI->getContext()) : 0);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr &&
       (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
      (Res == Instruction::PtrToInt &&
       (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
/// in any code being generated.  It does not require codegen if V is simple
/// enough or if the cast can be folded into other casts.
bool InstCombiner::ValueRequiresCast(Instruction::CastOps opcode,const Value *V,
                                     const Type *Ty) {
  if (V->getType() == Ty || isa<Constant>(V)) return false;

  // If this is another cast that can be eliminated, it isn't codegen either.
  if (const CastInst *CI = dyn_cast<CastInst>(V))
    if (isEliminableCastPair(CI, opcode, Ty, TD))
      return false;
  return true;
}


/// @brief Implement the transforms common to all CastInst visitors.
Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
  // eliminate it now.
  if (CastInst *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps opc =
        isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
    }
  }

  // If we are casting a select then fold the cast into the select
  if (SelectInst *SI = dyn_cast<SelectInst>(Src))
    if (Instruction *NV = FoldOpIntoSelect(CI, SI))
      return NV;

  // If we are casting a PHI then fold the cast into the PHI
  if (isa<PHINode>(Src)) {
    // We don't do this if this would create a PHI node with an illegal type if
    // it is currently legal.
    if (!isa<IntegerType>(Src->getType()) ||
        !isa<IntegerType>(CI.getType()) ||
        ShouldChangeType(CI.getType(), Src->getType()))
      if (Instruction *NV = FoldOpIntoPhi(CI))
        return NV;
  }

  return 0;
}

/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset, turn
    // this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices()) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      Worklist.Add(GEP);
      CI.setOperand(0, GEP->getOperand(0));
      return &CI;
    }

    // If the GEP has a single use, and the base pointer is a bitcast, and the
    // GEP computes a constant offset, see if we can convert these three
    // instructions into fewer.  This typically happens with unions and other
    // non-type-safe code.
    if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
      if (GEP->hasAllConstantIndices()) {
        // We are guaranteed to get a constant from EmitGEPOffset.
        ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP));
        int64_t Offset = OffsetV->getSExtValue();

        // Get the base pointer input of the bitcast, and the type it points to.
        Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
        const Type *GEPIdxTy =
          cast<PointerType>(OrigBase->getType())->getElementType();
        SmallVector<Value*, 8> NewIndices;
        if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
          // If we were able to index down into an element, create the GEP
          // and bitcast the result.  This eliminates one bitcast, potentially
          // two.
          Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
            Builder->CreateInBoundsGEP(OrigBase,
                                       NewIndices.begin(), NewIndices.end()) :
            Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
          NGEP->takeName(GEP);

          if (isa<BitCastInst>(CI))
            return new BitCastInst(NGEP, CI.getType());
          assert(isa<PtrToIntInst>(CI));
          return new PtrToIntInst(NGEP, CI.getType());
        }
      }
    }
  }

  return commonCastTransforms(CI);
}

/// commonIntCastTransforms - This function implements the common transforms
/// for trunc, zext, and sext.
Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  // See if we can simplify any instructions used by the LHS whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  // If the source isn't an instruction or has more than one use then we
  // can't do anything more.
  Instruction *Src = dyn_cast<Instruction>(CI.getOperand(0));
  if (!Src || !Src->hasOneUse())
    return 0;

  const Type *SrcTy = Src->getType();
  const Type *DestTy = CI.getType();
  uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
  uint32_t DestBitSize = DestTy->getScalarSizeInBits();

  // Attempt to propagate the cast into the instruction for int->int casts.
  int NumCastsRemoved = 0;
  // Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((isa<VectorType>(DestTy) ||
       ShouldChangeType(Src->getType(), DestTy)) &&
      CanEvaluateInDifferentType(Src, DestTy,
                                 CI.getOpcode(), NumCastsRemoved)) {
    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.  If this is a zero-extension,
    // we need to do an AND to maintain the clear top-part of the computation,
    // so we require that the input have eliminated at least one cast.  If this
    // is a sign extension, we insert two new casts (to do the extension) so we
    // require that two casts have been eliminated.
    bool DoXForm = false;
    bool JustReplace = false;
    switch (CI.getOpcode()) {
    default:
      // All the others use floating point so we shouldn't actually
      // get here because of the check above.
      llvm_unreachable("Unknown cast type");
    case Instruction::Trunc:
      DoXForm = true;
      break;
    case Instruction::ZExt: {
      DoXForm = NumCastsRemoved >= 1;

      if (!DoXForm && 0) {
        // If it's unnecessary to issue an AND to clear the high bits, it's
        // always profitable to do this xform.
        Value *TryRes = EvaluateInDifferentType(Src, DestTy, false);
        APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
        if (MaskedValueIsZero(TryRes, Mask))
          return ReplaceInstUsesWith(CI, TryRes);

        if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
          if (TryI->use_empty())
            EraseInstFromFunction(*TryI);
      }
      break;
    }
    case Instruction::SExt: {
      DoXForm = NumCastsRemoved >= 2;
      if (!DoXForm && !isa<TruncInst>(Src) && 0) {
        // If we do not have to emit the truncate + sext pair, then it's always
        // profitable to do this xform.
        //
        // It's not safe to eliminate the trunc + sext pair if one of the
        // eliminated cast is a truncate. e.g.
        //   t2 = trunc i32 t1 to i16
        //   t3 = sext i16 t2 to i32
        // !=
        //   i32 t1
        Value *TryRes = EvaluateInDifferentType(Src, DestTy, true);
        unsigned NumSignBits = ComputeNumSignBits(TryRes);
        if (NumSignBits > (DestBitSize - SrcBitSize))
          return ReplaceInstUsesWith(CI, TryRes);

        if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
          if (TryI->use_empty())
            EraseInstFromFunction(*TryI);
      }
      break;
    }
    }

    if (DoXForm) {
      DEBUG(errs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to avoid cast: " << CI);
      Value *Res = EvaluateInDifferentType(Src, DestTy,
                                           CI.getOpcode() == Instruction::SExt);
      if (JustReplace)
        // Just replace this cast with the result.
        return ReplaceInstUsesWith(CI, Res);

      assert(Res->getType() == DestTy);
      switch (CI.getOpcode()) {
      default: llvm_unreachable("Unknown cast type!");
      case Instruction::Trunc:
        // Just replace this cast with the result.
        return ReplaceInstUsesWith(CI, Res);
      case Instruction::ZExt: {
        assert(SrcBitSize < DestBitSize && "Not a zext?");

        // If the high bits are already zero, just replace this cast with the
        // result.
        APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
        if (MaskedValueIsZero(Res, Mask))
          return ReplaceInstUsesWith(CI, Res);

        // We need to emit an AND to clear the high bits.
        Constant *C = ConstantInt::get(CI.getContext(),
                                 APInt::getLowBitsSet(DestBitSize, SrcBitSize));
        return BinaryOperator::CreateAnd(Res, C);
      }
      case Instruction::SExt: {
        // If the high bits are already filled with sign bit, just replace this
        // cast with the result.
        unsigned NumSignBits = ComputeNumSignBits(Res);
        if (NumSignBits > (DestBitSize - SrcBitSize))
          return ReplaceInstUsesWith(CI, Res);

        // We need to emit a cast to truncate, then a cast to sext.
        return new SExtInst(Builder->CreateTrunc(Res, Src->getType()), DestTy);
      }
      }
    }
  }

  return 0;
}

Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonIntCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  const Type *DestTy = CI.getType();

  // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
  if (DestTy->isInteger(1)) {
    Constant *One = ConstantInt::get(Src->getType(), 1);
    Src = Builder->CreateAnd(Src, One, "tmp");
    Value *Zero = Constant::getNullValue(Src->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
  }

  return 0;
}

/// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
/// in order to eliminate the icmp.
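///
/// A couple of the patterns handled below, shown for illustration:
///   zext (icmp slt i32 %x, 0) to i32   -->  lshr i32 %x, 31
///   zext (icmp ne i32 %x, 0) to i32    -->  %x
/// where the second form requires that only the low bit of %x can be nonzero.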
Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                             bool DoXform) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
    const APInt &Op1CV = Op1C->getValue();

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
        (ICI->getPredicate() == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
      if (!DoXform) return ICI;

      Value *In = ICI->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits()-1);
      In = Builder->CreateLShr(In, Sh, In->getName()+".lobit");
      if (In->getType() != CI.getType())
        In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/, "tmp");

      if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder->CreateXor(In, One, In->getName()+".not");
      }

      return ReplaceInstUsesWith(CI, In);
    }



    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
        // This only works for EQ and NE
        ICI->isEquality()) {
      // If Op1C is some other power of two, convert:
      uint32_t BitWidth = Op1C->getType()->getBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      APInt TypeMask(APInt::getAllOnesValue(BitWidth));
      ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);

      APInt KnownZeroMask(~KnownZero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoXform) return ICI;

        bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
        if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()),
                                           isNE);
          Res = ConstantExpr::getZExt(Res, CI.getType());
          return ReplaceInstUsesWith(CI, Res);
        }

        uint32_t ShiftAmt = KnownZeroMask.logBase2();
        Value *In = ICI->getOperand(0);
        if (ShiftAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
                                   In->getName()+".lobit");
        }

        if ((Op1CV != 0) == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder->CreateXor(In, One, "tmp");
        }

        if (CI.getType() == In->getType())
          return ReplaceInstUsesWith(CI, In);
        else
          return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
    if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
      uint32_t BitWidth = ITy->getBitWidth();
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);

      APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
      APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
      APInt TypeMask(APInt::getAllOnesValue(BitWidth));
      ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
      ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);

      if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
        APInt KnownBits = KnownZeroLHS | KnownOneLHS;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoXform) return ICI;

          Value *Result = Builder->CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownOneLHS.uge(UnknownBit))
            Result = Builder->CreateAnd(Result,
                                        ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder->CreateLShr(
               Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(ICI);
          return ReplaceInstUsesWith(CI, Result);
        }
      }
    }
  }

  return 0;
}

Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonIntCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    //   SrcSize <  DstSize: zext(a & mask)
    //   SrcSize == DstSize: a & mask
    //   SrcSize  > DstSize: trunc(a) & mask
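    // For example (illustrative values), with A = i32, Mid = i8, Dest = i16:
    //   %t = trunc i32 %a to i8
    //   %z = zext i8 %t to i16
    // becomes
    //   %t2 = trunc i32 %a to i16
    //   %z  = and i16 %t2, 255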
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder->CreateTrunc(A, CI.getType(), "tmp");
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(ICI, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) will be transformed.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
      return BinaryOperator::Create(Instruction::Or, LCast, RCast);
    }
  }

  // zext(trunc(t) & C) -> (t & zext(C)).
  if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
    if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
      if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
        Value *TI0 = TI->getOperand(0);
        if (TI0->getType() == CI.getType())
          return
            BinaryOperator::CreateAnd(TI0,
                                      ConstantExpr::getZExt(C, CI.getType()));
      }

  // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
  if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
    if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
      if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
        if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
            And->getOperand(1) == C)
          if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
            Value *TI0 = TI->getOperand(0);
            if (TI0->getType() == CI.getType()) {
              Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
              Value *NewAnd = Builder->CreateAnd(TI0, ZC, "tmp");
              return BinaryOperator::CreateXor(NewAnd, ZC);
            }
          }

  // zext (xor i1 X, true) to i32  --> xor (zext i1 X to i32), 1
  Value *X;
  if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isInteger(1) &&
      match(SrcI, m_Not(m_Value(X))) &&
      (!X->hasOneUse() || !isa<CmpInst>(X))) {
    Value *New = Builder->CreateZExt(X, CI.getType());
    return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
  }

  return 0;
}

Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  if (Instruction *I = commonIntCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);

  // Canonicalize sign-extend from i1 to a select.
  if (Src->getType()->isInteger(1))
    return SelectInst::Create(Src,
                              Constant::getAllOnesValue(CI.getType()),
                              Constant::getNullValue(CI.getType()));

  // See if the value being truncated is already sign extended.  If so, just
  // eliminate the trunc/sext pair.
  if (Operator::getOpcode(Src) == Instruction::Trunc) {
    Value *Op = cast<User>(Src)->getOperand(0);
    unsigned OpBits   = Op->getType()->getScalarSizeInBits();
    unsigned MidBits  = Src->getType()->getScalarSizeInBits();
    unsigned DestBits = CI.getType()->getScalarSizeInBits();
    unsigned NumSignBits = ComputeNumSignBits(Op);

    if (OpBits == DestBits) {
      // Op is i32, Mid is i8, and Dest is i32.  If Op has more than 24 sign
      // bits, it is already ready.
      if (NumSignBits > DestBits-MidBits)
        return ReplaceInstUsesWith(CI, Op);
    } else if (OpBits < DestBits) {
      // Op is i32, Mid is i8, and Dest is i64.  If Op has more than 24 sign
      // bits, just sext from i32.
      if (NumSignBits > OpBits-MidBits)
        return new SExtInst(Op, CI.getType(), "tmp");
    } else {
      // Op is i64, Mid is i8, and Dest is i32.  If Op has more than 56 sign
      // bits, just truncate to i32.
      if (NumSignBits > OpBits-MidBits)
        return new TruncInst(Op, CI.getType(), "tmp");
    }
  }

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension.  Since we don't, look deeper and check
  // for a truncate.  If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = 0;
  ConstantInt *BA = 0, *CA = 0;
  if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)),
                        m_ConstantInt(CA))) &&
      BA == CA && isa<TruncInst>(A)) {
    Value *I = cast<TruncInst>(A)->getOperand(0);
    if (I->getType() == CI.getType()) {
      unsigned MidSize = Src->getType()->getScalarSizeInBits();
      unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
      unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
      Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
      I = Builder->CreateShl(I, ShAmtV, CI.getName());
      return BinaryOperator::CreateAShr(I, ShAmtV);
    }
  }

  return 0;
}


/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
/// in the specified FP type without changing its value.
static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  if (!losesInfo)
    return ConstantFP::get(CFP->getContext(), F);
  return 0;
}

/// LookThroughFPExtensions - If this is an fp extension instruction, look
/// through it until we get the source value.
static Value *LookThroughFPExtensions(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (I->getOpcode() == Instruction::FPExt)
      return LookThroughFPExtensions(I->getOperand(0));

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it.  This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
      return V;  // No constant folding of this.
    // See if the value can be truncated to float and then reextended.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle))
      return V;
    if (CFP->getType()->isDoubleTy())
      return V;  // Won't shrink.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble))
      return V;
    // Don't try to shrink to various long double types.
  }

  return V;
}

Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
  if (Instruction *I = commonCastTransforms(CI))
    return I;

  // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
  // smaller than the destination type, we can eliminate the truncate by doing
  // the add as the smaller type.  This applies to fadd/fsub/fmul/fdiv as well
  // as many builtins (sqrt, etc).
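  // For instance (value names illustrative), when %x and %y are floats:
  //   %a = fpext float %x to double
  //   %b = fpext float %y to double
  //   %s = fadd double %a, %b
  //   %r = fptrunc double %s to float
  // can be done directly as "fadd float %x, %y".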
  BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
  if (OpI && OpI->hasOneUse()) {
    switch (OpI->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      const Type *SrcTy = OpI->getType();
      Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
      Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
      if (LHSTrunc->getType() != SrcTy &&
          RHSTrunc->getType() != SrcTy) {
        unsigned DstSize = CI.getType()->getScalarSizeInBits();
        // If the source types were both smaller than the destination type of
        // the cast, do this xform.
        if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
            RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
          LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
          RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
          return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
        }
      }
      break;
    }
  }
  return 0;
}

Instruction *InstCombiner::visitFPExt(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (OpI == 0)
    return commonCastTransforms(FI);

  // fptoui(uitofp(X)) --> X
  // fptoui(sitofp(X)) --> X
  // This is safe if the intermediate type has enough bits in its mantissa to
  // accurately represent all values of X.  For example, do not do this with
  // i64->float->i64.  This is also safe for the sitofp case, because any
  // negative 'X' value would cause an undefined result for the fptoui.
  if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
      OpI->getOperand(0)->getType() == FI.getType() &&
      (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
                    OpI->getType()->getFPMantissaWidth())
    return ReplaceInstUsesWith(FI, OpI->getOperand(0));

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (OpI == 0)
    return commonCastTransforms(FI);

  // fptosi(sitofp(X)) --> X
  // fptosi(uitofp(X)) --> X
  // This is safe if the intermediate type has enough bits in its mantissa to
  // accurately represent all values of X.  For example, do not do this with
  // i64->float->i64.  This is also safe for the uitofp case, because any 'X'
  // value with the sign bit set would make the fptosi result undefined anyway.
  if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
      OpI->getOperand(0)->getType() == FI.getType() &&
      (int)FI.getType()->getScalarSizeInBits() <=
                    OpI->getType()->getFPMantissaWidth())
    return ReplaceInstUsesWith(FI, OpI->getOperand(0));

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is smaller than the intptr_t type for
  // this target, do a ptrtoint to intptr_t then do a trunc.  This allows the
  // trunc to be exposed to other transforms.  Don't do this for extending
  // ptrtoint's, because we don't know if the target sign or zero extends its
  // pointers.
  if (TD &&
      CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
    Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
                                       TD->getIntPtrType(CI.getContext()),
                                       "tmp");
    return new TruncInst(P, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}


Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is larger than the intptr_t type for
  // this target, do a trunc to the intptr_t type, then inttoptr of it.  This
  // allows the trunc to be exposed to other transforms.  Don't do this for
  // extending inttoptr's, because we don't know if the target sign or zero
  // extends to pointers.
  if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
      TD->getPointerSizeInBits()) {
    Value *P = Builder->CreateTrunc(CI.getOperand(0),
                                    TD->getIntPtrType(CI.getContext()), "tmp");
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return 0;
}

Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  const Type *SrcTy = Src->getType();
  const Type *DestTy = CI.getType();

  if (isa<PointerType>(SrcTy)) {
    if (Instruction *I = commonPointerCastTransforms(CI))
      return I;
  } else {
    if (Instruction *Result = commonCastTransforms(CI))
      return Result;
  }


  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return ReplaceInstUsesWith(CI, Src);

  if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
    const PointerType *SrcPTy = cast<PointerType>(SrcTy);
    const Type *DstElTy = DstPTy->getElementType();
    const Type *SrcElTy = SrcPTy->getElementType();

    // If the address spaces don't match, don't eliminate the bitcast, which is
    // required for changing types.
    if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
      return 0;

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0...  turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
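    // E.g. (illustrative) a bitcast of [4 x i32]* %p to i32* can become
    //   getelementptr inbounds [4 x i32]* %p, i32 0, i32 0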
    Constant *ZeroUInt =
      Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
    unsigned NumZeros = 0;
    while (SrcElTy != DstElTy &&
           isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
           SrcElTy->getNumContainedTypes() /* not "{}" */) {
      SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
      return GetElementPtrInst::CreateInBounds(Src, Idxs.begin(), Idxs.end(),"",
                                               ((Instruction*) NULL));
    }
  }

  if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
    if (DestVTy->getNumElements() == 1) {
      if (!isa<VectorType>(SrcTy)) {
        Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
        return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
      }
      // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
    }
  }

  if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      if (!isa<VectorType>(DestTy)) {
        Value *Elem =
          Builder->CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }
    }
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
    if (SVI->hasOneUse()) {
      // Okay, we have (bitconvert (shuffle ..)).  Check to see if this is
      // a bitconvert to a vector with the same # elts.
      if (isa<VectorType>(DestTy) &&
          cast<VectorType>(DestTy)->getNumElements() ==
                SVI->getType()->getNumElements() &&
          SVI->getType()->getNumElements() ==
            cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
        CastInst *Tmp;
        // If either of the operands is a cast from CI.getType(), then
        // evaluating the shuffle in the casted destination's type will allow
        // us to eliminate at least one cast.
        if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) &&
             Tmp->getOperand(0)->getType() == DestTy) ||
            ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) &&
             Tmp->getOperand(0)->getType() == DestTy)) {
          Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
          Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
          // Return a new shuffle vector.  Use the same element ID's, as we
          // know the vector types match #elts.
          return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
        }
      }
    }
  }
  return 0;
}