blob: 507e6f2a854d4069cd675b5a391ed28b81d18e4a [file] [log] [blame]
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// The LLVM Compiler Infrastructure
4//
Chris Lattner081ce942007-12-29 20:36:04 +00005// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007//
8//===----------------------------------------------------------------------===//
9//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
13//
14// This pass combines things like:
15// %Y = add i32 %X, 1
16// %Z = add i32 %Y, 1
17// into:
18// %Z = add i32 %X, 2
19//
20// This is a simple worklist driven algorithm.
21//
22// This pass guarantees that the following canonicalizations are performed on
23// the program:
24// 1. If a binary operator has a constant operand, it is moved to the RHS
25// 2. Bitwise operators with constant operands are always grouped so that
26// shifts are performed first, then or's, then and's, then xor's.
27// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28// 4. All cmp instructions on boolean values are replaced with logical ops
29// 5. add X, X is represented as (X*2) => (X << 1)
30// 6. Multiplies with a power-of-two constant argument are transformed into
31// shifts.
32// ... etc.
33//
34//===----------------------------------------------------------------------===//
35
36#define DEBUG_TYPE "instcombine"
37#include "llvm/Transforms/Scalar.h"
38#include "llvm/IntrinsicInst.h"
39#include "llvm/Pass.h"
40#include "llvm/DerivedTypes.h"
41#include "llvm/GlobalVariable.h"
42#include "llvm/Analysis/ConstantFolding.h"
43#include "llvm/Target/TargetData.h"
44#include "llvm/Transforms/Utils/BasicBlockUtils.h"
45#include "llvm/Transforms/Utils/Local.h"
46#include "llvm/Support/CallSite.h"
Nick Lewycky0185bbf2008-02-03 16:33:09 +000047#include "llvm/Support/ConstantRange.h"
Dan Gohmanf17a25c2007-07-18 16:29:46 +000048#include "llvm/Support/Debug.h"
49#include "llvm/Support/GetElementPtrTypeIterator.h"
50#include "llvm/Support/InstVisitor.h"
51#include "llvm/Support/MathExtras.h"
52#include "llvm/Support/PatternMatch.h"
53#include "llvm/Support/Compiler.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/SmallVector.h"
56#include "llvm/ADT/SmallPtrSet.h"
57#include "llvm/ADT/Statistic.h"
58#include "llvm/ADT/STLExtras.h"
59#include <algorithm>
Edwin Töröka0e6fce2008-04-20 08:33:11 +000060#include <climits>
Dan Gohmanf17a25c2007-07-18 16:29:46 +000061#include <sstream>
62using namespace llvm;
63using namespace llvm::PatternMatch;
64
// Pass-wide counters, printed when LLVM is run with -stats.
STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
70
namespace {
  /// InstCombiner - Worklist-driven peephole instruction combiner.  Work is
  /// dispatched through InstVisitor: each visit method returns null (no
  /// change), the visited instruction (changed in place, possibly now dead),
  /// or a brand-new replacement instruction.
  class VISIBILITY_HIDDEN InstCombiner
    : public FunctionPass,
      public InstVisitor<InstCombiner, Instruction*> {
    // Worklist of all of the instructions that need to be simplified.
    std::vector<Instruction*> Worklist;
    // Maps each worklist instruction to its index in Worklist, giving O(1)
    // membership tests and removal.  Removal nulls the vector slot rather
    // than shifting elements, so Worklist may contain null entries.
    DenseMap<Instruction*, unsigned> WorklistMap;
    TargetData *TD;          // Target layout information (required analysis).
    // NOTE(review): presumably set when the function is in LCSSA form so the
    // pass knows to preserve it — set outside this chunk, TODO confirm.
    bool MustPreserveLCSSA;
  public:
    static char ID; // Pass identification, replacement for typeid
    InstCombiner() : FunctionPass((intptr_t)&ID) {}

    /// AddToWorkList - Add the specified instruction to the worklist if it
    /// isn't already in it.  Only pushes when the map insertion actually
    /// added a new entry.
    void AddToWorkList(Instruction *I) {
      if (WorklistMap.insert(std::make_pair(I, Worklist.size())))
        Worklist.push_back(I);
    }

    // RemoveFromWorkList - remove I from the worklist if it exists.
    void RemoveFromWorkList(Instruction *I) {
      DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
      if (It == WorklistMap.end()) return; // Not in worklist.

      // Don't bother moving everything down, just null out the slot.
      Worklist[It->second] = 0;

      WorklistMap.erase(It);
    }

    /// RemoveOneFromWorkList - Pop the most recently pushed instruction.
    /// NOTE(review): may return a null slot left behind by
    /// RemoveFromWorkList — the driver loop is expected to tolerate/skip
    /// nulls (verify against DoOneIteration, not visible in this chunk).
    Instruction *RemoveOneFromWorkList() {
      Instruction *I = Worklist.back();
      Worklist.pop_back();
      WorklistMap.erase(I);
      return I;
    }


    /// AddUsersToWorkList - When an instruction is simplified, add all users of
    /// the instruction to the work lists because they might get more simplified
    /// now.
    ///
    void AddUsersToWorkList(Value &I) {
      for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
           UI != UE; ++UI)
        AddToWorkList(cast<Instruction>(*UI));
    }

    /// AddUsesToWorkList - When an instruction is simplified, add operands to
    /// the work lists because they might get more simplified now.
    ///
    void AddUsesToWorkList(Instruction &I) {
      for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(I.getOperand(i)))
          AddToWorkList(Op);
    }

    /// AddSoonDeadInstToWorklist - The specified instruction is about to become
    /// dead.  Add all of its operands to the worklist, turning them into
    /// undef's to reduce the number of uses of those instructions.
    ///
    /// Return the specified operand before it is turned into an undef.
    ///
    Value *AddSoonDeadInstToWorklist(Instruction &I, unsigned op) {
      // Grab the requested operand first: the loop below replaces every
      // instruction operand (including this one) with undef.
      Value *R = I.getOperand(op);

      for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(I.getOperand(i))) {
          AddToWorkList(Op);
          // Set the operand to undef to drop the use.
          I.setOperand(i, UndefValue::get(Op->getType()));
        }

      return R;
    }

  public:
    virtual bool runOnFunction(Function &F);

    bool DoOneIteration(Function &F, unsigned ItNum);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<TargetData>();
      AU.addPreservedID(LCSSAID);
      AU.setPreservesCFG();
    }

    TargetData &getTargetData() const { return *TD; }

    // Visitation implementation - Implement instruction combining for different
    // instruction types.  The semantics are as follows:
    // Return Value:
    //    null        - No change was made
    //     I          - Change was made, I is still valid, I may be dead though
    //   otherwise    - Change was made, replace I with returned instruction
    //
    Instruction *visitAdd(BinaryOperator &I);
    Instruction *visitSub(BinaryOperator &I);
    Instruction *visitMul(BinaryOperator &I);
    Instruction *visitURem(BinaryOperator &I);
    Instruction *visitSRem(BinaryOperator &I);
    Instruction *visitFRem(BinaryOperator &I);
    Instruction *commonRemTransforms(BinaryOperator &I);
    Instruction *commonIRemTransforms(BinaryOperator &I);
    Instruction *commonDivTransforms(BinaryOperator &I);
    Instruction *commonIDivTransforms(BinaryOperator &I);
    Instruction *visitUDiv(BinaryOperator &I);
    Instruction *visitSDiv(BinaryOperator &I);
    Instruction *visitFDiv(BinaryOperator &I);
    Instruction *visitAnd(BinaryOperator &I);
    Instruction *visitOr (BinaryOperator &I);
    Instruction *visitXor(BinaryOperator &I);
    Instruction *visitShl(BinaryOperator &I);
    Instruction *visitAShr(BinaryOperator &I);
    Instruction *visitLShr(BinaryOperator &I);
    Instruction *commonShiftTransforms(BinaryOperator &I);
    Instruction *visitFCmpInst(FCmpInst &I);
    Instruction *visitICmpInst(ICmpInst &I);
    Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
    Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                Instruction *LHS,
                                                ConstantInt *RHS);
    Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                ConstantInt *DivRHS);

    Instruction *FoldGEPICmp(User *GEPLHS, Value *RHS,
                             ICmpInst::Predicate Cond, Instruction &I);
    Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                     BinaryOperator &I);
    Instruction *commonCastTransforms(CastInst &CI);
    Instruction *commonIntCastTransforms(CastInst &CI);
    Instruction *commonPointerCastTransforms(CastInst &CI);
    Instruction *visitTrunc(TruncInst &CI);
    Instruction *visitZExt(ZExtInst &CI);
    Instruction *visitSExt(SExtInst &CI);
    Instruction *visitFPTrunc(FPTruncInst &CI);
    Instruction *visitFPExt(CastInst &CI);
    Instruction *visitFPToUI(CastInst &CI);
    Instruction *visitFPToSI(CastInst &CI);
    Instruction *visitUIToFP(CastInst &CI);
    Instruction *visitSIToFP(CastInst &CI);
    Instruction *visitPtrToInt(CastInst &CI);
    Instruction *visitIntToPtr(IntToPtrInst &CI);
    Instruction *visitBitCast(BitCastInst &CI);
    Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
                                Instruction *FI);
    Instruction *visitSelectInst(SelectInst &CI);
    Instruction *visitCallInst(CallInst &CI);
    Instruction *visitInvokeInst(InvokeInst &II);
    Instruction *visitPHINode(PHINode &PN);
    Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
    Instruction *visitAllocationInst(AllocationInst &AI);
    Instruction *visitFreeInst(FreeInst &FI);
    Instruction *visitLoadInst(LoadInst &LI);
    Instruction *visitStoreInst(StoreInst &SI);
    Instruction *visitBranchInst(BranchInst &BI);
    Instruction *visitSwitchInst(SwitchInst &SI);
    Instruction *visitInsertElementInst(InsertElementInst &IE);
    Instruction *visitExtractElementInst(ExtractElementInst &EI);
    Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);

    // visitInstruction - Specify what to return for unhandled instructions...
    Instruction *visitInstruction(Instruction &I) { return 0; }

  private:
    Instruction *visitCallSite(CallSite CS);
    bool transformConstExprCastCall(CallSite CS);
    Instruction *transformCallThroughTrampoline(CallSite CS);
    Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                   bool DoXform = true);

  public:
    // InsertNewInstBefore - insert an instruction New before instruction Old
    // in the program.  Add the new instruction to the worklist.
    //
    Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
      assert(New && New->getParent() == 0 &&
             "New instruction already inserted into a basic block!");
      BasicBlock *BB = Old.getParent();
      BB->getInstList().insert(&Old, New);  // Insert inst
      AddToWorkList(New);
      return New;
    }

    /// InsertCastBefore - Insert a cast of V to TY before the instruction POS.
    /// This also adds the cast to the worklist.  Finally, this returns the
    /// cast.  Constants are folded instead of emitting an instruction.
    Value *InsertCastBefore(Instruction::CastOps opc, Value *V, const Type *Ty,
                            Instruction &Pos) {
      if (V->getType() == Ty) return V;  // No-op cast.

      if (Constant *CV = dyn_cast<Constant>(V))
        return ConstantExpr::getCast(opc, CV, Ty);

      Instruction *C = CastInst::create(opc, V, Ty, V->getName(), &Pos);
      AddToWorkList(C);
      return C;
    }

    /// InsertBitCastBefore - Convenience wrapper for a bitcast.
    Value *InsertBitCastBefore(Value *V, const Type *Ty, Instruction &Pos) {
      return InsertCastBefore(Instruction::BitCast, V, Ty, Pos);
    }


    // ReplaceInstUsesWith - This method is to be used when an instruction is
    // found to be dead, replacable with another preexisting expression.  Here
    // we add all uses of I to the worklist, replace all uses of I with the new
    // value, then return I, so that the inst combiner will know that I was
    // modified.
    //
    Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
      AddUsersToWorkList(I);         // Add all modified instrs to worklist
      if (&I != V) {
        I.replaceAllUsesWith(V);
        return &I;
      } else {
        // If we are replacing the instruction with itself, this must be in a
        // segment of unreachable code, so just clobber the instruction.
        I.replaceAllUsesWith(UndefValue::get(I.getType()));
        return &I;
      }
    }

    // UpdateValueUsesWith - This method is to be used when an value is
    // found to be replacable with another preexisting expression or was
    // updated.  Here we add all uses of I to the worklist, replace all uses of
    // I with the new value (unless the instruction was just updated), then
    // return true, so that the inst combiner will know that I was modified.
    //
    bool UpdateValueUsesWith(Value *Old, Value *New) {
      AddUsersToWorkList(*Old);      // Add all modified instrs to worklist
      if (Old != New)
        Old->replaceAllUsesWith(New);
      // Revisit both ends of the replacement if they are instructions.
      if (Instruction *I = dyn_cast<Instruction>(Old))
        AddToWorkList(I);
      if (Instruction *I = dyn_cast<Instruction>(New))
        AddToWorkList(I);
      return true;
    }

    // EraseInstFromFunction - When dealing with an instruction that has side
    // effects or produces a void value, we can't rely on DCE to delete the
    // instruction.  Instead, visit methods should return the value returned by
    // this function.
    Instruction *EraseInstFromFunction(Instruction &I) {
      assert(I.use_empty() && "Cannot erase instruction that is used!");
      AddUsesToWorkList(I);
      RemoveFromWorkList(&I);
      I.eraseFromParent();
      return 0;  // Don't do anything with FI
    }

  private:
    /// InsertOperandCastBefore - This inserts a cast of V to DestTy before the
    /// InsertBefore instruction.  This is specialized a bit to avoid inserting
    /// casts that are known to not do anything...
    ///
    Value *InsertOperandCastBefore(Instruction::CastOps opcode,
                                   Value *V, const Type *DestTy,
                                   Instruction *InsertBefore);

    /// SimplifyCommutative - This performs a few simplifications for
    /// commutative operators.
    bool SimplifyCommutative(BinaryOperator &I);

    /// SimplifyCompare - This reorders the operands of a CmpInst to get them in
    /// most-complex to least-complex order.
    bool SimplifyCompare(CmpInst &I);

    /// SimplifyDemandedBits - Attempts to replace V with a simpler value based
    /// on the demanded bits.
    bool SimplifyDemandedBits(Value *V, APInt DemandedMask,
                              APInt& KnownZero, APInt& KnownOne,
                              unsigned Depth = 0);

    Value *SimplifyDemandedVectorElts(Value *V, uint64_t DemandedElts,
                                      uint64_t &UndefElts, unsigned Depth = 0);

    // FoldOpIntoPhi - Given a binary operator or cast instruction which has a
    // PHI node as operand #0, see if we can fold the instruction into the PHI
    // (which is only possible if all operands to the PHI are constants).
    Instruction *FoldOpIntoPhi(Instruction &I);

    // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
    // operator and they all are only used by the PHI, PHI together their
    // inputs, and do the operation once, to the result of the PHI.
    Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
    Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);


    Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                          ConstantInt *AndRHS, BinaryOperator &TheAnd);

    Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                              bool isSub, Instruction &I);
    Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                 bool isSigned, bool Inside, Instruction &IB);
    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
    Instruction *MatchBSwap(BinaryOperator &I);
    bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
    Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
    Instruction *SimplifyMemSet(MemSetInst *MI);


    Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);

    void ComputeMaskedBits(Value *V, const APInt &Mask, APInt& KnownZero,
                           APInt& KnownOne, unsigned Depth = 0);
    bool MaskedValueIsZero(Value *V, const APInt& Mask, unsigned Depth = 0);
    bool CanEvaluateInDifferentType(Value *V, const IntegerType *Ty,
                                    unsigned CastOpc,
                                    int &NumCastsRemoved);
    unsigned GetOrEnforceKnownAlignment(Value *V,
                                        unsigned PrefAlign = 0);
  };

  char InstCombiner::ID = 0;
  // Register the pass under the -instcombine command-line name.
  RegisterPass<InstCombiner> X("instcombine", "Combine redundant instructions");
}
391
392// getComplexity: Assign a complexity or rank value to LLVM Values...
393// 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
394static unsigned getComplexity(Value *V) {
395 if (isa<Instruction>(V)) {
396 if (BinaryOperator::isNeg(V) || BinaryOperator::isNot(V))
397 return 3;
398 return 4;
399 }
400 if (isa<Argument>(V)) return 3;
401 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
402}
403
404// isOnlyUse - Return true if this instruction will be deleted if we stop using
405// it.
406static bool isOnlyUse(Value *V) {
407 return V->hasOneUse() || isa<Constant>(V);
408}
409
410// getPromotedType - Return the specified type promoted as it would be to pass
411// though a va_arg area...
412static const Type *getPromotedType(const Type *Ty) {
413 if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
414 if (ITy->getBitWidth() < 32)
415 return Type::Int32Ty;
416 }
417 return Ty;
418}
419
420/// getBitCastOperand - If the specified operand is a CastInst or a constant
421/// expression bitcast, return the operand value, otherwise return null.
422static Value *getBitCastOperand(Value *V) {
423 if (BitCastInst *I = dyn_cast<BitCastInst>(V))
424 return I->getOperand(0);
425 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
426 if (CE->getOpcode() == Instruction::BitCast)
427 return CE->getOperand(0);
428 return 0;
429}
430
431/// This function is a wrapper around CastInst::isEliminableCastPair. It
432/// simply extracts arguments and returns what that function returns.
433static Instruction::CastOps
434isEliminableCastPair(
435 const CastInst *CI, ///< The first cast instruction
436 unsigned opcode, ///< The opcode of the second cast instruction
437 const Type *DstTy, ///< The target type for the second cast instruction
438 TargetData *TD ///< The target data for pointer size
439) {
440
441 const Type *SrcTy = CI->getOperand(0)->getType(); // A from above
442 const Type *MidTy = CI->getType(); // B from above
443
444 // Get the opcodes of the two Cast instructions
445 Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
446 Instruction::CastOps secondOp = Instruction::CastOps(opcode);
447
448 return Instruction::CastOps(
449 CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
450 DstTy, TD->getIntPtrType()));
451}
452
453/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
454/// in any code being generated. It does not require codegen if V is simple
455/// enough or if the cast can be folded into other casts.
456static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
457 const Type *Ty, TargetData *TD) {
458 if (V->getType() == Ty || isa<Constant>(V)) return false;
459
460 // If this is another cast that can be eliminated, it isn't codegen either.
461 if (const CastInst *CI = dyn_cast<CastInst>(V))
462 if (isEliminableCastPair(CI, opcode, Ty, TD))
463 return false;
464 return true;
465}
466
467/// InsertOperandCastBefore - This inserts a cast of V to DestTy before the
468/// InsertBefore instruction. This is specialized a bit to avoid inserting
469/// casts that are known to not do anything...
470///
471Value *InstCombiner::InsertOperandCastBefore(Instruction::CastOps opcode,
472 Value *V, const Type *DestTy,
473 Instruction *InsertBefore) {
474 if (V->getType() == DestTy) return V;
475 if (Constant *C = dyn_cast<Constant>(V))
476 return ConstantExpr::getCast(opcode, C, DestTy);
477
478 return InsertCastBefore(opcode, V, DestTy, *InsertBefore);
479}
480
// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
//
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  // Rule 1: canonicalize operand order by complexity.
  // NOTE(review): this relies on swapOperands() returning false when the
  // swap succeeded — verify against this revision's Instruction API.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  // Rules 2 and 3 only apply to associative operators.
  if (!I.isAssociative()) return Changed;
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        // Rule 2: both constants fold at compile time; reattach V as LHS.
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      } else if (BinaryOperator *Op1=dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            isOnlyUse(Op) && isOnlyUse(Op1)) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          // Both inner ops must be single-use so they can be consumed here.
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    Op1->getName(), &I);
          AddToWorkList(New);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          return true;
        }
    }
  return Changed;
}
526
527/// SimplifyCompare - For a CmpInst this function just orders the operands
528/// so that theyare listed from right (least complex) to left (most complex).
529/// This puts constants before unary operators before binary operators.
530bool InstCombiner::SimplifyCompare(CmpInst &I) {
531 if (getComplexity(I.getOperand(0)) >= getComplexity(I.getOperand(1)))
532 return false;
533 I.swapOperands();
534 // Compare instructions are not associative so there's nothing else we can do.
535 return true;
536}
537
538// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
539// if the LHS is a constant zero (which is the 'negate' form).
540//
541static inline Value *dyn_castNegVal(Value *V) {
542 if (BinaryOperator::isNeg(V))
543 return BinaryOperator::getNegArgument(V);
544
545 // Constants can be considered to be negated values if they can be folded.
546 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
547 return ConstantExpr::getNeg(C);
548 return 0;
549}
550
551static inline Value *dyn_castNotVal(Value *V) {
552 if (BinaryOperator::isNot(V))
553 return BinaryOperator::getNotArgument(V);
554
555 // Constants can be considered to be not'ed values...
556 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
557 return ConstantInt::get(~C->getValue());
558 return 0;
559}
560
// dyn_castFoldableMul - If this value is a multiply that can be folded into
// other computations (because it has a constant operand), return the
// non-constant operand of the multiply, and set CST to point to the multiplier.
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
  // Only single-use integer values qualify; otherwise the multiply would
  // still be needed by its other users.
  if (V->hasOneUse() && V->getType()->isInteger())
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // mul X, C — canonical form keeps the constant on the RHS.
      if (I->getOpcode() == Instruction::Mul)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
          return I->getOperand(0);
      // shl X, C is a multiply by a power of two; rewrite CST accordingly.
      if (I->getOpcode() == Instruction::Shl)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
          // The multiplier is really 1 << CST.
          uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
          uint32_t CSTVal = CST->getLimitedValue(BitWidth);
          CST = ConstantInt::get(APInt(BitWidth, 1).shl(CSTVal));
          return I->getOperand(0);
        }
    }
  return 0;
}
583
584/// dyn_castGetElementPtr - If this is a getelementptr instruction or constant
585/// expression, return it.
586static User *dyn_castGetElementPtr(Value *V) {
587 if (isa<GetElementPtrInst>(V)) return cast<User>(V);
588 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
589 if (CE->getOpcode() == Instruction::GetElementPtr)
590 return cast<User>(V);
591 return false;
592}
593
Dan Gohman2d648bb2008-04-10 18:43:06 +0000594/// getOpcode - If this is an Instruction or a ConstantExpr, return the
595/// opcode value. Otherwise return UserOp1.
596static unsigned getOpcode(User *U) {
597 if (Instruction *I = dyn_cast<Instruction>(U))
598 return I->getOpcode();
599 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U))
600 return CE->getOpcode();
601 // Use UserOp1 to mean there's no opcode.
602 return Instruction::UserOp1;
603}
604
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000605/// AddOne - Add one to a ConstantInt
606static ConstantInt *AddOne(ConstantInt *C) {
607 APInt Val(C->getValue());
608 return ConstantInt::get(++Val);
609}
610/// SubOne - Subtract one from a ConstantInt
611static ConstantInt *SubOne(ConstantInt *C) {
612 APInt Val(C->getValue());
613 return ConstantInt::get(--Val);
614}
615/// Add - Add two ConstantInts together
616static ConstantInt *Add(ConstantInt *C1, ConstantInt *C2) {
617 return ConstantInt::get(C1->getValue() + C2->getValue());
618}
619/// And - Bitwise AND two ConstantInts together
620static ConstantInt *And(ConstantInt *C1, ConstantInt *C2) {
621 return ConstantInt::get(C1->getValue() & C2->getValue());
622}
623/// Subtract - Subtract one ConstantInt from another
624static ConstantInt *Subtract(ConstantInt *C1, ConstantInt *C2) {
625 return ConstantInt::get(C1->getValue() - C2->getValue());
626}
627/// Multiply - Multiply two ConstantInts together
628static ConstantInt *Multiply(ConstantInt *C1, ConstantInt *C2) {
629 return ConstantInt::get(C1->getValue() * C2->getValue());
630}
/// MultiplyOverflows - True if the multiply can not be expressed in an int
/// this size (signed when 'sign' is true, unsigned otherwise).
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
  uint32_t W = C1->getBitWidth();
  // Extend both operands in place to twice the width so the full product
  // cannot wrap.
  APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
  if (sign) {
    LHSExt.sext(W * 2);
    RHSExt.sext(W * 2);
  } else {
    LHSExt.zext(W * 2);
    RHSExt.zext(W * 2);
  }

  APInt MulExt = LHSExt * RHSExt;

  // Overflow iff the 2W-bit product is outside the representable range of a
  // W-bit integer of the requested signedness.
  if (sign) {
    APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
    APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
    return MulExt.slt(Min) || MulExt.sgt(Max);
  } else
    return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
}
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000653
654/// ComputeMaskedBits - Determine which of the bits specified in Mask are
655/// known to be either zero or one and return them in the KnownZero/KnownOne
656/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
657/// processing.
658/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
659/// we cannot optimize based on the assumption that it is zero without changing
660/// it to be an explicit zero. If we don't change it to zero, other code could
661/// optimized based on the contradictory assumption that it is non-zero.
662/// Because instcombine aggressively folds operations with undef args anyway,
663/// this won't lose us code quality.
Dan Gohman2d648bb2008-04-10 18:43:06 +0000664void InstCombiner::ComputeMaskedBits(Value *V, const APInt &Mask,
665 APInt& KnownZero, APInt& KnownOne,
666 unsigned Depth) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000667 assert(V && "No Value?");
668 assert(Depth <= 6 && "Limit Search Depth");
669 uint32_t BitWidth = Mask.getBitWidth();
Dan Gohman2d648bb2008-04-10 18:43:06 +0000670 assert((V->getType()->isInteger() || isa<PointerType>(V->getType())) &&
671 "Not integer or pointer type!");
672 assert((!TD || TD->getTypeSizeInBits(V->getType()) == BitWidth) &&
673 (!isa<IntegerType>(V->getType()) ||
674 V->getType()->getPrimitiveSizeInBits() == BitWidth) &&
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000675 KnownZero.getBitWidth() == BitWidth &&
676 KnownOne.getBitWidth() == BitWidth &&
677 "V, Mask, KnownOne and KnownZero should have same BitWidth");
678 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
679 // We know all of the bits for a constant!
680 KnownOne = CI->getValue() & Mask;
681 KnownZero = ~KnownOne & Mask;
682 return;
683 }
Dan Gohman2d648bb2008-04-10 18:43:06 +0000684 // Null is all-zeros.
685 if (isa<ConstantPointerNull>(V)) {
686 KnownOne.clear();
687 KnownZero = Mask;
688 return;
689 }
690 // The address of an aligned GlobalValue has trailing zeros.
691 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
692 unsigned Align = GV->getAlignment();
693 if (Align == 0 && TD && GV->getType()->getElementType()->isSized())
694 Align = TD->getPrefTypeAlignment(GV->getType()->getElementType());
695 if (Align > 0)
696 KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
697 CountTrailingZeros_32(Align));
698 else
699 KnownZero.clear();
700 KnownOne.clear();
701 return;
702 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000703
Dan Gohmanbec16052008-04-28 17:02:21 +0000704 KnownZero.clear(); KnownOne.clear(); // Start out not knowing anything.
705
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000706 if (Depth == 6 || Mask == 0)
707 return; // Limit search depth.
708
Dan Gohman2d648bb2008-04-10 18:43:06 +0000709 User *I = dyn_cast<User>(V);
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000710 if (!I) return;
711
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000712 APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
Dan Gohman2d648bb2008-04-10 18:43:06 +0000713 switch (getOpcode(I)) {
714 default: break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000715 case Instruction::And: {
716 // If either the LHS or the RHS are Zero, the result is zero.
717 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
718 APInt Mask2(Mask & ~KnownZero);
719 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
720 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
721 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
722
723 // Output known-1 bits are only known if set in both the LHS & RHS.
724 KnownOne &= KnownOne2;
725 // Output known-0 are known to be clear if zero in either the LHS | RHS.
726 KnownZero |= KnownZero2;
727 return;
728 }
729 case Instruction::Or: {
730 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
731 APInt Mask2(Mask & ~KnownOne);
732 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
733 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
734 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
735
736 // Output known-0 bits are only known if clear in both the LHS & RHS.
737 KnownZero &= KnownZero2;
738 // Output known-1 are known to be set if set in either the LHS | RHS.
739 KnownOne |= KnownOne2;
740 return;
741 }
742 case Instruction::Xor: {
743 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
744 ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
745 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
746 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
747
748 // Output known-0 bits are known if clear or set in both the LHS & RHS.
749 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
750 // Output known-1 are known to be set if set in only one of the LHS, RHS.
751 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
752 KnownZero = KnownZeroOut;
753 return;
754 }
Dan Gohman2d648bb2008-04-10 18:43:06 +0000755 case Instruction::Mul: {
756 APInt Mask2 = APInt::getAllOnesValue(BitWidth);
757 ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, Depth+1);
758 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
759 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
760 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
761
762 // If low bits are zero in either operand, output low known-0 bits.
Dan Gohmanbec16052008-04-28 17:02:21 +0000763 // Also compute a conservative estimate for high known-0 bits.
Dan Gohman2d648bb2008-04-10 18:43:06 +0000764 // More trickiness is possible, but this is sufficient for the
765 // interesting case of alignment computation.
766 KnownOne.clear();
767 unsigned TrailZ = KnownZero.countTrailingOnes() +
768 KnownZero2.countTrailingOnes();
Dan Gohmanbec16052008-04-28 17:02:21 +0000769 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
770 KnownZero2.countLeadingOnes() +
771 1, BitWidth) - BitWidth;
772
Dan Gohman2d648bb2008-04-10 18:43:06 +0000773 TrailZ = std::min(TrailZ, BitWidth);
Dan Gohmanbec16052008-04-28 17:02:21 +0000774 LeadZ = std::min(LeadZ, BitWidth);
775 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
776 APInt::getHighBitsSet(BitWidth, LeadZ);
Dan Gohman2d648bb2008-04-10 18:43:06 +0000777 KnownZero &= Mask;
778 return;
779 }
Dan Gohmanbec16052008-04-28 17:02:21 +0000780 case Instruction::UDiv: {
781 // For the purposes of computing leading zeros we can conservatively
782 // treat a udiv as a logical right shift by the power of 2 known to
783 // be greater than the denominator.
784 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
785 ComputeMaskedBits(I->getOperand(0),
786 AllOnes, KnownZero2, KnownOne2, Depth+1);
787 unsigned LeadZ = KnownZero2.countLeadingOnes();
788
789 KnownOne2.clear();
790 KnownZero2.clear();
791 ComputeMaskedBits(I->getOperand(1),
792 AllOnes, KnownZero2, KnownOne2, Depth+1);
793 LeadZ = std::min(BitWidth,
794 LeadZ + BitWidth - KnownOne2.countLeadingZeros());
795
796 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
797 return;
798 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000799 case Instruction::Select:
800 ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
801 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
802 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
803 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
804
805 // Only known if known in both the LHS and RHS.
806 KnownOne &= KnownOne2;
807 KnownZero &= KnownZero2;
808 return;
809 case Instruction::FPTrunc:
810 case Instruction::FPExt:
811 case Instruction::FPToUI:
812 case Instruction::FPToSI:
813 case Instruction::SIToFP:
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000814 case Instruction::UIToFP:
Dan Gohman2d648bb2008-04-10 18:43:06 +0000815 return; // Can't work with floating point.
816 case Instruction::PtrToInt:
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000817 case Instruction::IntToPtr:
Dan Gohman2d648bb2008-04-10 18:43:06 +0000818 // We can't handle these if we don't know the pointer size.
819 if (!TD) return;
820 // Fall through and handle them the same as zext/trunc.
821 case Instruction::ZExt:
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000822 case Instruction::Trunc: {
823 // All these have integer operands
Dan Gohman2d648bb2008-04-10 18:43:06 +0000824 const Type *SrcTy = I->getOperand(0)->getType();
825 uint32_t SrcBitWidth = TD ?
826 TD->getTypeSizeInBits(SrcTy) :
827 SrcTy->getPrimitiveSizeInBits();
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000828 APInt MaskIn(Mask);
Dan Gohman2d648bb2008-04-10 18:43:06 +0000829 MaskIn.zextOrTrunc(SrcBitWidth);
830 KnownZero.zextOrTrunc(SrcBitWidth);
831 KnownOne.zextOrTrunc(SrcBitWidth);
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000832 ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, Depth+1);
Dan Gohman2d648bb2008-04-10 18:43:06 +0000833 KnownZero.zextOrTrunc(BitWidth);
834 KnownOne.zextOrTrunc(BitWidth);
835 // Any top bits are known to be zero.
836 if (BitWidth > SrcBitWidth)
837 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000838 return;
839 }
840 case Instruction::BitCast: {
841 const Type *SrcTy = I->getOperand(0)->getType();
Dan Gohman2d648bb2008-04-10 18:43:06 +0000842 if (SrcTy->isInteger() || isa<PointerType>(SrcTy)) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000843 ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
844 return;
845 }
846 break;
847 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000848 case Instruction::SExt: {
849 // Compute the bits in the result that are not present in the input.
850 const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
851 uint32_t SrcBitWidth = SrcTy->getBitWidth();
852
853 APInt MaskIn(Mask);
854 MaskIn.trunc(SrcBitWidth);
855 KnownZero.trunc(SrcBitWidth);
856 KnownOne.trunc(SrcBitWidth);
857 ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, Depth+1);
858 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
859 KnownZero.zext(BitWidth);
860 KnownOne.zext(BitWidth);
861
862 // If the sign bit of the input is known set or clear, then we know the
863 // top bits of the result.
864 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
865 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
866 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
867 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
868 return;
869 }
870 case Instruction::Shl:
871 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
872 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
873 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
874 APInt Mask2(Mask.lshr(ShiftAmt));
875 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, Depth+1);
876 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
877 KnownZero <<= ShiftAmt;
878 KnownOne <<= ShiftAmt;
879 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
880 return;
881 }
882 break;
883 case Instruction::LShr:
884 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
885 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
886 // Compute the new bits that are at the top now.
887 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
888
889 // Unsigned shift right.
890 APInt Mask2(Mask.shl(ShiftAmt));
891 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero,KnownOne,Depth+1);
892 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
893 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
894 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
895 // high bits known zero.
896 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
897 return;
898 }
899 break;
900 case Instruction::AShr:
901 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
902 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
903 // Compute the new bits that are at the top now.
904 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
905
906 // Signed shift right.
907 APInt Mask2(Mask.shl(ShiftAmt));
908 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero,KnownOne,Depth+1);
909 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
910 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
911 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
912
913 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
914 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
915 KnownZero |= HighBits;
916 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
917 KnownOne |= HighBits;
918 return;
919 }
920 break;
Dan Gohman2d648bb2008-04-10 18:43:06 +0000921 case Instruction::Sub: {
922 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
923 // We know that the top bits of C-X are clear if X contains less bits
924 // than C (i.e. no wrap-around can happen). For example, 20-X is
925 // positive if we can prove that X is >= 0 and < 16.
926 if (!CLHS->getValue().isNegative()) {
927 unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
928 // NLZ can't be BitWidth with no sign bit
929 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
Dan Gohmanbec16052008-04-28 17:02:21 +0000930 ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
931 Depth+1);
Dan Gohman2d648bb2008-04-10 18:43:06 +0000932
Dan Gohmanbec16052008-04-28 17:02:21 +0000933 // If all of the MaskV bits are known to be zero, then we know the
934 // output top bits are zero, because we now know that the output is
935 // from [0-C].
936 if ((KnownZero2 & MaskV) == MaskV) {
Dan Gohman2d648bb2008-04-10 18:43:06 +0000937 unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
938 // Top bits known zero.
939 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
Dan Gohman2d648bb2008-04-10 18:43:06 +0000940 }
Dan Gohman2d648bb2008-04-10 18:43:06 +0000941 }
942 }
943 }
944 // fall through
Duncan Sandse71d4482008-03-21 08:32:17 +0000945 case Instruction::Add: {
Chris Lattner5ee84f82008-03-21 05:19:58 +0000946 // Output known-0 bits are known if clear or set in both the low clear bits
947 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
948 // low 3 bits clear.
Dan Gohmanbec16052008-04-28 17:02:21 +0000949 APInt Mask2 = APInt::getLowBitsSet(BitWidth, Mask.countTrailingOnes());
950 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
951 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
952 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
953
954 ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, Depth+1);
955 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
956 KnownZeroOut = std::min(KnownZeroOut,
957 KnownZero2.countTrailingOnes());
958
959 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
Chris Lattner5ee84f82008-03-21 05:19:58 +0000960 return;
Duncan Sandse71d4482008-03-21 08:32:17 +0000961 }
Nick Lewyckyc1372c82008-03-06 06:48:30 +0000962 case Instruction::SRem:
963 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
964 APInt RA = Rem->getValue();
965 if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
966 APInt LowBits = RA.isStrictlyPositive() ? ((RA - 1) | RA) : ~RA;
967 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
968 ComputeMaskedBits(I->getOperand(0), Mask2,KnownZero2,KnownOne2,Depth+1);
969
970 // The sign of a remainder is equal to the sign of the first
971 // operand (zero being positive).
972 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
973 KnownZero2 |= ~LowBits;
974 else if (KnownOne2[BitWidth-1])
975 KnownOne2 |= ~LowBits;
976
977 KnownZero |= KnownZero2 & Mask;
978 KnownOne |= KnownOne2 & Mask;
979
980 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
981 }
982 }
983 break;
Dan Gohmanbec16052008-04-28 17:02:21 +0000984 case Instruction::URem: {
Nick Lewyckyc1372c82008-03-06 06:48:30 +0000985 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
986 APInt RA = Rem->getValue();
987 if (RA.isStrictlyPositive() && RA.isPowerOf2()) {
988 APInt LowBits = (RA - 1) | RA;
989 APInt Mask2 = LowBits & Mask;
990 KnownZero |= ~LowBits & Mask;
991 ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne,Depth+1);
992 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
Dan Gohmanbec16052008-04-28 17:02:21 +0000993 break;
Nick Lewyckyc1372c82008-03-06 06:48:30 +0000994 }
Nick Lewyckyc1372c82008-03-06 06:48:30 +0000995 }
Dan Gohmanbec16052008-04-28 17:02:21 +0000996
997 // Since the result is less than or equal to either operand, any leading
998 // zero bits in either operand must also exist in the result.
999 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1000 ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
1001 Depth+1);
1002 ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
1003 Depth+1);
1004
1005 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
1006 KnownZero2.countLeadingOnes());
1007 KnownOne.clear();
1008 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001009 break;
Dan Gohmanbec16052008-04-28 17:02:21 +00001010 }
Dan Gohman2d648bb2008-04-10 18:43:06 +00001011
1012 case Instruction::Alloca:
1013 case Instruction::Malloc: {
1014 AllocationInst *AI = cast<AllocationInst>(V);
1015 unsigned Align = AI->getAlignment();
1016 if (Align == 0 && TD) {
1017 if (isa<AllocaInst>(AI))
1018 Align = TD->getPrefTypeAlignment(AI->getType()->getElementType());
1019 else if (isa<MallocInst>(AI)) {
1020 // Malloc returns maximally aligned memory.
1021 Align = TD->getABITypeAlignment(AI->getType()->getElementType());
1022 Align =
1023 std::max(Align,
1024 (unsigned)TD->getABITypeAlignment(Type::DoubleTy));
1025 Align =
1026 std::max(Align,
1027 (unsigned)TD->getABITypeAlignment(Type::Int64Ty));
1028 }
1029 }
1030
1031 if (Align > 0)
1032 KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
1033 CountTrailingZeros_32(Align));
1034 break;
1035 }
1036 case Instruction::GetElementPtr: {
1037 // Analyze all of the subscripts of this getelementptr instruction
1038 // to determine if we can prove known low zero bits.
1039 APInt LocalMask = APInt::getAllOnesValue(BitWidth);
1040 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1041 ComputeMaskedBits(I->getOperand(0), LocalMask,
1042 LocalKnownZero, LocalKnownOne, Depth+1);
1043 unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1044
1045 gep_type_iterator GTI = gep_type_begin(I);
1046 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1047 Value *Index = I->getOperand(i);
1048 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
1049 // Handle struct member offset arithmetic.
1050 if (!TD) return;
1051 const StructLayout *SL = TD->getStructLayout(STy);
1052 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1053 uint64_t Offset = SL->getElementOffset(Idx);
1054 TrailZ = std::min(TrailZ,
1055 CountTrailingZeros_64(Offset));
1056 } else {
1057 // Handle array index arithmetic.
1058 const Type *IndexedTy = GTI.getIndexedType();
1059 if (!IndexedTy->isSized()) return;
1060 unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
1061 uint64_t TypeSize = TD ? TD->getABITypeSize(IndexedTy) : 1;
1062 LocalMask = APInt::getAllOnesValue(GEPOpiBits);
1063 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1064 ComputeMaskedBits(Index, LocalMask,
1065 LocalKnownZero, LocalKnownOne, Depth+1);
1066 TrailZ = std::min(TrailZ,
1067 CountTrailingZeros_64(TypeSize) +
1068 LocalKnownZero.countTrailingOnes());
1069 }
1070 }
1071
1072 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
1073 break;
1074 }
1075 case Instruction::PHI: {
1076 PHINode *P = cast<PHINode>(I);
1077 // Handle the case of a simple two-predecessor recurrence PHI.
1078 // There's a lot more that could theoretically be done here, but
1079 // this is sufficient to catch some interesting cases.
1080 if (P->getNumIncomingValues() == 2) {
1081 for (unsigned i = 0; i != 2; ++i) {
1082 Value *L = P->getIncomingValue(i);
1083 Value *R = P->getIncomingValue(!i);
1084 User *LU = dyn_cast<User>(L);
1085 unsigned Opcode = LU ? getOpcode(LU) : (unsigned)Instruction::UserOp1;
1086 // Check for operations that have the property that if
1087 // both their operands have low zero bits, the result
1088 // will have low zero bits.
1089 if (Opcode == Instruction::Add ||
1090 Opcode == Instruction::Sub ||
1091 Opcode == Instruction::And ||
1092 Opcode == Instruction::Or ||
1093 Opcode == Instruction::Mul) {
1094 Value *LL = LU->getOperand(0);
1095 Value *LR = LU->getOperand(1);
1096 // Find a recurrence.
1097 if (LL == I)
1098 L = LR;
1099 else if (LR == I)
1100 L = LL;
1101 else
1102 break;
1103 // Ok, we have a PHI of the form L op= R. Check for low
1104 // zero bits.
1105 APInt Mask2 = APInt::getAllOnesValue(BitWidth);
1106 ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, Depth+1);
1107 Mask2 = APInt::getLowBitsSet(BitWidth,
1108 KnownZero2.countTrailingOnes());
1109 KnownOne2.clear();
1110 KnownZero2.clear();
1111 ComputeMaskedBits(L, Mask2, KnownZero2, KnownOne2, Depth+1);
1112 KnownZero = Mask &
1113 APInt::getLowBitsSet(BitWidth,
1114 KnownZero2.countTrailingOnes());
1115 break;
1116 }
1117 }
1118 }
1119 break;
1120 }
Dan Gohmanbec16052008-04-28 17:02:21 +00001121 case Instruction::Call:
1122 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1123 switch (II->getIntrinsicID()) {
1124 default: break;
1125 case Intrinsic::ctpop:
1126 case Intrinsic::ctlz:
1127 case Intrinsic::cttz: {
1128 unsigned LowBits = Log2_32(BitWidth)+1;
1129 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1130 break;
1131 }
1132 }
1133 }
1134 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001135 }
1136}
1137
1138/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1139/// this predicate to simplify operations downstream. Mask is known to be zero
1140/// for bits that V cannot have.
Dan Gohman2d648bb2008-04-10 18:43:06 +00001141bool InstCombiner::MaskedValueIsZero(Value *V, const APInt& Mask,
1142 unsigned Depth) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001143 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1144 ComputeMaskedBits(V, Mask, KnownZero, KnownOne, Depth);
1145 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1146 return (KnownZero & Mask) == Mask;
1147}
1148
1149/// ShrinkDemandedConstant - Check to see if the specified operand of the
1150/// specified instruction is a constant integer. If so, check to see if there
1151/// are any bits set in the constant that are not demanded. If so, shrink the
1152/// constant and return true.
1153static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
1154 APInt Demanded) {
1155 assert(I && "No instruction?");
1156 assert(OpNo < I->getNumOperands() && "Operand index too large");
1157
1158 // If the operand is not a constant integer, nothing to do.
1159 ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
1160 if (!OpC) return false;
1161
1162 // If there are no bits set that aren't demanded, nothing to do.
1163 Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
1164 if ((~Demanded & OpC->getValue()) == 0)
1165 return false;
1166
1167 // This instruction is producing bits that are not demanded. Shrink the RHS.
1168 Demanded &= OpC->getValue();
1169 I->setOperand(OpNo, ConstantInt::get(Demanded));
1170 return true;
1171}
1172
1173// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
1174// set of known zero and one bits, compute the maximum and minimum values that
1175// could have the specified known zero and known one bits, returning them in
1176// min/max.
1177static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty,
1178 const APInt& KnownZero,
1179 const APInt& KnownOne,
1180 APInt& Min, APInt& Max) {
1181 uint32_t BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1182 assert(KnownZero.getBitWidth() == BitWidth &&
1183 KnownOne.getBitWidth() == BitWidth &&
1184 Min.getBitWidth() == BitWidth && Max.getBitWidth() == BitWidth &&
1185 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
1186 APInt UnknownBits = ~(KnownZero|KnownOne);
1187
1188 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
1189 // bit if it is unknown.
1190 Min = KnownOne;
1191 Max = KnownOne|UnknownBits;
1192
1193 if (UnknownBits[BitWidth-1]) { // Sign bit is unknown
1194 Min.set(BitWidth-1);
1195 Max.clear(BitWidth-1);
1196 }
1197}
1198
1199// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
1200// a set of known zero and one bits, compute the maximum and minimum values that
1201// could have the specified known zero and known one bits, returning them in
1202// min/max.
1203static void ComputeUnsignedMinMaxValuesFromKnownBits(const Type *Ty,
Chris Lattnerb933ea62007-08-05 08:47:58 +00001204 const APInt &KnownZero,
1205 const APInt &KnownOne,
1206 APInt &Min, APInt &Max) {
1207 uint32_t BitWidth = cast<IntegerType>(Ty)->getBitWidth(); BitWidth = BitWidth;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001208 assert(KnownZero.getBitWidth() == BitWidth &&
1209 KnownOne.getBitWidth() == BitWidth &&
1210 Min.getBitWidth() == BitWidth && Max.getBitWidth() &&
1211 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
1212 APInt UnknownBits = ~(KnownZero|KnownOne);
1213
1214 // The minimum value is when the unknown bits are all zeros.
1215 Min = KnownOne;
1216 // The maximum value is when the unknown bits are all ones.
1217 Max = KnownOne|UnknownBits;
1218}
1219
1220/// SimplifyDemandedBits - This function attempts to replace V with a simpler
1221/// value based on the demanded bits. When this function is called, it is known
1222/// that only the bits set in DemandedMask of the result of V are ever used
1223/// downstream. Consequently, depending on the mask and V, it may be possible
1224/// to replace V with a constant or one of its operands. In such cases, this
1225/// function does the replacement and returns true. In all other cases, it
1226/// returns false after analyzing the expression and setting KnownOne and known
1227/// to be one in the expression. KnownZero contains all the bits that are known
1228/// to be zero in the expression. These are provided to potentially allow the
1229/// caller (which might recursively be SimplifyDemandedBits itself) to simplify
1230/// the expression. KnownOne and KnownZero always follow the invariant that
1231/// KnownOne & KnownZero == 0. That is, a bit can't be both 1 and 0. Note that
1232/// the bits in KnownOne and KnownZero may only be accurate for those bits set
1233/// in DemandedMask. Note also that the bitwidth of V, DemandedMask, KnownZero
1234/// and KnownOne must all be the same.
1235bool InstCombiner::SimplifyDemandedBits(Value *V, APInt DemandedMask,
1236 APInt& KnownZero, APInt& KnownOne,
1237 unsigned Depth) {
1238 assert(V != 0 && "Null pointer of Value???");
1239 assert(Depth <= 6 && "Limit Search Depth");
1240 uint32_t BitWidth = DemandedMask.getBitWidth();
1241 const IntegerType *VTy = cast<IntegerType>(V->getType());
1242 assert(VTy->getBitWidth() == BitWidth &&
1243 KnownZero.getBitWidth() == BitWidth &&
1244 KnownOne.getBitWidth() == BitWidth &&
1245 "Value *V, DemandedMask, KnownZero and KnownOne \
1246 must have same BitWidth");
1247 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1248 // We know all of the bits for a constant!
1249 KnownOne = CI->getValue() & DemandedMask;
1250 KnownZero = ~KnownOne & DemandedMask;
1251 return false;
1252 }
1253
1254 KnownZero.clear();
1255 KnownOne.clear();
1256 if (!V->hasOneUse()) { // Other users may use these bits.
1257 if (Depth != 0) { // Not at the root.
1258 // Just compute the KnownZero/KnownOne bits to simplify things downstream.
1259 ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
1260 return false;
1261 }
1262 // If this is the root being simplified, allow it to have multiple uses,
1263 // just set the DemandedMask to all bits.
1264 DemandedMask = APInt::getAllOnesValue(BitWidth);
1265 } else if (DemandedMask == 0) { // Not demanding any bits from V.
1266 if (V != UndefValue::get(VTy))
1267 return UpdateValueUsesWith(V, UndefValue::get(VTy));
1268 return false;
1269 } else if (Depth == 6) { // Limit search depth.
1270 return false;
1271 }
1272
1273 Instruction *I = dyn_cast<Instruction>(V);
1274 if (!I) return false; // Only analyze instructions.
1275
1276 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1277 APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;
1278 switch (I->getOpcode()) {
Dan Gohmanbec16052008-04-28 17:02:21 +00001279 default:
1280 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1281 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001282 case Instruction::And:
1283 // If either the LHS or the RHS are Zero, the result is zero.
1284 if (SimplifyDemandedBits(I->getOperand(1), DemandedMask,
1285 RHSKnownZero, RHSKnownOne, Depth+1))
1286 return true;
1287 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1288 "Bits known to be one AND zero?");
1289
1290 // If something is known zero on the RHS, the bits aren't demanded on the
1291 // LHS.
1292 if (SimplifyDemandedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
1293 LHSKnownZero, LHSKnownOne, Depth+1))
1294 return true;
1295 assert((LHSKnownZero & LHSKnownOne) == 0 &&
1296 "Bits known to be one AND zero?");
1297
1298 // If all of the demanded bits are known 1 on one side, return the other.
1299 // These bits cannot contribute to the result of the 'and'.
1300 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
1301 (DemandedMask & ~LHSKnownZero))
1302 return UpdateValueUsesWith(I, I->getOperand(0));
1303 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
1304 (DemandedMask & ~RHSKnownZero))
1305 return UpdateValueUsesWith(I, I->getOperand(1));
1306
1307 // If all of the demanded bits in the inputs are known zeros, return zero.
1308 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
1309 return UpdateValueUsesWith(I, Constant::getNullValue(VTy));
1310
1311 // If the RHS is a constant, see if we can simplify it.
1312 if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
1313 return UpdateValueUsesWith(I, I);
1314
1315 // Output known-1 bits are only known if set in both the LHS & RHS.
1316 RHSKnownOne &= LHSKnownOne;
1317 // Output known-0 are known to be clear if zero in either the LHS | RHS.
1318 RHSKnownZero |= LHSKnownZero;
1319 break;
1320 case Instruction::Or:
1321 // If either the LHS or the RHS are One, the result is One.
1322 if (SimplifyDemandedBits(I->getOperand(1), DemandedMask,
1323 RHSKnownZero, RHSKnownOne, Depth+1))
1324 return true;
1325 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1326 "Bits known to be one AND zero?");
1327 // If something is known one on the RHS, the bits aren't demanded on the
1328 // LHS.
1329 if (SimplifyDemandedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
1330 LHSKnownZero, LHSKnownOne, Depth+1))
1331 return true;
1332 assert((LHSKnownZero & LHSKnownOne) == 0 &&
1333 "Bits known to be one AND zero?");
1334
1335 // If all of the demanded bits are known zero on one side, return the other.
1336 // These bits cannot contribute to the result of the 'or'.
1337 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
1338 (DemandedMask & ~LHSKnownOne))
1339 return UpdateValueUsesWith(I, I->getOperand(0));
1340 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
1341 (DemandedMask & ~RHSKnownOne))
1342 return UpdateValueUsesWith(I, I->getOperand(1));
1343
1344 // If all of the potentially set bits on one side are known to be set on
1345 // the other side, just use the 'other' side.
1346 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
1347 (DemandedMask & (~RHSKnownZero)))
1348 return UpdateValueUsesWith(I, I->getOperand(0));
1349 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
1350 (DemandedMask & (~LHSKnownZero)))
1351 return UpdateValueUsesWith(I, I->getOperand(1));
1352
1353 // If the RHS is a constant, see if we can simplify it.
1354 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1355 return UpdateValueUsesWith(I, I);
1356
1357 // Output known-0 bits are only known if clear in both the LHS & RHS.
1358 RHSKnownZero &= LHSKnownZero;
1359 // Output known-1 are known to be set if set in either the LHS | RHS.
1360 RHSKnownOne |= LHSKnownOne;
1361 break;
1362 case Instruction::Xor: {
1363 if (SimplifyDemandedBits(I->getOperand(1), DemandedMask,
1364 RHSKnownZero, RHSKnownOne, Depth+1))
1365 return true;
1366 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1367 "Bits known to be one AND zero?");
1368 if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
1369 LHSKnownZero, LHSKnownOne, Depth+1))
1370 return true;
1371 assert((LHSKnownZero & LHSKnownOne) == 0 &&
1372 "Bits known to be one AND zero?");
1373
1374 // If all of the demanded bits are known zero on one side, return the other.
1375 // These bits cannot contribute to the result of the 'xor'.
1376 if ((DemandedMask & RHSKnownZero) == DemandedMask)
1377 return UpdateValueUsesWith(I, I->getOperand(0));
1378 if ((DemandedMask & LHSKnownZero) == DemandedMask)
1379 return UpdateValueUsesWith(I, I->getOperand(1));
1380
1381 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1382 APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
1383 (RHSKnownOne & LHSKnownOne);
1384 // Output known-1 are known to be set if set in only one of the LHS, RHS.
1385 APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
1386 (RHSKnownOne & LHSKnownZero);
1387
1388 // If all of the demanded bits are known to be zero on one side or the
1389 // other, turn this into an *inclusive* or.
1390 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1391 if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
1392 Instruction *Or =
1393 BinaryOperator::createOr(I->getOperand(0), I->getOperand(1),
1394 I->getName());
1395 InsertNewInstBefore(Or, *I);
1396 return UpdateValueUsesWith(I, Or);
1397 }
1398
1399 // If all of the demanded bits on one side are known, and all of the set
1400 // bits on that side are also known to be set on the other side, turn this
1401 // into an AND, as we know the bits will be cleared.
1402 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1403 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
1404 // all known
1405 if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
1406 Constant *AndC = ConstantInt::get(~RHSKnownOne & DemandedMask);
1407 Instruction *And =
1408 BinaryOperator::createAnd(I->getOperand(0), AndC, "tmp");
1409 InsertNewInstBefore(And, *I);
1410 return UpdateValueUsesWith(I, And);
1411 }
1412 }
1413
1414 // If the RHS is a constant, see if we can simplify it.
1415 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
1416 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1417 return UpdateValueUsesWith(I, I);
1418
1419 RHSKnownZero = KnownZeroOut;
1420 RHSKnownOne = KnownOneOut;
1421 break;
1422 }
1423 case Instruction::Select:
1424 if (SimplifyDemandedBits(I->getOperand(2), DemandedMask,
1425 RHSKnownZero, RHSKnownOne, Depth+1))
1426 return true;
1427 if (SimplifyDemandedBits(I->getOperand(1), DemandedMask,
1428 LHSKnownZero, LHSKnownOne, Depth+1))
1429 return true;
1430 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1431 "Bits known to be one AND zero?");
1432 assert((LHSKnownZero & LHSKnownOne) == 0 &&
1433 "Bits known to be one AND zero?");
1434
1435 // If the operands are constants, see if we can simplify them.
1436 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1437 return UpdateValueUsesWith(I, I);
1438 if (ShrinkDemandedConstant(I, 2, DemandedMask))
1439 return UpdateValueUsesWith(I, I);
1440
1441 // Only known if known in both the LHS and RHS.
1442 RHSKnownOne &= LHSKnownOne;
1443 RHSKnownZero &= LHSKnownZero;
1444 break;
1445 case Instruction::Trunc: {
1446 uint32_t truncBf =
1447 cast<IntegerType>(I->getOperand(0)->getType())->getBitWidth();
1448 DemandedMask.zext(truncBf);
1449 RHSKnownZero.zext(truncBf);
1450 RHSKnownOne.zext(truncBf);
1451 if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
1452 RHSKnownZero, RHSKnownOne, Depth+1))
1453 return true;
1454 DemandedMask.trunc(BitWidth);
1455 RHSKnownZero.trunc(BitWidth);
1456 RHSKnownOne.trunc(BitWidth);
1457 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1458 "Bits known to be one AND zero?");
1459 break;
1460 }
1461 case Instruction::BitCast:
1462 if (!I->getOperand(0)->getType()->isInteger())
1463 return false;
1464
1465 if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
1466 RHSKnownZero, RHSKnownOne, Depth+1))
1467 return true;
1468 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1469 "Bits known to be one AND zero?");
1470 break;
1471 case Instruction::ZExt: {
1472 // Compute the bits in the result that are not present in the input.
1473 const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
1474 uint32_t SrcBitWidth = SrcTy->getBitWidth();
1475
1476 DemandedMask.trunc(SrcBitWidth);
1477 RHSKnownZero.trunc(SrcBitWidth);
1478 RHSKnownOne.trunc(SrcBitWidth);
1479 if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
1480 RHSKnownZero, RHSKnownOne, Depth+1))
1481 return true;
1482 DemandedMask.zext(BitWidth);
1483 RHSKnownZero.zext(BitWidth);
1484 RHSKnownOne.zext(BitWidth);
1485 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1486 "Bits known to be one AND zero?");
1487 // The top bits are known to be zero.
1488 RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1489 break;
1490 }
1491 case Instruction::SExt: {
1492 // Compute the bits in the result that are not present in the input.
1493 const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
1494 uint32_t SrcBitWidth = SrcTy->getBitWidth();
1495
1496 APInt InputDemandedBits = DemandedMask &
1497 APInt::getLowBitsSet(BitWidth, SrcBitWidth);
1498
1499 APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
1500 // If any of the sign extended bits are demanded, we know that the sign
1501 // bit is demanded.
1502 if ((NewBits & DemandedMask) != 0)
1503 InputDemandedBits.set(SrcBitWidth-1);
1504
1505 InputDemandedBits.trunc(SrcBitWidth);
1506 RHSKnownZero.trunc(SrcBitWidth);
1507 RHSKnownOne.trunc(SrcBitWidth);
1508 if (SimplifyDemandedBits(I->getOperand(0), InputDemandedBits,
1509 RHSKnownZero, RHSKnownOne, Depth+1))
1510 return true;
1511 InputDemandedBits.zext(BitWidth);
1512 RHSKnownZero.zext(BitWidth);
1513 RHSKnownOne.zext(BitWidth);
1514 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1515 "Bits known to be one AND zero?");
1516
1517 // If the sign bit of the input is known set or clear, then we know the
1518 // top bits of the result.
1519
1520 // If the input sign bit is known zero, or if the NewBits are not demanded
1521 // convert this into a zero extension.
1522 if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits)
1523 {
1524 // Convert to ZExt cast
1525 CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName(), I);
1526 return UpdateValueUsesWith(I, NewCast);
1527 } else if (RHSKnownOne[SrcBitWidth-1]) { // Input sign bit known set
1528 RHSKnownOne |= NewBits;
1529 }
1530 break;
1531 }
1532 case Instruction::Add: {
1533 // Figure out what the input bits are. If the top bits of the and result
1534 // are not demanded, then the add doesn't demand them from its input
1535 // either.
1536 uint32_t NLZ = DemandedMask.countLeadingZeros();
1537
1538 // If there is a constant on the RHS, there are a variety of xformations
1539 // we can do.
1540 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1541 // If null, this should be simplified elsewhere. Some of the xforms here
1542 // won't work if the RHS is zero.
1543 if (RHS->isZero())
1544 break;
1545
1546 // If the top bit of the output is demanded, demand everything from the
1547 // input. Otherwise, we demand all the input bits except NLZ top bits.
1548 APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
1549
1550 // Find information about known zero/one bits in the input.
1551 if (SimplifyDemandedBits(I->getOperand(0), InDemandedBits,
1552 LHSKnownZero, LHSKnownOne, Depth+1))
1553 return true;
1554
1555 // If the RHS of the add has bits set that can't affect the input, reduce
1556 // the constant.
1557 if (ShrinkDemandedConstant(I, 1, InDemandedBits))
1558 return UpdateValueUsesWith(I, I);
1559
1560 // Avoid excess work.
1561 if (LHSKnownZero == 0 && LHSKnownOne == 0)
1562 break;
1563
1564 // Turn it into OR if input bits are zero.
1565 if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
1566 Instruction *Or =
1567 BinaryOperator::createOr(I->getOperand(0), I->getOperand(1),
1568 I->getName());
1569 InsertNewInstBefore(Or, *I);
1570 return UpdateValueUsesWith(I, Or);
1571 }
1572
1573 // We can say something about the output known-zero and known-one bits,
1574 // depending on potential carries from the input constant and the
1575 // unknowns. For example if the LHS is known to have at most the 0x0F0F0
1576 // bits set and the RHS constant is 0x01001, then we know we have a known
1577 // one mask of 0x00001 and a known zero mask of 0xE0F0E.
1578
1579 // To compute this, we first compute the potential carry bits. These are
1580 // the bits which may be modified. I'm not aware of a better way to do
1581 // this scan.
1582 const APInt& RHSVal = RHS->getValue();
1583 APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));
1584
1585 // Now that we know which bits have carries, compute the known-1/0 sets.
1586
1587 // Bits are known one if they are known zero in one operand and one in the
1588 // other, and there is no input carry.
1589 RHSKnownOne = ((LHSKnownZero & RHSVal) |
1590 (LHSKnownOne & ~RHSVal)) & ~CarryBits;
1591
1592 // Bits are known zero if they are known zero in both operands and there
1593 // is no input carry.
1594 RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
1595 } else {
1596 // If the high-bits of this ADD are not demanded, then it does not demand
1597 // the high bits of its LHS or RHS.
1598 if (DemandedMask[BitWidth-1] == 0) {
1599 // Right fill the mask of bits for this ADD to demand the most
1600 // significant bit and all those below it.
1601 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1602 if (SimplifyDemandedBits(I->getOperand(0), DemandedFromOps,
1603 LHSKnownZero, LHSKnownOne, Depth+1))
1604 return true;
1605 if (SimplifyDemandedBits(I->getOperand(1), DemandedFromOps,
1606 LHSKnownZero, LHSKnownOne, Depth+1))
1607 return true;
1608 }
1609 }
1610 break;
1611 }
1612 case Instruction::Sub:
1613 // If the high-bits of this SUB are not demanded, then it does not demand
1614 // the high bits of its LHS or RHS.
1615 if (DemandedMask[BitWidth-1] == 0) {
1616 // Right fill the mask of bits for this SUB to demand the most
1617 // significant bit and all those below it.
1618 uint32_t NLZ = DemandedMask.countLeadingZeros();
1619 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1620 if (SimplifyDemandedBits(I->getOperand(0), DemandedFromOps,
1621 LHSKnownZero, LHSKnownOne, Depth+1))
1622 return true;
1623 if (SimplifyDemandedBits(I->getOperand(1), DemandedFromOps,
1624 LHSKnownZero, LHSKnownOne, Depth+1))
1625 return true;
1626 }
Dan Gohmanbec16052008-04-28 17:02:21 +00001627 // Otherwise just hand the sub off to ComputeMaskedBits to fill in
1628 // the known zeros and ones.
1629 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001630 break;
1631 case Instruction::Shl:
1632 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1633 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1634 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
1635 if (SimplifyDemandedBits(I->getOperand(0), DemandedMaskIn,
1636 RHSKnownZero, RHSKnownOne, Depth+1))
1637 return true;
1638 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1639 "Bits known to be one AND zero?");
1640 RHSKnownZero <<= ShiftAmt;
1641 RHSKnownOne <<= ShiftAmt;
1642 // low bits known zero.
1643 if (ShiftAmt)
1644 RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
1645 }
1646 break;
1647 case Instruction::LShr:
1648 // For a logical shift right
1649 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1650 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1651
1652 // Unsigned shift right.
1653 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1654 if (SimplifyDemandedBits(I->getOperand(0), DemandedMaskIn,
1655 RHSKnownZero, RHSKnownOne, Depth+1))
1656 return true;
1657 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1658 "Bits known to be one AND zero?");
1659 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1660 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1661 if (ShiftAmt) {
1662 // Compute the new bits that are at the top now.
1663 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1664 RHSKnownZero |= HighBits; // high bits known zero.
1665 }
1666 }
1667 break;
1668 case Instruction::AShr:
1669 // If this is an arithmetic shift right and only the low-bit is set, we can
1670 // always convert this into a logical shr, even if the shift amount is
1671 // variable. The low bit of the shift cannot be an input sign bit unless
1672 // the shift amount is >= the size of the datatype, which is undefined.
1673 if (DemandedMask == 1) {
1674 // Perform the logical shift right.
1675 Value *NewVal = BinaryOperator::createLShr(
1676 I->getOperand(0), I->getOperand(1), I->getName());
1677 InsertNewInstBefore(cast<Instruction>(NewVal), *I);
1678 return UpdateValueUsesWith(I, NewVal);
1679 }
1680
1681 // If the sign bit is the only bit demanded by this ashr, then there is no
1682 // need to do it, the shift doesn't change the high bit.
1683 if (DemandedMask.isSignBit())
1684 return UpdateValueUsesWith(I, I->getOperand(0));
1685
1686 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1687 uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);
1688
1689 // Signed shift right.
1690 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1691 // If any of the "high bits" are demanded, we should set the sign bit as
1692 // demanded.
1693 if (DemandedMask.countLeadingZeros() <= ShiftAmt)
1694 DemandedMaskIn.set(BitWidth-1);
1695 if (SimplifyDemandedBits(I->getOperand(0),
1696 DemandedMaskIn,
1697 RHSKnownZero, RHSKnownOne, Depth+1))
1698 return true;
1699 assert((RHSKnownZero & RHSKnownOne) == 0 &&
1700 "Bits known to be one AND zero?");
1701 // Compute the new bits that are at the top now.
1702 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1703 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1704 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1705
1706 // Handle the sign bits.
1707 APInt SignBit(APInt::getSignBit(BitWidth));
1708 // Adjust to where it is now in the mask.
1709 SignBit = APIntOps::lshr(SignBit, ShiftAmt);
1710
1711 // If the input sign bit is known to be zero, or if none of the top bits
1712 // are demanded, turn this into an unsigned shift right.
1713 if (RHSKnownZero[BitWidth-ShiftAmt-1] ||
1714 (HighBits & ~DemandedMask) == HighBits) {
1715 // Perform the logical shift right.
1716 Value *NewVal = BinaryOperator::createLShr(
1717 I->getOperand(0), SA, I->getName());
1718 InsertNewInstBefore(cast<Instruction>(NewVal), *I);
1719 return UpdateValueUsesWith(I, NewVal);
1720 } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
1721 RHSKnownOne |= HighBits;
1722 }
1723 }
1724 break;
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001725 case Instruction::SRem:
1726 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1727 APInt RA = Rem->getValue();
1728 if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
1729 APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) | RA : ~RA;
1730 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
1731 if (SimplifyDemandedBits(I->getOperand(0), Mask2,
1732 LHSKnownZero, LHSKnownOne, Depth+1))
1733 return true;
1734
1735 if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
1736 LHSKnownZero |= ~LowBits;
1737 else if (LHSKnownOne[BitWidth-1])
1738 LHSKnownOne |= ~LowBits;
1739
1740 KnownZero |= LHSKnownZero & DemandedMask;
1741 KnownOne |= LHSKnownOne & DemandedMask;
1742
1743 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
1744 }
1745 }
1746 break;
Dan Gohmanbec16052008-04-28 17:02:21 +00001747 case Instruction::URem: {
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001748 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1749 APInt RA = Rem->getValue();
Dan Gohmanbec16052008-04-28 17:02:21 +00001750 if (RA.isStrictlyPositive() && RA.isPowerOf2()) {
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001751 APInt LowBits = (RA - 1) | RA;
1752 APInt Mask2 = LowBits & DemandedMask;
1753 KnownZero |= ~LowBits & DemandedMask;
1754 if (SimplifyDemandedBits(I->getOperand(0), Mask2,
1755 KnownZero, KnownOne, Depth+1))
1756 return true;
1757
1758 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
Dan Gohmanbec16052008-04-28 17:02:21 +00001759 break;
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001760 }
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001761 }
Dan Gohmanbec16052008-04-28 17:02:21 +00001762
1763 APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
1764 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1765 ComputeMaskedBits(I->getOperand(0), AllOnes,
1766 KnownZero2, KnownOne2, Depth+1);
1767 uint32_t Leaders = KnownZero2.countLeadingOnes();
1768 APInt HighZeros = APInt::getHighBitsSet(BitWidth, Leaders);
1769 if (SimplifyDemandedBits(I->getOperand(1), ~HighZeros,
1770 KnownZero2, KnownOne2, Depth+1))
1771 return true;
1772
1773 Leaders = std::max(Leaders,
1774 KnownZero2.countLeadingOnes());
1775 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
Nick Lewyckyc1372c82008-03-06 06:48:30 +00001776 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001777 }
Dan Gohmanbec16052008-04-28 17:02:21 +00001778 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001779
1780 // If the client is only demanding bits that we know, return the known
1781 // constant.
1782 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
1783 return UpdateValueUsesWith(I, ConstantInt::get(RHSKnownOne));
1784 return false;
1785}
1786
1787
1788/// SimplifyDemandedVectorElts - The specified value producecs a vector with
1789/// 64 or fewer elements. DemandedElts contains the set of elements that are
1790/// actually used by the caller. This method analyzes which elements of the
1791/// operand are undef and returns that information in UndefElts.
1792///
1793/// If the information about demanded elements can be used to simplify the
1794/// operation, the operation is simplified, then the resultant value is
1795/// returned. This returns null if no change was made.
1796Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, uint64_t DemandedElts,
1797 uint64_t &UndefElts,
1798 unsigned Depth) {
1799 unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
1800 assert(VWidth <= 64 && "Vector too wide to analyze!");
1801 uint64_t EltMask = ~0ULL >> (64-VWidth);
1802 assert(DemandedElts != EltMask && (DemandedElts & ~EltMask) == 0 &&
1803 "Invalid DemandedElts!");
1804
1805 if (isa<UndefValue>(V)) {
1806 // If the entire vector is undefined, just return this info.
1807 UndefElts = EltMask;
1808 return 0;
1809 } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
1810 UndefElts = EltMask;
1811 return UndefValue::get(V->getType());
1812 }
1813
1814 UndefElts = 0;
1815 if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
1816 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1817 Constant *Undef = UndefValue::get(EltTy);
1818
1819 std::vector<Constant*> Elts;
1820 for (unsigned i = 0; i != VWidth; ++i)
1821 if (!(DemandedElts & (1ULL << i))) { // If not demanded, set to undef.
1822 Elts.push_back(Undef);
1823 UndefElts |= (1ULL << i);
1824 } else if (isa<UndefValue>(CP->getOperand(i))) { // Already undef.
1825 Elts.push_back(Undef);
1826 UndefElts |= (1ULL << i);
1827 } else { // Otherwise, defined.
1828 Elts.push_back(CP->getOperand(i));
1829 }
1830
1831 // If we changed the constant, return it.
1832 Constant *NewCP = ConstantVector::get(Elts);
1833 return NewCP != CP ? NewCP : 0;
1834 } else if (isa<ConstantAggregateZero>(V)) {
1835 // Simplify the CAZ to a ConstantVector where the non-demanded elements are
1836 // set to undef.
1837 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1838 Constant *Zero = Constant::getNullValue(EltTy);
1839 Constant *Undef = UndefValue::get(EltTy);
1840 std::vector<Constant*> Elts;
1841 for (unsigned i = 0; i != VWidth; ++i)
1842 Elts.push_back((DemandedElts & (1ULL << i)) ? Zero : Undef);
1843 UndefElts = DemandedElts ^ EltMask;
1844 return ConstantVector::get(Elts);
1845 }
1846
1847 if (!V->hasOneUse()) { // Other users may use these bits.
1848 if (Depth != 0) { // Not at the root.
1849 // TODO: Just compute the UndefElts information recursively.
1850 return false;
1851 }
1852 return false;
1853 } else if (Depth == 10) { // Limit search depth.
1854 return false;
1855 }
1856
1857 Instruction *I = dyn_cast<Instruction>(V);
1858 if (!I) return false; // Only analyze instructions.
1859
1860 bool MadeChange = false;
1861 uint64_t UndefElts2;
1862 Value *TmpV;
1863 switch (I->getOpcode()) {
1864 default: break;
1865
1866 case Instruction::InsertElement: {
1867 // If this is a variable index, we don't know which element it overwrites.
1868 // demand exactly the same input as we produce.
1869 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1870 if (Idx == 0) {
1871 // Note that we can't propagate undef elt info, because we don't know
1872 // which elt is getting updated.
1873 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1874 UndefElts2, Depth+1);
1875 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1876 break;
1877 }
1878
1879 // If this is inserting an element that isn't demanded, remove this
1880 // insertelement.
1881 unsigned IdxNo = Idx->getZExtValue();
1882 if (IdxNo >= VWidth || (DemandedElts & (1ULL << IdxNo)) == 0)
1883 return AddSoonDeadInstToWorklist(*I, 0);
1884
1885 // Otherwise, the element inserted overwrites whatever was there, so the
1886 // input demanded set is simpler than the output set.
1887 TmpV = SimplifyDemandedVectorElts(I->getOperand(0),
1888 DemandedElts & ~(1ULL << IdxNo),
1889 UndefElts, Depth+1);
1890 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1891
1892 // The inserted element is defined.
1893 UndefElts |= 1ULL << IdxNo;
1894 break;
1895 }
1896 case Instruction::BitCast: {
1897 // Vector->vector casts only.
1898 const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1899 if (!VTy) break;
1900 unsigned InVWidth = VTy->getNumElements();
1901 uint64_t InputDemandedElts = 0;
1902 unsigned Ratio;
1903
1904 if (VWidth == InVWidth) {
1905 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1906 // elements as are demanded of us.
1907 Ratio = 1;
1908 InputDemandedElts = DemandedElts;
1909 } else if (VWidth > InVWidth) {
1910 // Untested so far.
1911 break;
1912
1913 // If there are more elements in the result than there are in the source,
1914 // then an input element is live if any of the corresponding output
1915 // elements are live.
1916 Ratio = VWidth/InVWidth;
1917 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1918 if (DemandedElts & (1ULL << OutIdx))
1919 InputDemandedElts |= 1ULL << (OutIdx/Ratio);
1920 }
1921 } else {
1922 // Untested so far.
1923 break;
1924
1925 // If there are more elements in the source than there are in the result,
1926 // then an input element is live if the corresponding output element is
1927 // live.
1928 Ratio = InVWidth/VWidth;
1929 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1930 if (DemandedElts & (1ULL << InIdx/Ratio))
1931 InputDemandedElts |= 1ULL << InIdx;
1932 }
1933
1934 // div/rem demand all inputs, because they don't want divide by zero.
1935 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1936 UndefElts2, Depth+1);
1937 if (TmpV) {
1938 I->setOperand(0, TmpV);
1939 MadeChange = true;
1940 }
1941
1942 UndefElts = UndefElts2;
1943 if (VWidth > InVWidth) {
1944 assert(0 && "Unimp");
1945 // If there are more elements in the result than there are in the source,
1946 // then an output element is undef if the corresponding input element is
1947 // undef.
1948 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1949 if (UndefElts2 & (1ULL << (OutIdx/Ratio)))
1950 UndefElts |= 1ULL << OutIdx;
1951 } else if (VWidth < InVWidth) {
1952 assert(0 && "Unimp");
1953 // If there are more elements in the source than there are in the result,
1954 // then a result element is undef if all of the corresponding input
1955 // elements are undef.
1956 UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
1957 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1958 if ((UndefElts2 & (1ULL << InIdx)) == 0) // Not undef?
1959 UndefElts &= ~(1ULL << (InIdx/Ratio)); // Clear undef bit.
1960 }
1961 break;
1962 }
1963 case Instruction::And:
1964 case Instruction::Or:
1965 case Instruction::Xor:
1966 case Instruction::Add:
1967 case Instruction::Sub:
1968 case Instruction::Mul:
1969 // div/rem demand all inputs, because they don't want divide by zero.
1970 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1971 UndefElts, Depth+1);
1972 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1973 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1974 UndefElts2, Depth+1);
1975 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1976
1977 // Output elements are undefined if both are undefined. Consider things
1978 // like undef&0. The result is known zero, not undef.
1979 UndefElts &= UndefElts2;
1980 break;
1981
1982 case Instruction::Call: {
1983 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1984 if (!II) break;
1985 switch (II->getIntrinsicID()) {
1986 default: break;
1987
1988 // Binary vector operations that work column-wise. A dest element is a
1989 // function of the corresponding input elements from the two inputs.
1990 case Intrinsic::x86_sse_sub_ss:
1991 case Intrinsic::x86_sse_mul_ss:
1992 case Intrinsic::x86_sse_min_ss:
1993 case Intrinsic::x86_sse_max_ss:
1994 case Intrinsic::x86_sse2_sub_sd:
1995 case Intrinsic::x86_sse2_mul_sd:
1996 case Intrinsic::x86_sse2_min_sd:
1997 case Intrinsic::x86_sse2_max_sd:
1998 TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
1999 UndefElts, Depth+1);
2000 if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
2001 TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
2002 UndefElts2, Depth+1);
2003 if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
2004
2005 // If only the low elt is demanded and this is a scalarizable intrinsic,
2006 // scalarize it now.
2007 if (DemandedElts == 1) {
2008 switch (II->getIntrinsicID()) {
2009 default: break;
2010 case Intrinsic::x86_sse_sub_ss:
2011 case Intrinsic::x86_sse_mul_ss:
2012 case Intrinsic::x86_sse2_sub_sd:
2013 case Intrinsic::x86_sse2_mul_sd:
2014 // TODO: Lower MIN/MAX/ABS/etc
2015 Value *LHS = II->getOperand(1);
2016 Value *RHS = II->getOperand(2);
2017 // Extract the element as scalars.
2018 LHS = InsertNewInstBefore(new ExtractElementInst(LHS, 0U,"tmp"), *II);
2019 RHS = InsertNewInstBefore(new ExtractElementInst(RHS, 0U,"tmp"), *II);
2020
2021 switch (II->getIntrinsicID()) {
2022 default: assert(0 && "Case stmts out of sync!");
2023 case Intrinsic::x86_sse_sub_ss:
2024 case Intrinsic::x86_sse2_sub_sd:
2025 TmpV = InsertNewInstBefore(BinaryOperator::createSub(LHS, RHS,
2026 II->getName()), *II);
2027 break;
2028 case Intrinsic::x86_sse_mul_ss:
2029 case Intrinsic::x86_sse2_mul_sd:
2030 TmpV = InsertNewInstBefore(BinaryOperator::createMul(LHS, RHS,
2031 II->getName()), *II);
2032 break;
2033 }
2034
2035 Instruction *New =
Gabor Greifd6da1d02008-04-06 20:25:17 +00002036 InsertElementInst::Create(UndefValue::get(II->getType()), TmpV, 0U,
2037 II->getName());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002038 InsertNewInstBefore(New, *II);
2039 AddSoonDeadInstToWorklist(*II, 0);
2040 return New;
2041 }
2042 }
2043
2044 // Output elements are undefined if both are undefined. Consider things
2045 // like undef&0. The result is known zero, not undef.
2046 UndefElts &= UndefElts2;
2047 break;
2048 }
2049 break;
2050 }
2051 }
2052 return MadeChange ? I : 0;
2053}
2054
Nick Lewycky2de09a92007-09-06 02:40:25 +00002055/// @returns true if the specified compare predicate is
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002056/// true when both operands are equal...
Nick Lewycky2de09a92007-09-06 02:40:25 +00002057/// @brief Determine if the icmp Predicate is true when both operands are equal
2058static bool isTrueWhenEqual(ICmpInst::Predicate pred) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002059 return pred == ICmpInst::ICMP_EQ || pred == ICmpInst::ICMP_UGE ||
2060 pred == ICmpInst::ICMP_SGE || pred == ICmpInst::ICMP_ULE ||
2061 pred == ICmpInst::ICMP_SLE;
2062}
2063
Nick Lewycky2de09a92007-09-06 02:40:25 +00002064/// @returns true if the specified compare instruction is
2065/// true when both operands are equal...
2066/// @brief Determine if the ICmpInst returns true when both operands are equal
2067static bool isTrueWhenEqual(ICmpInst &ICI) {
2068 return isTrueWhenEqual(ICI.getPredicate());
2069}
2070
/// AssociativeOpt - Perform an optimization on an associative operator.  This
/// function is designed to check a chain of associative operators for a
/// potential to apply a certain optimization.  Since the optimization may be
/// applicable if the expression was reassociated, this checks the chain, then
/// reassociates the expression as necessary to expose the optimization
/// opportunity.  This makes use of a special Functor, which must define
/// 'shouldApply' and 'apply' methods.
///
/// Walks the left spine of same-opcode, single-use operators rooted at Root,
/// asking F.shouldApply on each operand; when a match is found, the chain is
/// rotated so the matching operand becomes Root's direct operand, then
/// F.apply(Root) performs the transformation.  Returns the result of
/// F.apply, or 0 if no opportunity was found (or Root was consumed).
template<typename Functor>
Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
  unsigned Opcode = Root.getOpcode();
  Value *LHS = Root.getOperand(0);

  // Quick check, see if the immediate LHS matches...
  if (F.shouldApply(LHS))
    return F.apply(Root);

  // Otherwise, if the LHS is not of the same opcode as the root, return.
  // Only single-use links may be rewritten, since other users would observe
  // the intermediate values change.
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
    // Should we apply this transform to the RHS?
    bool ShouldApply = F.shouldApply(LHSI->getOperand(1));

    // If not to the RHS, check to see if we should apply to the LHS...
    if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
      cast<BinaryOperator>(LHSI)->swapOperands();   // Make the LHS the RHS
      ShouldApply = true;
    }

    // If the functor wants to apply the optimization to the RHS of LHSI,
    // reassociate the expression from ((? op A) op B) to (? op (A op B))
    if (ShouldApply) {
      BasicBlock *BB = Root.getParent();

      // Now all of the instructions are in the current basic block, go ahead
      // and perform the reassociation.
      Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));

      // First move the selected RHS to the LHS of the root...
      Root.setOperand(0, LHSI->getOperand(1));

      // Make what used to be the LHS of the root be the user of the root...
      Value *ExtraOperand = TmpLHSI->getOperand(1);
      if (&Root == TmpLHSI) {
        // Degenerate case: the chain had length one, so the rotation would
        // make Root use itself.  Kill Root instead.
        Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
        return 0;
      }
      Root.replaceAllUsesWith(TmpLHSI);          // Users now use TmpLHSI
      TmpLHSI->setOperand(1, &Root);             // TmpLHSI now uses the root
      // Physically move TmpLHSI to just after Root so it is dominated by the
      // value it now consumes.
      TmpLHSI->getParent()->getInstList().remove(TmpLHSI);
      BasicBlock::iterator ARI = &Root; ++ARI;
      BB->getInstList().insert(ARI, TmpLHSI);    // Move TmpLHSI to after Root
      ARI = Root;

      // Now propagate the ExtraOperand down the chain of instructions until we
      // get to LHSI.
      while (TmpLHSI != LHSI) {
        Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
        // Move the instruction to immediately before the chain we are
        // constructing to avoid breaking dominance properties.
        NextLHSI->getParent()->getInstList().remove(NextLHSI);
        BB->getInstList().insert(ARI, NextLHSI);
        ARI = NextLHSI;

        // Thread the displaced operand one link further down the chain.
        Value *NextOp = NextLHSI->getOperand(1);
        NextLHSI->setOperand(1, ExtraOperand);
        TmpLHSI = NextLHSI;
        ExtraOperand = NextOp;
      }

      // Now that the instructions are reassociated, have the functor perform
      // the transformation...
      return F.apply(Root);
    }

    LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
  }
  return 0;
}
2150
2151
2152// AddRHS - Implements: X + X --> X << 1
2153struct AddRHS {
2154 Value *RHS;
2155 AddRHS(Value *rhs) : RHS(rhs) {}
2156 bool shouldApply(Value *LHS) const { return LHS == RHS; }
2157 Instruction *apply(BinaryOperator &Add) const {
2158 return BinaryOperator::createShl(Add.getOperand(0),
2159 ConstantInt::get(Add.getType(), 1));
2160 }
2161};
2162
2163// AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
2164// iff C1&C2 == 0
2165struct AddMaskingAnd {
2166 Constant *C2;
2167 AddMaskingAnd(Constant *c) : C2(c) {}
2168 bool shouldApply(Value *LHS) const {
2169 ConstantInt *C1;
2170 return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
2171 ConstantExpr::getAnd(C1, C2)->isNullValue();
2172 }
2173 Instruction *apply(BinaryOperator &Add) const {
2174 return BinaryOperator::createOr(Add.getOperand(0), Add.getOperand(1));
2175 }
2176};
2177
2178static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
2179 InstCombiner *IC) {
2180 if (CastInst *CI = dyn_cast<CastInst>(&I)) {
2181 if (Constant *SOC = dyn_cast<Constant>(SO))
2182 return ConstantExpr::getCast(CI->getOpcode(), SOC, I.getType());
2183
2184 return IC->InsertNewInstBefore(CastInst::create(
2185 CI->getOpcode(), SO, I.getType(), SO->getName() + ".cast"), I);
2186 }
2187
2188 // Figure out if the constant is the left or the right argument.
2189 bool ConstIsRHS = isa<Constant>(I.getOperand(1));
2190 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
2191
2192 if (Constant *SOC = dyn_cast<Constant>(SO)) {
2193 if (ConstIsRHS)
2194 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
2195 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
2196 }
2197
2198 Value *Op0 = SO, *Op1 = ConstOperand;
2199 if (!ConstIsRHS)
2200 std::swap(Op0, Op1);
2201 Instruction *New;
2202 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
2203 New = BinaryOperator::create(BO->getOpcode(), Op0, Op1,SO->getName()+".op");
2204 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2205 New = CmpInst::create(CI->getOpcode(), CI->getPredicate(), Op0, Op1,
2206 SO->getName()+".cmp");
2207 else {
2208 assert(0 && "Unknown binary instruction type!");
2209 abort();
2210 }
2211 return IC->InsertNewInstBefore(New, I);
2212}
2213
2214// FoldOpIntoSelect - Given an instruction with a select as one operand and a
2215// constant as the other operand, try to fold the binary operator into the
2216// select arguments. This also works for Cast instructions, which obviously do
2217// not have a second operand.
2218static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
2219 InstCombiner *IC) {
2220 // Don't modify shared select instructions
2221 if (!SI->hasOneUse()) return 0;
2222 Value *TV = SI->getOperand(1);
2223 Value *FV = SI->getOperand(2);
2224
2225 if (isa<Constant>(TV) || isa<Constant>(FV)) {
2226 // Bool selects with constant operands can be folded to logical ops.
2227 if (SI->getType() == Type::Int1Ty) return 0;
2228
2229 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
2230 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);
2231
Gabor Greifd6da1d02008-04-06 20:25:17 +00002232 return SelectInst::Create(SI->getCondition(), SelectTrueVal,
2233 SelectFalseVal);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002234 }
2235 return 0;
2236}
2237
2238
/// FoldOpIntoPhi - Given a binary operator or cast instruction which has a PHI
/// node as operand #0, see if we can fold the instruction into the PHI (which
/// is only possible if all operands to the PHI are constants).
///
/// Returns the replacement instruction (a new PHI of the folded values), or
/// null if the transformation does not apply.
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  // Only fold if I is the PHI's sole user, so the original PHI dies afterward.
  if (!PN->hasOneUse() || NumPHIValues == 0) return 0;

  // Check to see if all of the operands of the PHI are constants.  If there is
  // one non-constant value, remember the BB it is.  If there is more than one
  // or if *it* is a PHI, bail out.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i)
    if (!isa<Constant>(PN->getIncomingValue(i))) {
      if (NonConstBB) return 0;  // More than one non-const value.
      if (isa<PHINode>(PN->getIncomingValue(i))) return 0;  // Itself a phi.
      NonConstBB = PN->getIncomingBlock(i);

      // If the incoming non-constant value is in I's block, we have an
      // infinite loop.
      if (NonConstBB == I.getParent())
        return 0;
    }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), "");
  NewPN->reserveOperandSpace(PN->getNumOperands()/2);
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // Next, add all of the operands to the PHI.
  if (I.getNumOperands() == 2) {
    // Binary operator or compare: fold "op (phi c1, x), C" per incoming edge.
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        // Constant incoming value: fold at compile time.
        if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
        else
          InV = ConstantExpr::get(I.getOpcode(), InC, C);
      } else {
        // The single non-constant value: materialize the operation at the end
        // of its predecessor block (safe: the edge is non-critical, see above).
        assert(PN->getIncomingBlock(i) == NonConstBB);
        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
          InV = BinaryOperator::create(BO->getOpcode(),
                                       PN->getIncomingValue(i), C, "phitmp",
                                       NonConstBB->getTerminator());
        else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = CmpInst::create(CI->getOpcode(),
                                CI->getPredicate(),
                                PN->getIncomingValue(i), C, "phitmp",
                                NonConstBB->getTerminator());
        else
          assert(0 && "Unknown binop!");

        AddToWorkList(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    // One-operand case: a cast instruction.
    CastInst *CI = cast<CastInst>(&I);
    const Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = CastInst::create(CI->getOpcode(), PN->getIncomingValue(i),
                               I.getType(), "phitmp",
                               NonConstBB->getTerminator());
        AddToWorkList(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }
  return ReplaceInstUsesWith(I, NewPN);
}
2325
Chris Lattner55476162008-01-29 06:52:45 +00002326
/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// Note that this function will need to be revisited when we support nondefault
/// rounding modes!
///
static bool CannotBeNegativeZero(const Value *V) {
  // A constant is -0.0 exactly when its APFloat says so.
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    if (I->getOpcode() == Instruction::Add &&
        isa<ConstantFP>(I->getOperand(1)) &&
        cast<ConstantFP>(I->getOperand(1))->isNullValue())
      return true;

    // sqrt(x) preserves the sign of zero, so the result is -0.0 only if the
    // input could be -0.0; recurse on the operand.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::sqrt)
        return CannotBeNegativeZero(II->getOperand(1));

    // Calls to the libm-style abs family never return -0.0.  Dispatch on the
    // name length to keep the strcmp count down.
    if (const CallInst *CI = dyn_cast<CallInst>(I))
      if (const Function *F = CI->getCalledFunction()) {
        if (F->isDeclaration()) {
          switch (F->getNameLen()) {
          case 3:  // abs(x) != -0.0
            if (!strcmp(F->getNameStart(), "abs")) return true;
            break;
          case 4:  // abs[lf](x) != -0.0
            if (!strcmp(F->getNameStart(), "absf")) return true;
            if (!strcmp(F->getNameStart(), "absl")) return true;
            break;
          }
        }
      }
  }

  // Couldn't prove anything; conservatively assume -0.0 is possible.
  return false;
}
2366
2367
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002368Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
2369 bool Changed = SimplifyCommutative(I);
2370 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2371
2372 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2373 // X + undef -> undef
2374 if (isa<UndefValue>(RHS))
2375 return ReplaceInstUsesWith(I, RHS);
2376
2377 // X + 0 --> X
2378 if (!I.getType()->isFPOrFPVector()) { // NOTE: -0 + +0 = +0.
2379 if (RHSC->isNullValue())
2380 return ReplaceInstUsesWith(I, LHS);
2381 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
Dale Johannesen2fc20782007-09-14 22:26:36 +00002382 if (CFP->isExactlyValue(ConstantFP::getNegativeZero
2383 (I.getType())->getValueAPF()))
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002384 return ReplaceInstUsesWith(I, LHS);
2385 }
2386
2387 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
2388 // X + (signbit) --> X ^ signbit
2389 const APInt& Val = CI->getValue();
2390 uint32_t BitWidth = Val.getBitWidth();
2391 if (Val == APInt::getSignBit(BitWidth))
2392 return BinaryOperator::createXor(LHS, RHS);
2393
2394 // See if SimplifyDemandedBits can simplify this. This handles stuff like
2395 // (X & 254)+1 -> (X&254)|1
2396 if (!isa<VectorType>(I.getType())) {
2397 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2398 if (SimplifyDemandedBits(&I, APInt::getAllOnesValue(BitWidth),
2399 KnownZero, KnownOne))
2400 return &I;
2401 }
2402 }
2403
2404 if (isa<PHINode>(LHS))
2405 if (Instruction *NV = FoldOpIntoPhi(I))
2406 return NV;
2407
2408 ConstantInt *XorRHS = 0;
2409 Value *XorLHS = 0;
2410 if (isa<ConstantInt>(RHSC) &&
2411 match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
2412 uint32_t TySizeBits = I.getType()->getPrimitiveSizeInBits();
2413 const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
2414
2415 uint32_t Size = TySizeBits / 2;
2416 APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
2417 APInt CFF80Val(-C0080Val);
2418 do {
2419 if (TySizeBits > Size) {
2420 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
2421 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
2422 if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
2423 (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
2424 // This is a sign extend if the top bits are known zero.
2425 if (!MaskedValueIsZero(XorLHS,
2426 APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
2427 Size = 0; // Not a sign ext, but can't be any others either.
2428 break;
2429 }
2430 }
2431 Size >>= 1;
2432 C0080Val = APIntOps::lshr(C0080Val, Size);
2433 CFF80Val = APIntOps::ashr(CFF80Val, Size);
2434 } while (Size >= 1);
2435
2436 // FIXME: This shouldn't be necessary. When the backends can handle types
2437 // with funny bit widths then this whole cascade of if statements should
2438 // be removed. It is just here to get the size of the "middle" type back
2439 // up to something that the back ends can handle.
2440 const Type *MiddleType = 0;
2441 switch (Size) {
2442 default: break;
2443 case 32: MiddleType = Type::Int32Ty; break;
2444 case 16: MiddleType = Type::Int16Ty; break;
2445 case 8: MiddleType = Type::Int8Ty; break;
2446 }
2447 if (MiddleType) {
2448 Instruction *NewTrunc = new TruncInst(XorLHS, MiddleType, "sext");
2449 InsertNewInstBefore(NewTrunc, I);
2450 return new SExtInst(NewTrunc, I.getType(), I.getName());
2451 }
2452 }
2453 }
2454
2455 // X + X --> X << 1
2456 if (I.getType()->isInteger() && I.getType() != Type::Int1Ty) {
2457 if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS))) return Result;
2458
2459 if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
2460 if (RHSI->getOpcode() == Instruction::Sub)
2461 if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
2462 return ReplaceInstUsesWith(I, RHSI->getOperand(0));
2463 }
2464 if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
2465 if (LHSI->getOpcode() == Instruction::Sub)
2466 if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
2467 return ReplaceInstUsesWith(I, LHSI->getOperand(0));
2468 }
2469 }
2470
2471 // -A + B --> B - A
Chris Lattner53c9fbf2008-02-17 21:03:36 +00002472 // -A + -B --> -(A + B)
2473 if (Value *LHSV = dyn_castNegVal(LHS)) {
Chris Lattner322a9192008-02-18 17:50:16 +00002474 if (LHS->getType()->isIntOrIntVector()) {
2475 if (Value *RHSV = dyn_castNegVal(RHS)) {
2476 Instruction *NewAdd = BinaryOperator::createAdd(LHSV, RHSV, "sum");
2477 InsertNewInstBefore(NewAdd, I);
2478 return BinaryOperator::createNeg(NewAdd);
2479 }
Chris Lattner53c9fbf2008-02-17 21:03:36 +00002480 }
2481
2482 return BinaryOperator::createSub(RHS, LHSV);
2483 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002484
2485 // A + -B --> A - B
2486 if (!isa<Constant>(RHS))
2487 if (Value *V = dyn_castNegVal(RHS))
2488 return BinaryOperator::createSub(LHS, V);
2489
2490
2491 ConstantInt *C2;
2492 if (Value *X = dyn_castFoldableMul(LHS, C2)) {
2493 if (X == RHS) // X*C + X --> X * (C+1)
2494 return BinaryOperator::createMul(RHS, AddOne(C2));
2495
2496 // X*C1 + X*C2 --> X * (C1+C2)
2497 ConstantInt *C1;
2498 if (X == dyn_castFoldableMul(RHS, C1))
2499 return BinaryOperator::createMul(X, Add(C1, C2));
2500 }
2501
2502 // X + X*C --> X * (C+1)
2503 if (dyn_castFoldableMul(RHS, C2) == LHS)
2504 return BinaryOperator::createMul(LHS, AddOne(C2));
2505
2506 // X + ~X --> -1 since ~X = -X-1
2507 if (dyn_castNotVal(LHS) == RHS || dyn_castNotVal(RHS) == LHS)
2508 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2509
2510
2511 // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
2512 if (match(RHS, m_And(m_Value(), m_ConstantInt(C2))))
2513 if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
2514 return R;
2515
Nick Lewycky83598a72008-02-03 07:42:09 +00002516 // W*X + Y*Z --> W * (X+Z) iff W == Y
Nick Lewycky5d03b512008-02-03 08:19:11 +00002517 if (I.getType()->isIntOrIntVector()) {
Nick Lewycky83598a72008-02-03 07:42:09 +00002518 Value *W, *X, *Y, *Z;
2519 if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
2520 match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
2521 if (W != Y) {
2522 if (W == Z) {
Bill Wendling44a36ea2008-02-26 10:53:30 +00002523 std::swap(Y, Z);
Nick Lewycky83598a72008-02-03 07:42:09 +00002524 } else if (Y == X) {
Bill Wendling44a36ea2008-02-26 10:53:30 +00002525 std::swap(W, X);
2526 } else if (X == Z) {
Nick Lewycky83598a72008-02-03 07:42:09 +00002527 std::swap(Y, Z);
2528 std::swap(W, X);
2529 }
2530 }
2531
2532 if (W == Y) {
2533 Value *NewAdd = InsertNewInstBefore(BinaryOperator::createAdd(X, Z,
2534 LHS->getName()), I);
2535 return BinaryOperator::createMul(W, NewAdd);
2536 }
2537 }
2538 }
2539
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002540 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
2541 Value *X = 0;
2542 if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
2543 return BinaryOperator::createSub(SubOne(CRHS), X);
2544
2545 // (X & FF00) + xx00 -> (X+xx00) & FF00
2546 if (LHS->hasOneUse() && match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
2547 Constant *Anded = And(CRHS, C2);
2548 if (Anded == CRHS) {
2549 // See if all bits from the first bit set in the Add RHS up are included
2550 // in the mask. First, get the rightmost bit.
2551 const APInt& AddRHSV = CRHS->getValue();
2552
2553 // Form a mask of all bits from the lowest bit added through the top.
2554 APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
2555
2556 // See if the and mask includes all of these bits.
2557 APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
2558
2559 if (AddRHSHighBits == AddRHSHighBitsAnd) {
2560 // Okay, the xform is safe. Insert the new add pronto.
2561 Value *NewAdd = InsertNewInstBefore(BinaryOperator::createAdd(X, CRHS,
2562 LHS->getName()), I);
2563 return BinaryOperator::createAnd(NewAdd, C2);
2564 }
2565 }
2566 }
2567
2568 // Try to fold constant add into select arguments.
2569 if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
2570 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2571 return R;
2572 }
2573
2574 // add (cast *A to intptrtype) B ->
Chris Lattnerbf0c5f32007-12-20 01:56:58 +00002575 // cast (GEP (cast *A to sbyte*) B) --> intptrtype
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002576 {
2577 CastInst *CI = dyn_cast<CastInst>(LHS);
2578 Value *Other = RHS;
2579 if (!CI) {
2580 CI = dyn_cast<CastInst>(RHS);
2581 Other = LHS;
2582 }
2583 if (CI && CI->getType()->isSized() &&
2584 (CI->getType()->getPrimitiveSizeInBits() ==
2585 TD->getIntPtrType()->getPrimitiveSizeInBits())
2586 && isa<PointerType>(CI->getOperand(0)->getType())) {
Christopher Lambbb2f2222007-12-17 01:12:55 +00002587 unsigned AS =
2588 cast<PointerType>(CI->getOperand(0)->getType())->getAddressSpace();
Chris Lattner13c2d6e2008-01-13 22:23:22 +00002589 Value *I2 = InsertBitCastBefore(CI->getOperand(0),
2590 PointerType::get(Type::Int8Ty, AS), I);
Gabor Greifd6da1d02008-04-06 20:25:17 +00002591 I2 = InsertNewInstBefore(GetElementPtrInst::Create(I2, Other, "ctg2"), I);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002592 return new PtrToIntInst(I2, CI->getType());
2593 }
2594 }
Christopher Lamb244ec282007-12-18 09:34:41 +00002595
Chris Lattnerbf0c5f32007-12-20 01:56:58 +00002596 // add (select X 0 (sub n A)) A --> select X A n
Christopher Lamb244ec282007-12-18 09:34:41 +00002597 {
2598 SelectInst *SI = dyn_cast<SelectInst>(LHS);
2599 Value *Other = RHS;
2600 if (!SI) {
2601 SI = dyn_cast<SelectInst>(RHS);
2602 Other = LHS;
2603 }
Chris Lattnerbf0c5f32007-12-20 01:56:58 +00002604 if (SI && SI->hasOneUse()) {
Christopher Lamb244ec282007-12-18 09:34:41 +00002605 Value *TV = SI->getTrueValue();
2606 Value *FV = SI->getFalseValue();
Chris Lattnerbf0c5f32007-12-20 01:56:58 +00002607 Value *A, *N;
Christopher Lamb244ec282007-12-18 09:34:41 +00002608
2609 // Can we fold the add into the argument of the select?
2610 // We check both true and false select arguments for a matching subtract.
Chris Lattnerbf0c5f32007-12-20 01:56:58 +00002611 if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Value(A))) &&
2612 A == Other) // Fold the add into the true select value.
Gabor Greifd6da1d02008-04-06 20:25:17 +00002613 return SelectInst::Create(SI->getCondition(), N, A);
Chris Lattnerbf0c5f32007-12-20 01:56:58 +00002614 if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Value(A))) &&
2615 A == Other) // Fold the add into the false select value.
Gabor Greifd6da1d02008-04-06 20:25:17 +00002616 return SelectInst::Create(SI->getCondition(), A, N);
Christopher Lamb244ec282007-12-18 09:34:41 +00002617 }
2618 }
Chris Lattner55476162008-01-29 06:52:45 +00002619
2620 // Check for X+0.0. Simplify it to X if we know X is not -0.0.
2621 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
2622 if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
2623 return ReplaceInstUsesWith(I, LHS);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002624
2625 return Changed ? &I : 0;
2626}
2627
2628// isSignBit - Return true if the value represented by the constant only has the
2629// highest order bit set.
2630static bool isSignBit(ConstantInt *CI) {
2631 uint32_t NumBits = CI->getType()->getPrimitiveSizeInBits();
2632 return CI->getValue() == APInt::getSignBit(NumBits);
2633}
2634
/// visitSub - Simplify integer and FP 'sub' instructions.  Returns either a
/// replacement instruction (caller inserts it), the original instruction
/// after an in-place update, or null when nothing changed.
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Op0 == Op1)         // sub X, X  -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // If this is a 'B = x-(-A)', change to B = x+A...
  if (Value *V = dyn_castNegVal(Op1))
    return BinaryOperator::createAdd(Op0, V);

  if (isa<UndefValue>(Op0))
    return ReplaceInstUsesWith(I, Op0);    // undef - X -> undef
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);    // X - undef -> undef

  if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
    // Replace (-1 - A) with (~A)...
    if (C->isAllOnesValue())
      return BinaryOperator::createNot(Op1);

    // C - ~X == X + (1+C)
    Value *X = 0;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::createAdd(X, AddOne(C));

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    // (0 - a full-width sign-bit shift flips the shift's signedness.)
    if (C->isZero()) {
      if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
        if (SI->getOpcode() == Instruction::LShr) {
          if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
            // Check to see if we are shifting out everything but the sign bit.
            if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
                SI->getType()->getPrimitiveSizeInBits()-1) {
              // Ok, the transformation is safe.  Insert AShr.
              return BinaryOperator::create(Instruction::AShr,
                                          SI->getOperand(0), CU, SI->getName());
            }
          }
        }
        else if (SI->getOpcode() == Instruction::AShr) {
          if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
            // Check to see if we are shifting out everything but the sign bit.
            if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
                SI->getType()->getPrimitiveSizeInBits()-1) {
              // Ok, the transformation is safe.  Insert LShr.
              return BinaryOperator::createLShr(
                                          SI->getOperand(0), CU, SI->getName());
            }
          }
        }
      }
    }

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    // sub C, (phi c1, ..., x): try to push the sub into the PHI.
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
    // Folds of X - (add ...); not valid for FP where reassociation may
    // change rounding.
    if (Op1I->getOpcode() == Instruction::Add &&
        !Op0->getType()->isFPOrFPVector()) {
      if (Op1I->getOperand(0) == Op0)              // X-(X+Y) == -Y
        return BinaryOperator::createNeg(Op1I->getOperand(1), I.getName());
      else if (Op1I->getOperand(1) == Op0)         // X-(Y+X) == -Y
        return BinaryOperator::createNeg(Op1I->getOperand(0), I.getName());
      else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
        if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
          // C1-(X+C2) --> (C1-C2)-X
          return BinaryOperator::createSub(Subtract(CI1, CI2),
                                           Op1I->getOperand(0));
      }
    }

    if (Op1I->hasOneUse()) {
      // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
      // is not used by anyone else...
      //
      if (Op1I->getOpcode() == Instruction::Sub &&
          !Op1I->getType()->isFPOrFPVector()) {
        // Swap the two operands of the subexpr in place (safe: single use).
        Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
        Op1I->setOperand(0, IIOp1);
        Op1I->setOperand(1, IIOp0);

        // Create the new top level add instruction...
        return BinaryOperator::createAdd(Op0, Op1);
      }

      // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
      //
      if (Op1I->getOpcode() == Instruction::And &&
          (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
        // Pick whichever and-operand is NOT Op0.
        Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);

        Value *NewNot =
          InsertNewInstBefore(BinaryOperator::createNot(OtherOp, "B.not"), I);
        return BinaryOperator::createAnd(Op0, NewNot);
      }

      // 0 - (X sdiv C)  -> (X sdiv -C)
      if (Op1I->getOpcode() == Instruction::SDiv)
        if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
          if (CSI->isZero())
            if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
              return BinaryOperator::createSDiv(Op1I->getOperand(0),
                                               ConstantExpr::getNeg(DivRHS));

      // X - X*C --> X * (1-C)
      ConstantInt *C2 = 0;
      if (dyn_castFoldableMul(Op1I, C2) == Op0) {
        Constant *CP1 = Subtract(ConstantInt::get(I.getType(), 1), C2);
        return BinaryOperator::createMul(Op0, CP1);
      }

      // X - ((X / Y) * Y) --> X % Y
      if (Op1I->getOpcode() == Instruction::Mul)
        if (Instruction *I = dyn_cast<Instruction>(Op1I->getOperand(0)))
          if (Op0 == I->getOperand(0) &&
              Op1I->getOperand(1) == I->getOperand(1)) {
            if (I->getOpcode() == Instruction::SDiv)
              return BinaryOperator::createSRem(Op0, Op1I->getOperand(1));
            if (I->getOpcode() == Instruction::UDiv)
              return BinaryOperator::createURem(Op0, Op1I->getOperand(1));
          }
    }
  }

  // Folds of (add/sub ...) - X; again integer-only.
  if (!Op0->getType()->isFPOrFPVector())
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      if (Op0I->getOpcode() == Instruction::Add) {
        if (Op0I->getOperand(0) == Op1)             // (Y+X)-Y == X
          return ReplaceInstUsesWith(I, Op0I->getOperand(1));
        else if (Op0I->getOperand(1) == Op1)        // (X+Y)-Y == X
          return ReplaceInstUsesWith(I, Op0I->getOperand(0));
      } else if (Op0I->getOpcode() == Instruction::Sub) {
        if (Op0I->getOperand(0) == Op1)             // (X-Y)-X == -Y
          return BinaryOperator::createNeg(Op0I->getOperand(1), I.getName());
      }
    }

  ConstantInt *C1;
  if (Value *X = dyn_castFoldableMul(Op0, C1)) {
    if (X == Op1)  // X*C - X --> X * (C-1)
      return BinaryOperator::createMul(Op1, SubOne(C1));

    ConstantInt *C2;   // X*C1 - X*C2 -> X * (C1-C2)
    if (X == dyn_castFoldableMul(Op1, C2))
      return BinaryOperator::createMul(X, Subtract(C1, C2));
  }
  return 0;
}
2792
2793/// isSignBitCheck - Given an exploded icmp instruction, return true if the
2794/// comparison only checks the sign bit. If it only checks the sign bit, set
2795/// TrueIfSigned if the result of the comparison is true when the input value is
2796/// signed.
2797static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
2798 bool &TrueIfSigned) {
2799 switch (pred) {
2800 case ICmpInst::ICMP_SLT: // True if LHS s< 0
2801 TrueIfSigned = true;
2802 return RHS->isZero();
2803 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
2804 TrueIfSigned = true;
2805 return RHS->isAllOnesValue();
2806 case ICmpInst::ICMP_SGT: // True if LHS s> -1
2807 TrueIfSigned = false;
2808 return RHS->isAllOnesValue();
2809 case ICmpInst::ICMP_UGT:
2810 // True if LHS u> RHS and RHS == high-bit-mask - 1
2811 TrueIfSigned = true;
2812 return RHS->getValue() ==
2813 APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
2814 case ICmpInst::ICMP_UGE:
2815 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
2816 TrueIfSigned = true;
2817 return RHS->getValue() ==
2818 APInt::getSignBit(RHS->getType()->getPrimitiveSizeInBits());
2819 default:
2820 return false;
2821 }
2822}
2823
/// visitMul - Simplify integer and FP 'mul' instructions.  Returns either a
/// replacement instruction (caller inserts it), the original instruction
/// after an in-place update, or null when nothing changed.
Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  // Canonicalize constants to the RHS and re-associate.
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0);

  if (isa<UndefValue>(I.getOperand(1)))              // undef * X -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // Simplify mul instructions with a constant RHS...
  if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {

      // ((X << C1)*C2) == (X * (C2 << C1))
      if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
        if (SI->getOpcode() == Instruction::Shl)
          if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
            return BinaryOperator::createMul(SI->getOperand(0),
                                             ConstantExpr::getShl(CI, ShOp));

      if (CI->isZero())
        return ReplaceInstUsesWith(I, Op1);  // X * 0  == 0
      if (CI->equalsInt(1))                  // X * 1  == X
        return ReplaceInstUsesWith(I, Op0);
      if (CI->isAllOnesValue())              // X * -1 == 0 - X
        return BinaryOperator::createNeg(Op0, I.getName());

      const APInt& Val = cast<ConstantInt>(CI)->getValue();
      if (Val.isPowerOf2()) {          // Replace X*(2^C) with X << C
        return BinaryOperator::createShl(Op0,
                 ConstantInt::get(Op0->getType(), Val.logBase2()));
      }
    } else if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1)) {
      if (Op1F->isNullValue())
        return ReplaceInstUsesWith(I, Op1);

      // "In IEEE floating point, x*1 is not equivalent to x for nans.  However,
      // ANSI says we can drop signals, so we can do this anyway." (from GCC)
      // We need a better interface for long double here.
      if (Op1->getType() == Type::FloatTy || Op1->getType() == Type::DoubleTy)
        if (Op1F->isExactlyValue(1.0))
          return ReplaceInstUsesWith(I, Op0);  // Eliminate 'mul double %X, 1.0'
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
      if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
          isa<ConstantInt>(Op0I->getOperand(1))) {
        // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
        Instruction *Add = BinaryOperator::createMul(Op0I->getOperand(0),
                                                     Op1, "tmp");
        InsertNewInstBefore(Add, I);
        Value *C1C2 = ConstantExpr::getMul(Op1,
                                           cast<Constant>(Op0I->getOperand(1)));
        return BinaryOperator::createAdd(Add, C1C2);

      }

    // Try to fold constant mul into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    // mul (phi c1, ..., x), C: try to push the mul into the PHI.
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  if (Value *Op0v = dyn_castNegVal(Op0))     // -X * -Y = X*Y
    if (Value *Op1v = dyn_castNegVal(I.getOperand(1)))
      return BinaryOperator::createMul(Op0v, Op1v);

  // If one of the operands of the multiply is a cast from a boolean value,
  // then we know the bool is either zero or one, so this is a 'masking'
  // multiply.  See if we can simplify things based on how the boolean was
  // originally formed.
  CastInst *BoolCast = 0;
  if (ZExtInst *CI = dyn_cast<ZExtInst>(I.getOperand(0)))
    if (CI->getOperand(0)->getType() == Type::Int1Ty)
      BoolCast = CI;
  if (!BoolCast)
    if (ZExtInst *CI = dyn_cast<ZExtInst>(I.getOperand(1)))
      if (CI->getOperand(0)->getType() == Type::Int1Ty)
        BoolCast = CI;
  if (BoolCast) {
    if (ICmpInst *SCI = dyn_cast<ICmpInst>(BoolCast->getOperand(0))) {
      Value *SCIOp0 = SCI->getOperand(0), *SCIOp1 = SCI->getOperand(1);
      const Type *SCOpTy = SCIOp0->getType();
      bool TIS = false;

      // If the icmp is true iff the sign bit of X is set, then convert this
      // multiply into a shift/and combination.
      if (isa<ConstantInt>(SCIOp1) &&
          isSignBitCheck(SCI->getPredicate(), cast<ConstantInt>(SCIOp1), TIS) &&
          TIS) {
        // Shift the X value right to turn it into "all signbits".
        Constant *Amt = ConstantInt::get(SCIOp0->getType(),
                                          SCOpTy->getPrimitiveSizeInBits()-1);
        Value *V =
          InsertNewInstBefore(
            BinaryOperator::create(Instruction::AShr, SCIOp0, Amt,
                                            BoolCast->getOperand(0)->getName()+
                                            ".mask"), I);

        // If the multiply type is not the same as the source type, sign extend
        // or truncate to the multiply type.
        if (I.getType() != V->getType()) {
          uint32_t SrcBits = V->getType()->getPrimitiveSizeInBits();
          uint32_t DstBits = I.getType()->getPrimitiveSizeInBits();
          Instruction::CastOps opcode =
            (SrcBits == DstBits ? Instruction::BitCast :
             (SrcBits < DstBits ? Instruction::SExt : Instruction::Trunc));
          V = InsertCastBefore(opcode, V, I.getType(), I);
        }

        // And the mask with the non-cast operand of the multiply.
        Value *OtherOp = Op0 == BoolCast ? I.getOperand(1) : Op0;
        return BinaryOperator::createAnd(V, OtherOp);
      }
    }
  }

  return Changed ? &I : 0;
}
2944
/// This function implements the transforms on div instructions that work
/// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It
/// is used by the visitors to those instructions.
/// @brief Transforms common to all three div instructions
Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // undef / X -> 0        for integer.
  // undef / X -> undef    for FP (the undef could be a snan).
  if (isa<UndefValue>(Op0)) {
    if (Op0->getType()->isFPOrFPVector())
      return ReplaceInstUsesWith(I, Op0);
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }

  // X / undef -> undef
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
    // [su]div X, (Cond ? 0 : Y) -> div X, Y.  If the div and the select are in
    // the same basic block, then we replace the select with Y, and the
    // condition of the select with false (if the cond value is in the same
    // BB).  If the select has uses other than the div, this allows them to be
    // simplified also. Note that div X, Y is just as good as div X, 0 (undef)
    if (ConstantInt *ST = dyn_cast<ConstantInt>(SI->getOperand(1)))
      if (ST->isNullValue()) {
        Instruction *CondI = dyn_cast<Instruction>(SI->getOperand(0));
        if (CondI && CondI->getParent() == I.getParent())
          UpdateValueUsesWith(CondI, ConstantInt::getFalse());
        else if (I.getParent() != SI->getParent() || SI->hasOneUse())
          I.setOperand(1, SI->getOperand(2));
        else
          UpdateValueUsesWith(SI, SI->getOperand(2));
        return &I;
      }

    // Likewise for: [su]div X, (Cond ? Y : 0) -> div X, Y
    if (ConstantInt *ST = dyn_cast<ConstantInt>(SI->getOperand(2)))
      if (ST->isNullValue()) {
        Instruction *CondI = dyn_cast<Instruction>(SI->getOperand(0));
        if (CondI && CondI->getParent() == I.getParent())
          UpdateValueUsesWith(CondI, ConstantInt::getTrue());
        else if (I.getParent() != SI->getParent() || SI->hasOneUse())
          I.setOperand(1, SI->getOperand(1));
        else
          UpdateValueUsesWith(SI, SI->getOperand(1));
        return &I;
      }
  }

  // No common transform applied.
  return 0;
}
2999}
3000
3001/// This function implements the transforms common to both integer division
3002/// instructions (udiv and sdiv). It is called by the visitors to those integer
3003/// division instructions.
3004/// @brief Common integer divide transforms
3005Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
3006 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3007
3008 if (Instruction *Common = commonDivTransforms(I))
3009 return Common;
3010
3011 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3012 // div X, 1 == X
3013 if (RHS->equalsInt(1))
3014 return ReplaceInstUsesWith(I, Op0);
3015
3016 // (X / C1) / C2 -> X / (C1*C2)
3017 if (Instruction *LHS = dyn_cast<Instruction>(Op0))
3018 if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
3019 if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
Nick Lewycky9d798f92008-02-18 22:48:05 +00003020 if (MultiplyOverflows(RHS, LHSRHS, I.getOpcode()==Instruction::SDiv))
3021 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3022 else
3023 return BinaryOperator::create(I.getOpcode(), LHS->getOperand(0),
3024 Multiply(RHS, LHSRHS));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003025 }
3026
3027 if (!RHS->isZero()) { // avoid X udiv 0
3028 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3029 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3030 return R;
3031 if (isa<PHINode>(Op0))
3032 if (Instruction *NV = FoldOpIntoPhi(I))
3033 return NV;
3034 }
3035 }
3036
3037 // 0 / X == 0, we don't need to preserve faults!
3038 if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
3039 if (LHS->equalsInt(0))
3040 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3041
3042 return 0;
3043}
3044
/// Visit an unsigned integer division.
/// Transforms applied here (after the common integer-division transforms):
///   X udiv 2^C             -> X lshr C
///   X udiv (1<<C2 << N)    -> X lshr (N+C2)
///   X udiv (select C,P1,P2) -> select C, (X lshr log2 P1), (X lshr log2 P2)
///     when P1 and P2 are both powers of two.
Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  // X udiv C^2 -> X >> C
  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
    if (C->getValue().isPowerOf2())  // 0 not included in isPowerOf2
      return BinaryOperator::createLShr(Op0,
               ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
  }

  // X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
  if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
    if (RHSI->getOpcode() == Instruction::Shl &&
        isa<ConstantInt>(RHSI->getOperand(0))) {
      const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
      if (C1.isPowerOf2()) {
        Value *N = RHSI->getOperand(1);
        const Type *NTy = N->getType();
        // When C2 == 0 (i.e. C1 == 1), the shift amount is just N; skip the
        // add entirely.
        if (uint32_t C2 = C1.logBase2()) {
          Constant *C2V = ConstantInt::get(NTy, C2);
          N = InsertNewInstBefore(BinaryOperator::createAdd(N, C2V, "tmp"), I);
        }
        return BinaryOperator::createLShr(Op0, N);
      }
    }
  }

  // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
  // where C1&C2 are powers of two.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
      if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
        const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
        if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
          // Compute the shift amounts
          uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
          // Construct the "on true" case of the select
          Constant *TC = ConstantInt::get(Op0->getType(), TSA);
          Instruction *TSI = BinaryOperator::createLShr(
                                                 Op0, TC, SI->getName()+".t");
          TSI = InsertNewInstBefore(TSI, I);

          // Construct the "on false" case of the select
          Constant *FC = ConstantInt::get(Op0->getType(), FSA);
          Instruction *FSI = BinaryOperator::createLShr(
                                                 Op0, FC, SI->getName()+".f");
          FSI = InsertNewInstBefore(FSI, I);

          // construct the select instruction and return it.
          return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
        }
      }
  return 0;
}
3105
3106Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
3107 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3108
3109 // Handle the integer div common cases
3110 if (Instruction *Common = commonIDivTransforms(I))
3111 return Common;
3112
3113 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3114 // sdiv X, -1 == -X
3115 if (RHS->isAllOnesValue())
3116 return BinaryOperator::createNeg(Op0);
3117
3118 // -X/C -> X/-C
3119 if (Value *LHSNeg = dyn_castNegVal(Op0))
3120 return BinaryOperator::createSDiv(LHSNeg, ConstantExpr::getNeg(RHS));
3121 }
3122
3123 // If the sign bits of both operands are zero (i.e. we can prove they are
3124 // unsigned inputs), turn this into a udiv.
3125 if (I.getType()->isInteger()) {
3126 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3127 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
Dan Gohmandb3dd962007-11-05 23:16:33 +00003128 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003129 return BinaryOperator::createUDiv(Op0, Op1, I.getName());
3130 }
3131 }
3132
3133 return 0;
3134}
3135
/// Visit a floating-point division.  There are no FP-specific transforms;
/// only the transforms shared by all division instructions apply.
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  return commonDivTransforms(I);
}
3139
/// This function implements the transforms on rem instructions that work
/// regardless of the kind of rem instruction it is (urem, srem, or frem). It
/// is used by the visitors to those instructions.
/// @brief Transforms common to all three rem instructions
Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // 0 % X == 0 for integer, we don't need to preserve faults!
  if (Constant *LHS = dyn_cast<Constant>(Op0))
    if (LHS->isNullValue())
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  if (isa<UndefValue>(Op0)) {             // undef % X -> 0
    if (I.getType()->isFPOrFPVector())
      return ReplaceInstUsesWith(I, Op0); // undef % X -> undef (could be SNaN)
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);  // X % undef -> undef

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
    // rem X, (Cond ? 0 : Y) -> rem X, Y.  If the rem and the select are in
    // the same basic block, then we replace the select with Y, and the
    // condition of the select with false (if the cond value is in the same
    // BB).  If the select has uses other than the div, this allows them to be
    // simplified also.
    if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
      if (ST->isNullValue()) {
        Instruction *CondI = dyn_cast<Instruction>(SI->getOperand(0));
        if (CondI && CondI->getParent() == I.getParent())
          UpdateValueUsesWith(CondI, ConstantInt::getFalse());
        else if (I.getParent() != SI->getParent() || SI->hasOneUse())
          I.setOperand(1, SI->getOperand(2));
        else
          UpdateValueUsesWith(SI, SI->getOperand(2));
        return &I;
      }
    // Likewise for: rem X, (Cond ? Y : 0) -> rem X, Y
    if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
      if (ST->isNullValue()) {
        Instruction *CondI = dyn_cast<Instruction>(SI->getOperand(0));
        if (CondI && CondI->getParent() == I.getParent())
          UpdateValueUsesWith(CondI, ConstantInt::getTrue());
        else if (I.getParent() != SI->getParent() || SI->hasOneUse())
          I.setOperand(1, SI->getOperand(1));
        else
          UpdateValueUsesWith(SI, SI->getOperand(1));
        return &I;
      }
  }

  return 0;
}
3194
/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// @brief Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // First apply the transforms shared by urem, srem, and frem.
  if (Instruction *common = commonRemTransforms(I))
    return common;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // X % 0 == undef, we don't need to preserve faults!
    if (RHS->equalsInt(0))
      return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
    
    if (RHS->equalsInt(1))  // X % 1 == 0
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      // Push the remainder into a select or phi operand when profitable.
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI, this))
          return R;
      } else if (isa<PHINode>(Op0I)) {
        if (Instruction *NV = FoldOpIntoPhi(I))
          return NV;
      }

      // See if we can fold away this rem instruction.
      uint32_t BitWidth = cast<IntegerType>(I.getType())->getBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      if (SimplifyDemandedBits(&I, APInt::getAllOnesValue(BitWidth),
                               KnownZero, KnownOne))
        return &I;
    }
  }

  return 0;
}
3233
3234Instruction *InstCombiner::visitURem(BinaryOperator &I) {
3235 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3236
3237 if (Instruction *common = commonIRemTransforms(I))
3238 return common;
3239
3240 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3241 // X urem C^2 -> X and C
3242 // Check to see if this is an unsigned remainder with an exact power of 2,
3243 // if so, convert to a bitwise and.
3244 if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
3245 if (C->getValue().isPowerOf2())
3246 return BinaryOperator::createAnd(Op0, SubOne(C));
3247 }
3248
3249 if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
3250 // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
3251 if (RHSI->getOpcode() == Instruction::Shl &&
3252 isa<ConstantInt>(RHSI->getOperand(0))) {
3253 if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
3254 Constant *N1 = ConstantInt::getAllOnesValue(I.getType());
3255 Value *Add = InsertNewInstBefore(BinaryOperator::createAdd(RHSI, N1,
3256 "tmp"), I);
3257 return BinaryOperator::createAnd(Op0, Add);
3258 }
3259 }
3260 }
3261
3262 // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, C1), (and X, C2)
3263 // where C1&C2 are powers of two.
3264 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
3265 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3266 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3267 // STO == 0 and SFO == 0 handled above.
3268 if ((STO->getValue().isPowerOf2()) &&
3269 (SFO->getValue().isPowerOf2())) {
3270 Value *TrueAnd = InsertNewInstBefore(
3271 BinaryOperator::createAnd(Op0, SubOne(STO), SI->getName()+".t"), I);
3272 Value *FalseAnd = InsertNewInstBefore(
3273 BinaryOperator::createAnd(Op0, SubOne(SFO), SI->getName()+".f"), I);
Gabor Greifd6da1d02008-04-06 20:25:17 +00003274 return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003275 }
3276 }
3277 }
3278
3279 return 0;
3280}
3281
3282Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
3283 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3284
Dan Gohmandb3dd962007-11-05 23:16:33 +00003285 // Handle the integer rem common cases
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003286 if (Instruction *common = commonIRemTransforms(I))
3287 return common;
3288
3289 if (Value *RHSNeg = dyn_castNegVal(Op1))
3290 if (!isa<ConstantInt>(RHSNeg) ||
3291 cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive()) {
3292 // X % -Y -> X % Y
3293 AddUsesToWorkList(I);
3294 I.setOperand(1, RHSNeg);
3295 return &I;
3296 }
3297
Dan Gohmandb3dd962007-11-05 23:16:33 +00003298 // If the sign bits of both operands are zero (i.e. we can prove they are
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003299 // unsigned inputs), turn this into a urem.
Dan Gohmandb3dd962007-11-05 23:16:33 +00003300 if (I.getType()->isInteger()) {
3301 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3302 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
3303 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
3304 return BinaryOperator::createURem(Op0, Op1, I.getName());
3305 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003306 }
3307
3308 return 0;
3309}
3310
/// Visit a floating-point remainder.  There are no FP-specific transforms;
/// only the transforms shared by all remainder instructions apply.
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  return commonRemTransforms(I);
}
3314
3315// isMaxValueMinusOne - return true if this is Max-1
3316static bool isMaxValueMinusOne(const ConstantInt *C, bool isSigned) {
3317 uint32_t TypeBits = C->getType()->getPrimitiveSizeInBits();
3318 if (!isSigned)
3319 return C->getValue() == APInt::getAllOnesValue(TypeBits) - 1;
3320 return C->getValue() == APInt::getSignedMaxValue(TypeBits)-1;
3321}
3322
3323// isMinValuePlusOne - return true if this is Min+1
3324static bool isMinValuePlusOne(const ConstantInt *C, bool isSigned) {
3325 if (!isSigned)
3326 return C->getValue() == 1; // unsigned
3327
3328 // Calculate 1111111111000000000000
3329 uint32_t TypeBits = C->getType()->getPrimitiveSizeInBits();
3330 return C->getValue() == APInt::getSignedMinValue(TypeBits)+1;
3331}
3332
// isOneBitSet - Return true if there is exactly one bit set in the specified
// constant, i.e. its value is a power of two.
static bool isOneBitSet(const ConstantInt *CI) {
  return CI->getValue().isPowerOf2();
}
3338
// isHighOnes - Return true if the constant is of the form 1+0+.
// This is the same as lowones(~X).
// Rationale: ~V+1 == -V, and -V is a power of two exactly when V is a run of
// high ones followed by zeros.
static bool isHighOnes(const ConstantInt *CI) {
  return (~CI->getValue() + 1).isPowerOf2();
}
3344
3345/// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
3346/// are carefully arranged to allow folding of expressions such as:
3347///
3348/// (A < B) | (A > B) --> (A != B)
3349///
3350/// Note that this is only valid if the first and second predicates have the
3351/// same sign. Is illegal to do: (A u< B) | (A s> B)
3352///
3353/// Three bits are used to represent the condition, as follows:
3354/// 0 A > B
3355/// 1 A == B
3356/// 2 A < B
3357///
3358/// <=> Value Definition
3359/// 000 0 Always false
3360/// 001 1 A > B
3361/// 010 2 A == B
3362/// 011 3 A >= B
3363/// 100 4 A < B
3364/// 101 5 A != B
3365/// 110 6 A <= B
3366/// 111 7 Always true
3367///
3368static unsigned getICmpCode(const ICmpInst *ICI) {
3369 switch (ICI->getPredicate()) {
3370 // False -> 0
3371 case ICmpInst::ICMP_UGT: return 1; // 001
3372 case ICmpInst::ICMP_SGT: return 1; // 001
3373 case ICmpInst::ICMP_EQ: return 2; // 010
3374 case ICmpInst::ICMP_UGE: return 3; // 011
3375 case ICmpInst::ICMP_SGE: return 3; // 011
3376 case ICmpInst::ICMP_ULT: return 4; // 100
3377 case ICmpInst::ICMP_SLT: return 4; // 100
3378 case ICmpInst::ICMP_NE: return 5; // 101
3379 case ICmpInst::ICMP_ULE: return 6; // 110
3380 case ICmpInst::ICMP_SLE: return 6; // 110
3381 // True -> 7
3382 default:
3383 assert(0 && "Invalid ICmp predicate!");
3384 return 0;
3385 }
3386}
3387
3388/// getICmpValue - This is the complement of getICmpCode, which turns an
3389/// opcode and two operands into either a constant true or false, or a brand
Dan Gohmanda338742007-09-17 17:31:57 +00003390/// new ICmp instruction. The sign is passed in to determine which kind
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003391/// of predicate to use in new icmp instructions.
3392static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS) {
3393 switch (code) {
3394 default: assert(0 && "Illegal ICmp code!");
3395 case 0: return ConstantInt::getFalse();
3396 case 1:
3397 if (sign)
3398 return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
3399 else
3400 return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
3401 case 2: return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS);
3402 case 3:
3403 if (sign)
3404 return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
3405 else
3406 return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
3407 case 4:
3408 if (sign)
3409 return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
3410 else
3411 return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
3412 case 5: return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS);
3413 case 6:
3414 if (sign)
3415 return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
3416 else
3417 return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
3418 case 7: return ConstantInt::getTrue();
3419 }
3420}
3421
3422static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
3423 return (ICmpInst::isSignedPredicate(p1) == ICmpInst::isSignedPredicate(p2)) ||
3424 (ICmpInst::isSignedPredicate(p1) &&
3425 (p2 == ICmpInst::ICMP_EQ || p2 == ICmpInst::ICMP_NE)) ||
3426 (ICmpInst::isSignedPredicate(p2) &&
3427 (p1 == ICmpInst::ICMP_EQ || p1 == ICmpInst::ICMP_NE));
3428}
3429
namespace {
// FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
// (and the | and ^ analogues) by combining the three-bit codes of the two
// compares with the logical operator and decoding the result back into a
// single compare.  Used as a functor with AssociativeOpt.
struct FoldICmpLogical {
  InstCombiner &IC;
  Value *LHS, *RHS;           // Operands of the first (pattern) icmp.
  ICmpInst::Predicate pred;   // Predicate of the first icmp.
  FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
    : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
      pred(ICI->getPredicate()) {}
  // shouldApply - True if V is an icmp over the same operand pair (in either
  // order) whose predicate can legally be folded with ours.
  bool shouldApply(Value *V) const {
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
      if (PredicatesFoldable(pred, ICI->getPredicate()))
        return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
                (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
    return false;
  }
  // apply - Fold the logical operation 'Log' over the two icmps into a single
  // compare (or constant) on (LHS, RHS).
  Instruction *apply(Instruction &Log) const {
    ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
    // Canonicalize so the first icmp's operands match (LHS, RHS); note this
    // mutates the existing ICmp in place.
    if (ICI->getOperand(0) != LHS) {
      assert(ICI->getOperand(1) == LHS);
      ICI->swapOperands();  // Swap the LHS and RHS of the ICmp
    }

    ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
    unsigned LHSCode = getICmpCode(ICI);
    unsigned RHSCode = getICmpCode(RHSICI);
    unsigned Code;
    // Combine the three-bit codes with the logical operator itself.
    switch (Log.getOpcode()) {
    case Instruction::And: Code = LHSCode & RHSCode; break;
    case Instruction::Or:  Code = LHSCode | RHSCode; break;
    case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
    default: assert(0 && "Illegal logical opcode!"); return 0;
    }

    bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) ||
                    ICmpInst::isSignedPredicate(ICI->getPredicate());

    Value *RV = getICmpValue(isSigned, Code, LHS, RHS);
    if (Instruction *I = dyn_cast<Instruction>(RV))
      return I;
    // Otherwise, it's a constant boolean value...
    return IC.ReplaceInstUsesWith(Log, RV);
  }
};
} // end anonymous namespace
3475
// OptAndOp - This handles expressions of the form ((val OP C1) & C2).  Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.  Op is
// guaranteed to be a binary operator.
// Returns a replacement instruction, or null if no transform applies.
Instruction *InstCombiner::OptAndOp(Instruction *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);
  Constant *Together = 0;
  // C1 & C2, only meaningful for non-shift inner operators.
  if (!Op->isShift())
    Together = And(AndRHS, OpRHS);

  switch (Op->getOpcode()) {
  case Instruction::Xor:
    if (Op->hasOneUse()) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Instruction *And = BinaryOperator::createAnd(X, AndRHS);
      InsertNewInstBefore(And, TheAnd);
      And->takeName(Op);
      return BinaryOperator::createXor(And, Together);
    }
    break;
  case Instruction::Or:
    if (Together == AndRHS) // (X | C) & C --> C
      return ReplaceInstUsesWith(TheAnd, AndRHS);

    if (Op->hasOneUse() && Together != OpRHS) {
      // (X | C1) & C2 --> (X | (C1&C2)) & C2
      Instruction *Or = BinaryOperator::createOr(X, Together);
      InsertNewInstBefore(Or, TheAnd);
      Or->takeName(Op);
      return BinaryOperator::createAnd(Or, AndRHS);
    }
    break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit.  First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();

      // If there is only one bit set...
      if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit.  If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV-1)) == 0) {
          // If not, the only thing that can effect the output of the AND is
          // the bit specified by AndRHSV.  If that bit is set, the effect of
          // the XOR is to toggle the bit.  If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Instruction *NewAnd = BinaryOperator::createAnd(X, AndRHS);
            InsertNewInstBefore(NewAnd, TheAnd);
            NewAnd->takeName(Op);
            return BinaryOperator::createXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;

  case Instruction::Shl: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    // getLimitedValue caps an oversized shift amount at BitWidth.
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
    ConstantInt *CI = ConstantInt::get(AndRHS->getValue() & ShlMask);

    if (CI->getValue() == ShlMask) {
      // Masking out bits that the shift already masks
      return ReplaceInstUsesWith(TheAnd, Op);   // No need for the and.
    } else if (CI != AndRHS) {                  // Reducing bits set in and.
      TheAnd.setOperand(1, CI);
      return &TheAnd;
    }
    break;
  }
  case Instruction::LShr:
  {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!  This only applies to
    // unsigned shifts, because a signed shr may bring in set bits!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = ConstantInt::get(AndRHS->getValue() & ShrMask);

    if (CI->getValue() == ShrMask) {
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);
    } else if (CI != AndRHS) {
      TheAnd.setOperand(1, CI);  // Reduce bits set in and cst.
      return &TheAnd;
    }
    break;
  }
  case Instruction::AShr:
    // Signed shr.
    // See if this is shifting in some sign extension, then masking it out
    // with an and.
    if (Op->hasOneUse()) {
      uint32_t BitWidth = AndRHS->getType()->getBitWidth();
      uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
      APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
      Constant *C = ConstantInt::get(AndRHS->getValue() & ShrMask);
      if (C == AndRHS) {          // Masking out bits shifted in.
        // (Val ashr C1) & C2 -> (Val lshr C1) & C2
        // Make the argument unsigned.
        Value *ShVal = Op->getOperand(0);
        ShVal = InsertNewInstBefore(
            BinaryOperator::createLShr(ShVal, OpRHS,
                                       Op->getName()), TheAnd);
        return BinaryOperator::createAnd(ShVal, AndRHS, TheAnd.getName());
      }
    }
    break;
  }
  return 0;
}
3606
3607
3608/// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
3609/// true, otherwise (V < Lo || V >= Hi). In pratice, we emit the more efficient
3610/// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
3611/// whether to treat the V, Lo and HI as signed or not. IB is the location to
3612/// insert new instructions.
3613Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
3614 bool isSigned, bool Inside,
3615 Instruction &IB) {
3616 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
3617 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
3618 "Lo is not <= Hi in range emission code!");
3619
3620 if (Inside) {
3621 if (Lo == Hi) // Trivially false.
3622 return new ICmpInst(ICmpInst::ICMP_NE, V, V);
3623
3624 // V >= Min && V < Hi --> V < Hi
3625 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3626 ICmpInst::Predicate pred = (isSigned ?
3627 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
3628 return new ICmpInst(pred, V, Hi);
3629 }
3630
3631 // Emit V-Lo <u Hi-Lo
3632 Constant *NegLo = ConstantExpr::getNeg(Lo);
3633 Instruction *Add = BinaryOperator::createAdd(V, NegLo, V->getName()+".off");
3634 InsertNewInstBefore(Add, IB);
3635 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
3636 return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
3637 }
3638
3639 if (Lo == Hi) // Trivially true.
3640 return new ICmpInst(ICmpInst::ICMP_EQ, V, V);
3641
3642 // V < Min || V >= Hi -> V > Hi-1
3643 Hi = SubOne(cast<ConstantInt>(Hi));
3644 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3645 ICmpInst::Predicate pred = (isSigned ?
3646 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
3647 return new ICmpInst(pred, V, Hi);
3648 }
3649
3650 // Emit V-Lo >u Hi-1-Lo
3651 // Note that Hi has already had one subtracted from it, above.
3652 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
3653 Instruction *Add = BinaryOperator::createAdd(V, NegLo, V->getName()+".off");
3654 InsertNewInstBefore(Add, IB);
3655 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
3656 return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
3657}
3658
// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
// any number of 0s on either side.  The 1s are allowed to wrap from LSB to
// MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs.  0x0F0F0000 is
// not, since all 1s are not contiguous.
// On success, MB and ME receive the one-based positions of the run's lowest
// and highest set bits respectively (so e.g. 0b0110 gives MB=2, ME=3).
static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
  const APInt& V = Val->getValue();
  uint32_t BitWidth = Val->getType()->getBitWidth();
  if (!APIntOps::isShiftedMask(BitWidth, V)) return false;

  // look for the first zero bit after the run of ones
  MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
  // look for the first non-zero bit
  ME = V.getActiveBits();
  return true;
}
3674
3675/// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
3676/// where isSub determines whether the operator is a sub. If we can fold one of
3677/// the following xforms:
3678///
3679/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
3680/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
3681/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
3682///
3683/// return (A +/- B).
3684///
3685Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
3686 ConstantInt *Mask, bool isSub,
3687 Instruction &I) {
3688 Instruction *LHSI = dyn_cast<Instruction>(LHS);
3689 if (!LHSI || LHSI->getNumOperands() != 2 ||
3690 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
3691
3692 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
3693
3694 switch (LHSI->getOpcode()) {
3695 default: return 0;
3696 case Instruction::And:
3697 if (And(N, Mask) == Mask) {
3698 // If the AndRHS is a power of two minus one (0+1+), this is simple.
3699 if ((Mask->getValue().countLeadingZeros() +
3700 Mask->getValue().countPopulation()) ==
3701 Mask->getValue().getBitWidth())
3702 break;
3703
3704 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
3705 // part, we don't need any explicit masks to take them out of A. If that
3706 // is all N is, ignore it.
3707 uint32_t MB = 0, ME = 0;
3708 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
3709 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
3710 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
3711 if (MaskedValueIsZero(RHS, Mask))
3712 break;
3713 }
3714 }
3715 return 0;
3716 case Instruction::Or:
3717 case Instruction::Xor:
3718 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
3719 if ((Mask->getValue().countLeadingZeros() +
3720 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
3721 && And(N, Mask)->isZero())
3722 break;
3723 return 0;
3724 }
3725
3726 Instruction *New;
3727 if (isSub)
3728 New = BinaryOperator::createSub(LHSI->getOperand(0), RHS, "fold");
3729 else
3730 New = BinaryOperator::createAdd(LHSI->getOperand(0), RHS, "fold");
3731 return InsertNewInstBefore(New, I);
3732}
3733
3734Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
3735 bool Changed = SimplifyCommutative(I);
3736 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3737
3738 if (isa<UndefValue>(Op1)) // X & undef -> 0
3739 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3740
3741 // and X, X = X
3742 if (Op0 == Op1)
3743 return ReplaceInstUsesWith(I, Op1);
3744
3745 // See if we can simplify any instructions used by the instruction whose sole
3746 // purpose is to compute bits we don't care about.
3747 if (!isa<VectorType>(I.getType())) {
3748 uint32_t BitWidth = cast<IntegerType>(I.getType())->getBitWidth();
3749 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3750 if (SimplifyDemandedBits(&I, APInt::getAllOnesValue(BitWidth),
3751 KnownZero, KnownOne))
3752 return &I;
3753 } else {
3754 if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
3755 if (CP->isAllOnesValue()) // X & <-1,-1> -> X
3756 return ReplaceInstUsesWith(I, I.getOperand(0));
3757 } else if (isa<ConstantAggregateZero>(Op1)) {
3758 return ReplaceInstUsesWith(I, Op1); // X & <0,0> -> <0,0>
3759 }
3760 }
3761
3762 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
3763 const APInt& AndRHSMask = AndRHS->getValue();
3764 APInt NotAndRHS(~AndRHSMask);
3765
3766 // Optimize a variety of ((val OP C1) & C2) combinations...
3767 if (isa<BinaryOperator>(Op0)) {
3768 Instruction *Op0I = cast<Instruction>(Op0);
3769 Value *Op0LHS = Op0I->getOperand(0);
3770 Value *Op0RHS = Op0I->getOperand(1);
3771 switch (Op0I->getOpcode()) {
3772 case Instruction::Xor:
3773 case Instruction::Or:
3774 // If the mask is only needed on one incoming arm, push it up.
3775 if (Op0I->hasOneUse()) {
3776 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
3777 // Not masking anything out for the LHS, move to RHS.
3778 Instruction *NewRHS = BinaryOperator::createAnd(Op0RHS, AndRHS,
3779 Op0RHS->getName()+".masked");
3780 InsertNewInstBefore(NewRHS, I);
3781 return BinaryOperator::create(
3782 cast<BinaryOperator>(Op0I)->getOpcode(), Op0LHS, NewRHS);
3783 }
3784 if (!isa<Constant>(Op0RHS) &&
3785 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
3786 // Not masking anything out for the RHS, move to LHS.
3787 Instruction *NewLHS = BinaryOperator::createAnd(Op0LHS, AndRHS,
3788 Op0LHS->getName()+".masked");
3789 InsertNewInstBefore(NewLHS, I);
3790 return BinaryOperator::create(
3791 cast<BinaryOperator>(Op0I)->getOpcode(), NewLHS, Op0RHS);
3792 }
3793 }
3794
3795 break;
3796 case Instruction::Add:
3797 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
3798 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
3799 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
3800 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
3801 return BinaryOperator::createAnd(V, AndRHS);
3802 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
3803 return BinaryOperator::createAnd(V, AndRHS); // Add commutes
3804 break;
3805
3806 case Instruction::Sub:
3807 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
3808 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
3809 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
3810 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
3811 return BinaryOperator::createAnd(V, AndRHS);
3812 break;
3813 }
3814
3815 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
3816 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
3817 return Res;
3818 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
3819 // If this is an integer truncation or change from signed-to-unsigned, and
3820 // if the source is an and/or with immediate, transform it. This
3821 // frequently occurs for bitfield accesses.
3822 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
3823 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
3824 CastOp->getNumOperands() == 2)
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00003825 if (ConstantInt *AndCI = dyn_cast<ConstantInt>(CastOp->getOperand(1))) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003826 if (CastOp->getOpcode() == Instruction::And) {
3827 // Change: and (cast (and X, C1) to T), C2
3828 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
3829 // This will fold the two constants together, which may allow
3830 // other simplifications.
3831 Instruction *NewCast = CastInst::createTruncOrBitCast(
3832 CastOp->getOperand(0), I.getType(),
3833 CastOp->getName()+".shrunk");
3834 NewCast = InsertNewInstBefore(NewCast, I);
3835 // trunc_or_bitcast(C1)&C2
3836 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
3837 C3 = ConstantExpr::getAnd(C3, AndRHS);
3838 return BinaryOperator::createAnd(NewCast, C3);
3839 } else if (CastOp->getOpcode() == Instruction::Or) {
3840 // Change: and (cast (or X, C1) to T), C2
3841 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
3842 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
3843 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS) // trunc(C1)&C2
3844 return ReplaceInstUsesWith(I, AndRHS);
3845 }
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00003846 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003847 }
3848 }
3849
3850 // Try to fold constant and into select arguments.
3851 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3852 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3853 return R;
3854 if (isa<PHINode>(Op0))
3855 if (Instruction *NV = FoldOpIntoPhi(I))
3856 return NV;
3857 }
3858
3859 Value *Op0NotVal = dyn_castNotVal(Op0);
3860 Value *Op1NotVal = dyn_castNotVal(Op1);
3861
3862 if (Op0NotVal == Op1 || Op1NotVal == Op0) // A & ~A == ~A & A == 0
3863 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3864
3865 // (~A & ~B) == (~(A | B)) - De Morgan's Law
3866 if (Op0NotVal && Op1NotVal && isOnlyUse(Op0) && isOnlyUse(Op1)) {
3867 Instruction *Or = BinaryOperator::createOr(Op0NotVal, Op1NotVal,
3868 I.getName()+".demorgan");
3869 InsertNewInstBefore(Or, I);
3870 return BinaryOperator::createNot(Or);
3871 }
3872
3873 {
3874 Value *A = 0, *B = 0, *C = 0, *D = 0;
3875 if (match(Op0, m_Or(m_Value(A), m_Value(B)))) {
3876 if (A == Op1 || B == Op1) // (A | ?) & A --> A
3877 return ReplaceInstUsesWith(I, Op1);
3878
3879 // (A|B) & ~(A&B) -> A^B
3880 if (match(Op1, m_Not(m_And(m_Value(C), m_Value(D))))) {
3881 if ((A == C && B == D) || (A == D && B == C))
3882 return BinaryOperator::createXor(A, B);
3883 }
3884 }
3885
3886 if (match(Op1, m_Or(m_Value(A), m_Value(B)))) {
3887 if (A == Op0 || B == Op0) // A & (A | ?) --> A
3888 return ReplaceInstUsesWith(I, Op0);
3889
3890 // ~(A&B) & (A|B) -> A^B
3891 if (match(Op0, m_Not(m_And(m_Value(C), m_Value(D))))) {
3892 if ((A == C && B == D) || (A == D && B == C))
3893 return BinaryOperator::createXor(A, B);
3894 }
3895 }
3896
3897 if (Op0->hasOneUse() &&
3898 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
3899 if (A == Op1) { // (A^B)&A -> A&(A^B)
3900 I.swapOperands(); // Simplify below
3901 std::swap(Op0, Op1);
3902 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
3903 cast<BinaryOperator>(Op0)->swapOperands();
3904 I.swapOperands(); // Simplify below
3905 std::swap(Op0, Op1);
3906 }
3907 }
3908 if (Op1->hasOneUse() &&
3909 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
3910 if (B == Op0) { // B&(A^B) -> B&(B^A)
3911 cast<BinaryOperator>(Op1)->swapOperands();
3912 std::swap(A, B);
3913 }
3914 if (A == Op0) { // A&(A^B) -> A & ~B
3915 Instruction *NotB = BinaryOperator::createNot(B, "tmp");
3916 InsertNewInstBefore(NotB, I);
3917 return BinaryOperator::createAnd(A, NotB);
3918 }
3919 }
3920 }
3921
3922 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
3923 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3924 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
3925 return R;
3926
3927 Value *LHSVal, *RHSVal;
3928 ConstantInt *LHSCst, *RHSCst;
3929 ICmpInst::Predicate LHSCC, RHSCC;
3930 if (match(Op0, m_ICmp(LHSCC, m_Value(LHSVal), m_ConstantInt(LHSCst))))
3931 if (match(RHS, m_ICmp(RHSCC, m_Value(RHSVal), m_ConstantInt(RHSCst))))
3932 if (LHSVal == RHSVal && // Found (X icmp C1) & (X icmp C2)
3933 // ICMP_[GL]E X, CST is folded to ICMP_[GL]T elsewhere.
3934 LHSCC != ICmpInst::ICMP_UGE && LHSCC != ICmpInst::ICMP_ULE &&
3935 RHSCC != ICmpInst::ICMP_UGE && RHSCC != ICmpInst::ICMP_ULE &&
3936 LHSCC != ICmpInst::ICMP_SGE && LHSCC != ICmpInst::ICMP_SLE &&
Chris Lattner205ad1d2007-11-22 23:47:13 +00003937 RHSCC != ICmpInst::ICMP_SGE && RHSCC != ICmpInst::ICMP_SLE &&
3938
3939 // Don't try to fold ICMP_SLT + ICMP_ULT.
3940 (ICmpInst::isEquality(LHSCC) || ICmpInst::isEquality(RHSCC) ||
3941 ICmpInst::isSignedPredicate(LHSCC) ==
3942 ICmpInst::isSignedPredicate(RHSCC))) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003943 // Ensure that the larger constant is on the RHS.
Chris Lattnerda628ca2008-01-13 20:59:02 +00003944 ICmpInst::Predicate GT;
3945 if (ICmpInst::isSignedPredicate(LHSCC) ||
3946 (ICmpInst::isEquality(LHSCC) &&
3947 ICmpInst::isSignedPredicate(RHSCC)))
3948 GT = ICmpInst::ICMP_SGT;
3949 else
3950 GT = ICmpInst::ICMP_UGT;
3951
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003952 Constant *Cmp = ConstantExpr::getICmp(GT, LHSCst, RHSCst);
3953 ICmpInst *LHS = cast<ICmpInst>(Op0);
3954 if (cast<ConstantInt>(Cmp)->getZExtValue()) {
3955 std::swap(LHS, RHS);
3956 std::swap(LHSCst, RHSCst);
3957 std::swap(LHSCC, RHSCC);
3958 }
3959
3960 // At this point, we know we have have two icmp instructions
3961 // comparing a value against two constants and and'ing the result
3962 // together. Because of the above check, we know that we only have
3963 // icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here. We also know
3964 // (from the FoldICmpLogical check above), that the two constants
3965 // are not equal and that the larger constant is on the RHS
3966 assert(LHSCst != RHSCst && "Compares not folded above?");
3967
3968 switch (LHSCC) {
3969 default: assert(0 && "Unknown integer condition code!");
3970 case ICmpInst::ICMP_EQ:
3971 switch (RHSCC) {
3972 default: assert(0 && "Unknown integer condition code!");
3973 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
3974 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
3975 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
3976 return ReplaceInstUsesWith(I, ConstantInt::getFalse());
3977 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
3978 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
3979 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
3980 return ReplaceInstUsesWith(I, LHS);
3981 }
3982 case ICmpInst::ICMP_NE:
3983 switch (RHSCC) {
3984 default: assert(0 && "Unknown integer condition code!");
3985 case ICmpInst::ICMP_ULT:
3986 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
3987 return new ICmpInst(ICmpInst::ICMP_ULT, LHSVal, LHSCst);
3988 break; // (X != 13 & X u< 15) -> no change
3989 case ICmpInst::ICMP_SLT:
3990 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
3991 return new ICmpInst(ICmpInst::ICMP_SLT, LHSVal, LHSCst);
3992 break; // (X != 13 & X s< 15) -> no change
3993 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
3994 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
3995 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
3996 return ReplaceInstUsesWith(I, RHS);
3997 case ICmpInst::ICMP_NE:
3998 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
3999 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4000 Instruction *Add = BinaryOperator::createAdd(LHSVal, AddCST,
4001 LHSVal->getName()+".off");
4002 InsertNewInstBefore(Add, I);
4003 return new ICmpInst(ICmpInst::ICMP_UGT, Add,
4004 ConstantInt::get(Add->getType(), 1));
4005 }
4006 break; // (X != 13 & X != 15) -> no change
4007 }
4008 break;
4009 case ICmpInst::ICMP_ULT:
4010 switch (RHSCC) {
4011 default: assert(0 && "Unknown integer condition code!");
4012 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
4013 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
4014 return ReplaceInstUsesWith(I, ConstantInt::getFalse());
4015 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
4016 break;
4017 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
4018 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
4019 return ReplaceInstUsesWith(I, LHS);
4020 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
4021 break;
4022 }
4023 break;
4024 case ICmpInst::ICMP_SLT:
4025 switch (RHSCC) {
4026 default: assert(0 && "Unknown integer condition code!");
4027 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
4028 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
4029 return ReplaceInstUsesWith(I, ConstantInt::getFalse());
4030 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
4031 break;
4032 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
4033 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
4034 return ReplaceInstUsesWith(I, LHS);
4035 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
4036 break;
4037 }
4038 break;
4039 case ICmpInst::ICMP_UGT:
4040 switch (RHSCC) {
4041 default: assert(0 && "Unknown integer condition code!");
4042 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X > 13
4043 return ReplaceInstUsesWith(I, LHS);
4044 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
4045 return ReplaceInstUsesWith(I, RHS);
4046 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
4047 break;
4048 case ICmpInst::ICMP_NE:
4049 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
4050 return new ICmpInst(LHSCC, LHSVal, RHSCst);
4051 break; // (X u> 13 & X != 15) -> no change
4052 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) ->(X-14) <u 1
4053 return InsertRangeTest(LHSVal, AddOne(LHSCst), RHSCst, false,
4054 true, I);
4055 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
4056 break;
4057 }
4058 break;
4059 case ICmpInst::ICMP_SGT:
4060 switch (RHSCC) {
4061 default: assert(0 && "Unknown integer condition code!");
Chris Lattnerab0fc252007-11-16 06:04:17 +00004062 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004063 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
4064 return ReplaceInstUsesWith(I, RHS);
4065 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
4066 break;
4067 case ICmpInst::ICMP_NE:
4068 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
4069 return new ICmpInst(LHSCC, LHSVal, RHSCst);
4070 break; // (X s> 13 & X != 15) -> no change
4071 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) ->(X-14) s< 1
4072 return InsertRangeTest(LHSVal, AddOne(LHSCst), RHSCst, true,
4073 true, I);
4074 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
4075 break;
4076 }
4077 break;
4078 }
4079 }
4080 }
4081
4082 // fold (and (cast A), (cast B)) -> (cast (and A, B))
4083 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
4084 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4085 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
4086 const Type *SrcTy = Op0C->getOperand(0)->getType();
4087 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
4088 // Only do this if the casts both really cause code to be generated.
4089 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4090 I.getType(), TD) &&
4091 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4092 I.getType(), TD)) {
4093 Instruction *NewOp = BinaryOperator::createAnd(Op0C->getOperand(0),
4094 Op1C->getOperand(0),
4095 I.getName());
4096 InsertNewInstBefore(NewOp, I);
4097 return CastInst::create(Op0C->getOpcode(), NewOp, I.getType());
4098 }
4099 }
4100
4101 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
4102 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4103 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4104 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4105 SI0->getOperand(1) == SI1->getOperand(1) &&
4106 (SI0->hasOneUse() || SI1->hasOneUse())) {
4107 Instruction *NewOp =
4108 InsertNewInstBefore(BinaryOperator::createAnd(SI0->getOperand(0),
4109 SI1->getOperand(0),
4110 SI0->getName()), I);
4111 return BinaryOperator::create(SI1->getOpcode(), NewOp,
4112 SI1->getOperand(1));
4113 }
4114 }
4115
Chris Lattner91882432007-10-24 05:38:08 +00004116 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
4117 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4118 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) {
4119 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
4120 RHS->getPredicate() == FCmpInst::FCMP_ORD)
4121 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4122 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4123 // If either of the constants are nans, then the whole thing returns
4124 // false.
Chris Lattnera6c7dce2007-10-24 18:54:45 +00004125 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
Chris Lattner91882432007-10-24 05:38:08 +00004126 return ReplaceInstUsesWith(I, ConstantInt::getFalse());
4127 return new FCmpInst(FCmpInst::FCMP_ORD, LHS->getOperand(0),
4128 RHS->getOperand(0));
4129 }
4130 }
4131 }
4132
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004133 return Changed ? &I : 0;
4134}
4135
/// CollectBSwapParts - Look to see if the specified value defines a single byte
/// in the result.  If it does, and if the specified byte hasn't been filled in
/// yet, fill it in and return false.
///
/// V          - One operand of the 'or' tree being matched against a bswap.
/// ByteValues - One slot per byte of the eventual bswap result; slot N holds
///              the value whose byte (size-1-N) supplies result byte N, or
///              null if that byte hasn't been located yet.
///
/// Returns true on failure, i.e. when V cannot participate in a bswap idiom.
static bool CollectBSwapParts(Value *V, SmallVector<Value*, 8> &ByteValues) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0) return true;  // Arguments/constants can't define a bswap byte.

  // If this is an or instruction, it is an inner node of the bswap: recurse
  // into both operands.  A failure on either side fails the whole match.
  if (I->getOpcode() == Instruction::Or)
    return CollectBSwapParts(I->getOperand(0), ByteValues) ||
           CollectBSwapParts(I->getOperand(1), ByteValues);

  uint32_t BitWidth = I->getType()->getPrimitiveSizeInBits();
  // If this is a shift by a constant int, and it is "24", then its operand
  // defines a byte.  We only handle unsigned types here.
  if (I->isShift() && isa<ConstantInt>(I->getOperand(1))) {
    // Not shifting the entire input by N-1 bytes?
    if (cast<ConstantInt>(I->getOperand(1))->getLimitedValue(BitWidth) !=
        8*(ByteValues.size()-1))
      return true;

    unsigned DestNo;
    if (I->getOpcode() == Instruction::Shl) {
      // X << 24 defines the top byte with the lowest of the input bytes.
      DestNo = ByteValues.size()-1;
    } else {
      // X >>u 24 defines the low byte with the highest of the input bytes.
      DestNo = 0;
    }

    // If the destination byte value is already defined, the values are or'd
    // together, which isn't a bswap (unless it's an or of the same bits).
    if (ByteValues[DestNo] && ByteValues[DestNo] != I->getOperand(0))
      return true;
    ByteValues[DestNo] = I->getOperand(0);
    return false;
  }

  // Otherwise, we can only handle and(shift X, imm), imm).  Bail out of if we
  // don't have this.
  Value *Shift = 0, *ShiftLHS = 0;
  ConstantInt *AndAmt = 0, *ShiftAmt = 0;
  if (!match(I, m_And(m_Value(Shift), m_ConstantInt(AndAmt))) ||
      !match(Shift, m_Shift(m_Value(ShiftLHS), m_ConstantInt(ShiftAmt))))
    return true;
  Instruction *SI = cast<Instruction>(Shift);

  // Make sure that the shift amount is by a multiple of 8 and isn't too big.
  if (ShiftAmt->getLimitedValue(BitWidth) & 7 ||
      ShiftAmt->getLimitedValue(BitWidth) > 8*ByteValues.size())
    return true;

  // Turn 0xFF -> 0, 0xFF00 -> 1, 0xFF0000 -> 2, etc.
  unsigned DestByte;
  // A mask with more than 64 active bits can never equal the single-byte
  // masks tested below, so reject it up front before calling getZExtValue.
  if (AndAmt->getValue().getActiveBits() > 64)
    return true;
  uint64_t AndAmtVal = AndAmt->getZExtValue();
  for (DestByte = 0; DestByte != ByteValues.size(); ++DestByte)
    if (AndAmtVal == uint64_t(0xFF) << 8*DestByte)
      break;
  // Unknown mask for bswap.
  if (DestByte == ByteValues.size()) return true;

  // Compute which source byte the shift moves into DestByte.  Note that for
  // a shl, DestByte - ShiftBytes may wrap around an unsigned; the SrcByte
  // equality check below then rejects the match.
  unsigned ShiftBytes = ShiftAmt->getZExtValue()/8;
  unsigned SrcByte;
  if (SI->getOpcode() == Instruction::Shl)
    SrcByte = DestByte - ShiftBytes;
  else
    SrcByte = DestByte + ShiftBytes;

  // If the SrcByte isn't a bswapped value from the DestByte, reject it.
  if (SrcByte != ByteValues.size()-DestByte-1)
    return true;

  // If the destination byte value is already defined, the values are or'd
  // together, which isn't a bswap (unless it's an or of the same bits).
  if (ByteValues[DestByte] && ByteValues[DestByte] != SI->getOperand(0))
    return true;
  ByteValues[DestByte] = SI->getOperand(0);
  return false;
}
4217
4218/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
4219/// If so, insert the new bswap intrinsic and return it.
4220Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
4221 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
4222 if (!ITy || ITy->getBitWidth() % 16)
4223 return 0; // Can only bswap pairs of bytes. Can't do vectors.
4224
4225 /// ByteValues - For each byte of the result, we keep track of which value
4226 /// defines each byte.
4227 SmallVector<Value*, 8> ByteValues;
4228 ByteValues.resize(ITy->getBitWidth()/8);
4229
4230 // Try to find all the pieces corresponding to the bswap.
4231 if (CollectBSwapParts(I.getOperand(0), ByteValues) ||
4232 CollectBSwapParts(I.getOperand(1), ByteValues))
4233 return 0;
4234
4235 // Check to see if all of the bytes come from the same value.
4236 Value *V = ByteValues[0];
4237 if (V == 0) return 0; // Didn't find a byte? Must be zero.
4238
4239 // Check to make sure that all of the bytes come from the same value.
4240 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
4241 if (ByteValues[i] != V)
4242 return 0;
Chandler Carrutha228e392007-08-04 01:51:18 +00004243 const Type *Tys[] = { ITy };
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004244 Module *M = I.getParent()->getParent()->getParent();
Chandler Carrutha228e392007-08-04 01:51:18 +00004245 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
Gabor Greifd6da1d02008-04-06 20:25:17 +00004246 return CallInst::Create(F, V);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004247}
4248
4249
4250Instruction *InstCombiner::visitOr(BinaryOperator &I) {
4251 bool Changed = SimplifyCommutative(I);
4252 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4253
4254 if (isa<UndefValue>(Op1)) // X | undef -> -1
4255 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4256
4257 // or X, X = X
4258 if (Op0 == Op1)
4259 return ReplaceInstUsesWith(I, Op0);
4260
4261 // See if we can simplify any instructions used by the instruction whose sole
4262 // purpose is to compute bits we don't care about.
4263 if (!isa<VectorType>(I.getType())) {
4264 uint32_t BitWidth = cast<IntegerType>(I.getType())->getBitWidth();
4265 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
4266 if (SimplifyDemandedBits(&I, APInt::getAllOnesValue(BitWidth),
4267 KnownZero, KnownOne))
4268 return &I;
4269 } else if (isa<ConstantAggregateZero>(Op1)) {
4270 return ReplaceInstUsesWith(I, Op0); // X | <0,0> -> X
4271 } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
4272 if (CP->isAllOnesValue()) // X | <-1,-1> -> <-1,-1>
4273 return ReplaceInstUsesWith(I, I.getOperand(1));
4274 }
4275
4276
4277
4278 // or X, -1 == -1
4279 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
4280 ConstantInt *C1 = 0; Value *X = 0;
4281 // (X & C1) | C2 --> (X | C2) & (C1|C2)
4282 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) && isOnlyUse(Op0)) {
4283 Instruction *Or = BinaryOperator::createOr(X, RHS);
4284 InsertNewInstBefore(Or, I);
4285 Or->takeName(Op0);
4286 return BinaryOperator::createAnd(Or,
4287 ConstantInt::get(RHS->getValue() | C1->getValue()));
4288 }
4289
4290 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
4291 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) && isOnlyUse(Op0)) {
4292 Instruction *Or = BinaryOperator::createOr(X, RHS);
4293 InsertNewInstBefore(Or, I);
4294 Or->takeName(Op0);
4295 return BinaryOperator::createXor(Or,
4296 ConstantInt::get(C1->getValue() & ~RHS->getValue()));
4297 }
4298
4299 // Try to fold constant and into select arguments.
4300 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4301 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4302 return R;
4303 if (isa<PHINode>(Op0))
4304 if (Instruction *NV = FoldOpIntoPhi(I))
4305 return NV;
4306 }
4307
4308 Value *A = 0, *B = 0;
4309 ConstantInt *C1 = 0, *C2 = 0;
4310
4311 if (match(Op0, m_And(m_Value(A), m_Value(B))))
4312 if (A == Op1 || B == Op1) // (A & ?) | A --> A
4313 return ReplaceInstUsesWith(I, Op1);
4314 if (match(Op1, m_And(m_Value(A), m_Value(B))))
4315 if (A == Op0 || B == Op0) // A | (A & ?) --> A
4316 return ReplaceInstUsesWith(I, Op0);
4317
4318 // (A | B) | C and A | (B | C) -> bswap if possible.
4319 // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible.
4320 if (match(Op0, m_Or(m_Value(), m_Value())) ||
4321 match(Op1, m_Or(m_Value(), m_Value())) ||
4322 (match(Op0, m_Shift(m_Value(), m_Value())) &&
4323 match(Op1, m_Shift(m_Value(), m_Value())))) {
4324 if (Instruction *BSwap = MatchBSwap(I))
4325 return BSwap;
4326 }
4327
4328 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
4329 if (Op0->hasOneUse() && match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
4330 MaskedValueIsZero(Op1, C1->getValue())) {
4331 Instruction *NOr = BinaryOperator::createOr(A, Op1);
4332 InsertNewInstBefore(NOr, I);
4333 NOr->takeName(Op0);
4334 return BinaryOperator::createXor(NOr, C1);
4335 }
4336
4337 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
4338 if (Op1->hasOneUse() && match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
4339 MaskedValueIsZero(Op0, C1->getValue())) {
4340 Instruction *NOr = BinaryOperator::createOr(A, Op0);
4341 InsertNewInstBefore(NOr, I);
4342 NOr->takeName(Op0);
4343 return BinaryOperator::createXor(NOr, C1);
4344 }
4345
4346 // (A & C)|(B & D)
4347 Value *C = 0, *D = 0;
4348 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
4349 match(Op1, m_And(m_Value(B), m_Value(D)))) {
4350 Value *V1 = 0, *V2 = 0, *V3 = 0;
4351 C1 = dyn_cast<ConstantInt>(C);
4352 C2 = dyn_cast<ConstantInt>(D);
4353 if (C1 && C2) { // (A & C1)|(B & C2)
4354 // If we have: ((V + N) & C1) | (V & C2)
4355 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
4356 // replace with V+N.
4357 if (C1->getValue() == ~C2->getValue()) {
4358 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
4359 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
4360 // Add commutes, try both ways.
4361 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
4362 return ReplaceInstUsesWith(I, A);
4363 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
4364 return ReplaceInstUsesWith(I, A);
4365 }
4366 // Or commutes, try both ways.
4367 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
4368 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
4369 // Add commutes, try both ways.
4370 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
4371 return ReplaceInstUsesWith(I, B);
4372 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
4373 return ReplaceInstUsesWith(I, B);
4374 }
4375 }
4376 V1 = 0; V2 = 0; V3 = 0;
4377 }
4378
4379 // Check to see if we have any common things being and'ed. If so, find the
4380 // terms for V1 & (V2|V3).
4381 if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
4382 if (A == B) // (A & C)|(A & D) == A & (C|D)
4383 V1 = A, V2 = C, V3 = D;
4384 else if (A == D) // (A & C)|(B & A) == A & (B|C)
4385 V1 = A, V2 = B, V3 = C;
4386 else if (C == B) // (A & C)|(C & D) == C & (A|D)
4387 V1 = C, V2 = A, V3 = D;
4388 else if (C == D) // (A & C)|(B & C) == C & (A|B)
4389 V1 = C, V2 = A, V3 = B;
4390
4391 if (V1) {
4392 Value *Or =
4393 InsertNewInstBefore(BinaryOperator::createOr(V2, V3, "tmp"), I);
4394 return BinaryOperator::createAnd(V1, Or);
4395 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004396 }
4397 }
4398
4399 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
4400 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4401 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4402 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4403 SI0->getOperand(1) == SI1->getOperand(1) &&
4404 (SI0->hasOneUse() || SI1->hasOneUse())) {
4405 Instruction *NewOp =
4406 InsertNewInstBefore(BinaryOperator::createOr(SI0->getOperand(0),
4407 SI1->getOperand(0),
4408 SI0->getName()), I);
4409 return BinaryOperator::create(SI1->getOpcode(), NewOp,
4410 SI1->getOperand(1));
4411 }
4412 }
4413
4414 if (match(Op0, m_Not(m_Value(A)))) { // ~A | Op1
4415 if (A == Op1) // ~A | A == -1
4416 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4417 } else {
4418 A = 0;
4419 }
4420 // Note, A is still live here!
4421 if (match(Op1, m_Not(m_Value(B)))) { // Op0 | ~B
4422 if (Op0 == B)
4423 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4424
4425 // (~A | ~B) == (~(A & B)) - De Morgan's Law
4426 if (A && isOnlyUse(Op0) && isOnlyUse(Op1)) {
4427 Value *And = InsertNewInstBefore(BinaryOperator::createAnd(A, B,
4428 I.getName()+".demorgan"), I);
4429 return BinaryOperator::createNot(And);
4430 }
4431 }
4432
4433 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
4434 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
4435 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4436 return R;
4437
4438 Value *LHSVal, *RHSVal;
4439 ConstantInt *LHSCst, *RHSCst;
4440 ICmpInst::Predicate LHSCC, RHSCC;
4441 if (match(Op0, m_ICmp(LHSCC, m_Value(LHSVal), m_ConstantInt(LHSCst))))
4442 if (match(RHS, m_ICmp(RHSCC, m_Value(RHSVal), m_ConstantInt(RHSCst))))
4443 if (LHSVal == RHSVal && // Found (X icmp C1) | (X icmp C2)
4444 // icmp [us][gl]e x, cst is folded to icmp [us][gl]t elsewhere.
4445 LHSCC != ICmpInst::ICMP_UGE && LHSCC != ICmpInst::ICMP_ULE &&
4446 RHSCC != ICmpInst::ICMP_UGE && RHSCC != ICmpInst::ICMP_ULE &&
4447 LHSCC != ICmpInst::ICMP_SGE && LHSCC != ICmpInst::ICMP_SLE &&
4448 RHSCC != ICmpInst::ICMP_SGE && RHSCC != ICmpInst::ICMP_SLE &&
4449 // We can't fold (ugt x, C) | (sgt x, C2).
4450 PredicatesFoldable(LHSCC, RHSCC)) {
4451 // Ensure that the larger constant is on the RHS.
4452 ICmpInst *LHS = cast<ICmpInst>(Op0);
4453 bool NeedsSwap;
4454 if (ICmpInst::isSignedPredicate(LHSCC))
4455 NeedsSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4456 else
4457 NeedsSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4458
4459 if (NeedsSwap) {
4460 std::swap(LHS, RHS);
4461 std::swap(LHSCst, RHSCst);
4462 std::swap(LHSCC, RHSCC);
4463 }
4464
4465 // At this point, we know we have have two icmp instructions
4466 // comparing a value against two constants and or'ing the result
4467 // together. Because of the above check, we know that we only have
4468 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
4469 // FoldICmpLogical check above), that the two constants are not
4470 // equal.
4471 assert(LHSCst != RHSCst && "Compares not folded above?");
4472
4473 switch (LHSCC) {
4474 default: assert(0 && "Unknown integer condition code!");
4475 case ICmpInst::ICMP_EQ:
4476 switch (RHSCC) {
4477 default: assert(0 && "Unknown integer condition code!");
4478 case ICmpInst::ICMP_EQ:
4479 if (LHSCst == SubOne(RHSCst)) {// (X == 13 | X == 14) -> X-13 <u 2
4480 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4481 Instruction *Add = BinaryOperator::createAdd(LHSVal, AddCST,
4482 LHSVal->getName()+".off");
4483 InsertNewInstBefore(Add, I);
4484 AddCST = Subtract(AddOne(RHSCst), LHSCst);
4485 return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
4486 }
4487 break; // (X == 13 | X == 15) -> no change
4488 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
4489 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
4490 break;
4491 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
4492 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
4493 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
4494 return ReplaceInstUsesWith(I, RHS);
4495 }
4496 break;
4497 case ICmpInst::ICMP_NE:
4498 switch (RHSCC) {
4499 default: assert(0 && "Unknown integer condition code!");
4500 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
4501 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
4502 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
4503 return ReplaceInstUsesWith(I, LHS);
4504 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
4505 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
4506 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
4507 return ReplaceInstUsesWith(I, ConstantInt::getTrue());
4508 }
4509 break;
4510 case ICmpInst::ICMP_ULT:
4511 switch (RHSCC) {
4512 default: assert(0 && "Unknown integer condition code!");
4513 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
4514 break;
4515 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) ->(X-13) u> 2
Chris Lattner26376862007-11-01 02:18:41 +00004516 // If RHSCst is [us]MAXINT, it is always false. Not handling
4517 // this can cause overflow.
4518 if (RHSCst->isMaxValue(false))
4519 return ReplaceInstUsesWith(I, LHS);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004520 return InsertRangeTest(LHSVal, LHSCst, AddOne(RHSCst), false,
4521 false, I);
4522 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
4523 break;
4524 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
4525 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
4526 return ReplaceInstUsesWith(I, RHS);
4527 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
4528 break;
4529 }
4530 break;
4531 case ICmpInst::ICMP_SLT:
4532 switch (RHSCC) {
4533 default: assert(0 && "Unknown integer condition code!");
4534 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
4535 break;
4536 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) ->(X-13) s> 2
Chris Lattner26376862007-11-01 02:18:41 +00004537 // If RHSCst is [us]MAXINT, it is always false. Not handling
4538 // this can cause overflow.
4539 if (RHSCst->isMaxValue(true))
4540 return ReplaceInstUsesWith(I, LHS);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004541 return InsertRangeTest(LHSVal, LHSCst, AddOne(RHSCst), true,
4542 false, I);
4543 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
4544 break;
4545 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
4546 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
4547 return ReplaceInstUsesWith(I, RHS);
4548 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
4549 break;
4550 }
4551 break;
4552 case ICmpInst::ICMP_UGT:
4553 switch (RHSCC) {
4554 default: assert(0 && "Unknown integer condition code!");
4555 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
4556 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
4557 return ReplaceInstUsesWith(I, LHS);
4558 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
4559 break;
4560 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
4561 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
4562 return ReplaceInstUsesWith(I, ConstantInt::getTrue());
4563 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
4564 break;
4565 }
4566 break;
4567 case ICmpInst::ICMP_SGT:
4568 switch (RHSCC) {
4569 default: assert(0 && "Unknown integer condition code!");
4570 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
4571 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
4572 return ReplaceInstUsesWith(I, LHS);
4573 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
4574 break;
4575 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
4576 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
4577 return ReplaceInstUsesWith(I, ConstantInt::getTrue());
4578 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
4579 break;
4580 }
4581 break;
4582 }
4583 }
4584 }
4585
4586 // fold (or (cast A), (cast B)) -> (cast (or A, B))
Chris Lattner91882432007-10-24 05:38:08 +00004587 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004588 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4589 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
Evan Chenge3779cf2008-03-24 00:21:34 +00004590 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
4591 !isa<ICmpInst>(Op1C->getOperand(0))) {
4592 const Type *SrcTy = Op0C->getOperand(0)->getType();
4593 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
4594 // Only do this if the casts both really cause code to be
4595 // generated.
4596 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4597 I.getType(), TD) &&
4598 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4599 I.getType(), TD)) {
4600 Instruction *NewOp = BinaryOperator::createOr(Op0C->getOperand(0),
4601 Op1C->getOperand(0),
4602 I.getName());
4603 InsertNewInstBefore(NewOp, I);
4604 return CastInst::create(Op0C->getOpcode(), NewOp, I.getType());
4605 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004606 }
4607 }
Chris Lattner91882432007-10-24 05:38:08 +00004608 }
4609
4610
4611 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
4612 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4613 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) {
4614 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
Chris Lattnerbe9e63e2008-02-29 06:09:11 +00004615 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
4616 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType())
Chris Lattner91882432007-10-24 05:38:08 +00004617 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4618 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4619 // If either of the constants are nans, then the whole thing returns
4620 // true.
Chris Lattnera6c7dce2007-10-24 18:54:45 +00004621 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
Chris Lattner91882432007-10-24 05:38:08 +00004622 return ReplaceInstUsesWith(I, ConstantInt::getTrue());
4623
4624 // Otherwise, no need to compare the two constants, compare the
4625 // rest.
4626 return new FCmpInst(FCmpInst::FCMP_UNO, LHS->getOperand(0),
4627 RHS->getOperand(0));
4628 }
4629 }
4630 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004631
4632 return Changed ? &I : 0;
4633}
4634
4635// XorSelf - Implements: X ^ X --> 0
4636struct XorSelf {
4637 Value *RHS;
4638 XorSelf(Value *rhs) : RHS(rhs) {}
4639 bool shouldApply(Value *LHS) const { return LHS == RHS; }
4640 Instruction *apply(BinaryOperator &Xor) const {
4641 return &Xor;
4642 }
4643};
4644
4645
4646Instruction *InstCombiner::visitXor(BinaryOperator &I) {
4647 bool Changed = SimplifyCommutative(I);
4648 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4649
Evan Chenge5cd8032008-03-25 20:07:13 +00004650 if (isa<UndefValue>(Op1)) {
4651 if (isa<UndefValue>(Op0))
4652 // Handle undef ^ undef -> 0 special case. This is a common
4653 // idiom (misuse).
4654 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004655 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
Evan Chenge5cd8032008-03-25 20:07:13 +00004656 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004657
4658 // xor X, X = 0, even if X is nested in a sequence of Xor's.
4659 if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
Chris Lattnerb933ea62007-08-05 08:47:58 +00004660 assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004661 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
4662 }
4663
4664 // See if we can simplify any instructions used by the instruction whose sole
4665 // purpose is to compute bits we don't care about.
4666 if (!isa<VectorType>(I.getType())) {
4667 uint32_t BitWidth = cast<IntegerType>(I.getType())->getBitWidth();
4668 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
4669 if (SimplifyDemandedBits(&I, APInt::getAllOnesValue(BitWidth),
4670 KnownZero, KnownOne))
4671 return &I;
4672 } else if (isa<ConstantAggregateZero>(Op1)) {
4673 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
4674 }
4675
4676 // Is this a ~ operation?
4677 if (Value *NotOp = dyn_castNotVal(&I)) {
4678 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
4679 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
4680 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
4681 if (Op0I->getOpcode() == Instruction::And ||
4682 Op0I->getOpcode() == Instruction::Or) {
4683 if (dyn_castNotVal(Op0I->getOperand(1))) Op0I->swapOperands();
4684 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
4685 Instruction *NotY =
4686 BinaryOperator::createNot(Op0I->getOperand(1),
4687 Op0I->getOperand(1)->getName()+".not");
4688 InsertNewInstBefore(NotY, I);
4689 if (Op0I->getOpcode() == Instruction::And)
4690 return BinaryOperator::createOr(Op0NotVal, NotY);
4691 else
4692 return BinaryOperator::createAnd(Op0NotVal, NotY);
4693 }
4694 }
4695 }
4696 }
4697
4698
4699 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
Nick Lewycky1405e922007-08-06 20:04:16 +00004700 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
4701 if (RHS == ConstantInt::getTrue() && Op0->hasOneUse()) {
4702 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004703 return new ICmpInst(ICI->getInversePredicate(),
4704 ICI->getOperand(0), ICI->getOperand(1));
4705
Nick Lewycky1405e922007-08-06 20:04:16 +00004706 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
4707 return new FCmpInst(FCI->getInversePredicate(),
4708 FCI->getOperand(0), FCI->getOperand(1));
4709 }
4710
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004711 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
4712 // ~(c-X) == X-c-1 == X+(-c-1)
4713 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
4714 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
4715 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
4716 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
4717 ConstantInt::get(I.getType(), 1));
4718 return BinaryOperator::createAdd(Op0I->getOperand(1), ConstantRHS);
4719 }
4720
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00004721 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004722 if (Op0I->getOpcode() == Instruction::Add) {
4723 // ~(X-c) --> (-c-1)-X
4724 if (RHS->isAllOnesValue()) {
4725 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
4726 return BinaryOperator::createSub(
4727 ConstantExpr::getSub(NegOp0CI,
4728 ConstantInt::get(I.getType(), 1)),
4729 Op0I->getOperand(0));
4730 } else if (RHS->getValue().isSignBit()) {
4731 // (X + C) ^ signbit -> (X + C + signbit)
4732 Constant *C = ConstantInt::get(RHS->getValue() + Op0CI->getValue());
4733 return BinaryOperator::createAdd(Op0I->getOperand(0), C);
4734
4735 }
4736 } else if (Op0I->getOpcode() == Instruction::Or) {
4737 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
4738 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
4739 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
4740 // Anything in both C1 and C2 is known to be zero, remove it from
4741 // NewRHS.
4742 Constant *CommonBits = And(Op0CI, RHS);
4743 NewRHS = ConstantExpr::getAnd(NewRHS,
4744 ConstantExpr::getNot(CommonBits));
4745 AddToWorkList(Op0I);
4746 I.setOperand(0, Op0I->getOperand(0));
4747 I.setOperand(1, NewRHS);
4748 return &I;
4749 }
4750 }
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00004751 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004752 }
4753
4754 // Try to fold constant and into select arguments.
4755 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4756 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4757 return R;
4758 if (isa<PHINode>(Op0))
4759 if (Instruction *NV = FoldOpIntoPhi(I))
4760 return NV;
4761 }
4762
4763 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
4764 if (X == Op1)
4765 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4766
4767 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
4768 if (X == Op0)
4769 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4770
4771
4772 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
4773 if (Op1I) {
4774 Value *A, *B;
4775 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
4776 if (A == Op0) { // B^(B|A) == (A|B)^B
4777 Op1I->swapOperands();
4778 I.swapOperands();
4779 std::swap(Op0, Op1);
4780 } else if (B == Op0) { // B^(A|B) == (A|B)^B
4781 I.swapOperands(); // Simplified below.
4782 std::swap(Op0, Op1);
4783 }
4784 } else if (match(Op1I, m_Xor(m_Value(A), m_Value(B)))) {
4785 if (Op0 == A) // A^(A^B) == B
4786 return ReplaceInstUsesWith(I, B);
4787 else if (Op0 == B) // A^(B^A) == B
4788 return ReplaceInstUsesWith(I, A);
4789 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) && Op1I->hasOneUse()){
4790 if (A == Op0) { // A^(A&B) -> A^(B&A)
4791 Op1I->swapOperands();
4792 std::swap(A, B);
4793 }
4794 if (B == Op0) { // A^(B&A) -> (B&A)^A
4795 I.swapOperands(); // Simplified below.
4796 std::swap(Op0, Op1);
4797 }
4798 }
4799 }
4800
4801 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
4802 if (Op0I) {
4803 Value *A, *B;
4804 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) && Op0I->hasOneUse()) {
4805 if (A == Op1) // (B|A)^B == (A|B)^B
4806 std::swap(A, B);
4807 if (B == Op1) { // (A|B)^B == A & ~B
4808 Instruction *NotB =
4809 InsertNewInstBefore(BinaryOperator::createNot(Op1, "tmp"), I);
4810 return BinaryOperator::createAnd(A, NotB);
4811 }
4812 } else if (match(Op0I, m_Xor(m_Value(A), m_Value(B)))) {
4813 if (Op1 == A) // (A^B)^A == B
4814 return ReplaceInstUsesWith(I, B);
4815 else if (Op1 == B) // (B^A)^A == B
4816 return ReplaceInstUsesWith(I, A);
4817 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) && Op0I->hasOneUse()){
4818 if (A == Op1) // (A&B)^A -> (B&A)^A
4819 std::swap(A, B);
4820 if (B == Op1 && // (B&A)^A == ~B & A
4821 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
4822 Instruction *N =
4823 InsertNewInstBefore(BinaryOperator::createNot(A, "tmp"), I);
4824 return BinaryOperator::createAnd(N, Op1);
4825 }
4826 }
4827 }
4828
4829 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
4830 if (Op0I && Op1I && Op0I->isShift() &&
4831 Op0I->getOpcode() == Op1I->getOpcode() &&
4832 Op0I->getOperand(1) == Op1I->getOperand(1) &&
4833 (Op1I->hasOneUse() || Op1I->hasOneUse())) {
4834 Instruction *NewOp =
4835 InsertNewInstBefore(BinaryOperator::createXor(Op0I->getOperand(0),
4836 Op1I->getOperand(0),
4837 Op0I->getName()), I);
4838 return BinaryOperator::create(Op1I->getOpcode(), NewOp,
4839 Op1I->getOperand(1));
4840 }
4841
4842 if (Op0I && Op1I) {
4843 Value *A, *B, *C, *D;
4844 // (A & B)^(A | B) -> A ^ B
4845 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
4846 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
4847 if ((A == C && B == D) || (A == D && B == C))
4848 return BinaryOperator::createXor(A, B);
4849 }
4850 // (A | B)^(A & B) -> A ^ B
4851 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
4852 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
4853 if ((A == C && B == D) || (A == D && B == C))
4854 return BinaryOperator::createXor(A, B);
4855 }
4856
4857 // (A & B)^(C & D)
4858 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
4859 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
4860 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
4861 // (X & Y)^(X & Y) -> (Y^Z) & X
4862 Value *X = 0, *Y = 0, *Z = 0;
4863 if (A == C)
4864 X = A, Y = B, Z = D;
4865 else if (A == D)
4866 X = A, Y = B, Z = C;
4867 else if (B == C)
4868 X = B, Y = A, Z = D;
4869 else if (B == D)
4870 X = B, Y = A, Z = C;
4871
4872 if (X) {
4873 Instruction *NewOp =
4874 InsertNewInstBefore(BinaryOperator::createXor(Y, Z, Op0->getName()), I);
4875 return BinaryOperator::createAnd(NewOp, X);
4876 }
4877 }
4878 }
4879
4880 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
4881 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
4882 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4883 return R;
4884
4885 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
Chris Lattner91882432007-10-24 05:38:08 +00004886 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004887 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4888 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
4889 const Type *SrcTy = Op0C->getOperand(0)->getType();
4890 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
4891 // Only do this if the casts both really cause code to be generated.
4892 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4893 I.getType(), TD) &&
4894 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4895 I.getType(), TD)) {
4896 Instruction *NewOp = BinaryOperator::createXor(Op0C->getOperand(0),
4897 Op1C->getOperand(0),
4898 I.getName());
4899 InsertNewInstBefore(NewOp, I);
4900 return CastInst::create(Op0C->getOpcode(), NewOp, I.getType());
4901 }
4902 }
Chris Lattner91882432007-10-24 05:38:08 +00004903 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004904 return Changed ? &I : 0;
4905}
4906
4907/// AddWithOverflow - Compute Result = In1+In2, returning true if the result
4908/// overflowed for this type.
4909static bool AddWithOverflow(ConstantInt *&Result, ConstantInt *In1,
4910 ConstantInt *In2, bool IsSigned = false) {
4911 Result = cast<ConstantInt>(Add(In1, In2));
4912
4913 if (IsSigned)
4914 if (In2->getValue().isNegative())
4915 return Result->getValue().sgt(In1->getValue());
4916 else
4917 return Result->getValue().slt(In1->getValue());
4918 else
4919 return Result->getValue().ult(In1->getValue());
4920}
4921
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer). Return the result as a signed integer of intptr size.
///
/// Constant pieces of the offset are folded into ConstantExprs; anything
/// non-constant is materialized as new instructions inserted before I.
static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
  TargetData &TD = IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);
  const Type *IntPtrTy = TD.getIntPtrType();
  // Running byte-offset total, always of pointer width.
  Value *Result = Constant::getNullValue(IntPtrTy);

  // Build a mask for high order bits.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    Value *Op = GEP->getOperand(i);
    // Size of the element type indexed at this level, truncated to the
    // pointer width so later arithmetic is modulo the pointer size.
    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType()) & PtrSizeMask;
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
      if (OpC->isZero()) continue;   // Zero indices contribute nothing.

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());

        // Fold into the running constant if possible, otherwise emit an add.
        if (ConstantInt *RC = dyn_cast<ConstantInt>(Result))
          Result = ConstantInt::get(RC->getValue() + APInt(IntPtrWidth, Size));
        else
          Result = IC.InsertNewInstBefore(
                   BinaryOperator::createAdd(Result,
                                             ConstantInt::get(IntPtrTy, Size),
                                             GEP->getName()+".offs"), I);
        continue;
      }

      // Constant array/vector index: fold index*elementsize into the total.
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale);
      if (Constant *RC = dyn_cast<Constant>(Result))
        Result = ConstantExpr::getAdd(RC, Scale);
      else {
        // Emit an add instruction.
        Result = IC.InsertNewInstBefore(
           BinaryOperator::createAdd(Result, Scale,
                                     GEP->getName()+".offs"), I);
      }
      continue;
    }
    // Variable index.
    // Convert to correct type (sign-extend, since GEP indices are signed).
    if (Op->getType() != IntPtrTy) {
      if (Constant *OpC = dyn_cast<Constant>(Op))
        Op = ConstantExpr::getSExt(OpC, IntPtrTy);
      else
        Op = IC.InsertNewInstBefore(new SExtInst(Op, IntPtrTy,
                                                 Op->getName()+".c"), I);
    }
    if (Size != 1) {
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      if (Constant *OpC = dyn_cast<Constant>(Op))
        Op = ConstantExpr::getMul(OpC, Scale);
      else    // We'll let instcombine(mul) convert this to a shl if possible.
        Op = IC.InsertNewInstBefore(BinaryOperator::createMul(Op, Scale,
                                                  GEP->getName()+".idx"), I);
    }

    // Emit an add instruction.
    if (isa<Constant>(Op) && isa<Constant>(Result))
      Result = ConstantExpr::getAdd(cast<Constant>(Op),
                                    cast<Constant>(Result));
    else
      Result = IC.InsertNewInstBefore(BinaryOperator::createAdd(Op, Result,
                                                  GEP->getName()+".offs"), I);
  }
  return Result;
}
4995
Chris Lattnereba75862008-04-22 02:53:33 +00004996
/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
/// the *offset* implied by GEP to zero. For example, if we have &A[i], we want
/// to return 'i' for "icmp ne i, 0". Note that, in general, indices can be
/// complex, and scales are involved. The above expression would also be legal
/// to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32). This
/// later form is less amenable to optimization though, and we are allowed to
/// generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
                                          InstCombiner &IC) {
  TargetData &TD = IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;       // Byte offset contributed by constant indices.
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return 0;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = TD.getABITypeSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return 0;      // Second variable index: give up.

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
      VariableIdx = new TruncInst(VariableIdx, TD.getIntPtrType(),
                                  VariableIdx->getNameStart(), &I);
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return 0;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  const Type *IntPtrTy = TD.getIntPtrType();
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = CastInst::createIntegerCast(VariableIdx, IntPtrTy,
                                              true /*SExt*/,
                                              VariableIdx->getNameStart(), &I);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return BinaryOperator::createAdd(VariableIdx, OffsetVal, "offset", &I);
}
5101
5102
/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
/// else. At this point we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  assert(dyn_castGetElementPtr(GEPLHS) && "LHS is not a getelementptr!");

  // Look through bitcasts.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
    RHS = BCI->getOperand(0);

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow. See if we can output an optimized form.
    Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);

    // If not, synthesize the offset the hard way.
    if (Offset == 0)
      Offset = EmitGEPOffset(GEPLHS, I, *this);
    // Offsets are signed quantities, so use the signed form of Cond.
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (User *GEPRHS = dyn_castGetElementPtr(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
                            GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // Otherwise, the base pointers are different and the indices are
      // different, bail out.
      return 0;
    }

    // If one of the GEPs has all zero indices, recurse.
    bool AllZeros = true;
    for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPLHS->getOperand(i)) ||
          !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      // LHS is a no-op GEP: compare its base against the RHS GEP, swapping
      // the predicate since the operand order reverses.
      return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                          ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    AllZeros = true;
    for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPRHS->getOperand(i)) ||
          !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
                   GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return ReplaceInstUsesWith(I, // No comparison is needed here.
                                   ConstantInt::get(Type::Int1Ty,
                                                    isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if ((isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS, I, *this);
      Value *R = EmitGEPOffset(GEPRHS, I, *this);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }
  return 0;
}
5215
// visitFCmpInst - Simplify a floating-point compare: fold trivial predicates,
// canonicalize 'fcmp pred X, X', and push comparisons with a constant RHS
// into phi/select operands.  Returns the replacement (or modified original)
// instruction, or null if nothing changed.
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = SimplifyCompare(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Fold trivial predicates.
  if (I.getPredicate() == FCmpInst::FCMP_FALSE)
    return ReplaceInstUsesWith(I, Constant::getNullValue(Type::Int1Ty));
  if (I.getPredicate() == FCmpInst::FCMP_TRUE)
    return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty, 1));

  // Simplify 'fcmp pred X, X'.  With identical operands only the NaN
  // (unordered) behavior distinguishes the predicates.
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: assert(0 && "Unknown predicate!");
    case FCmpInst::FCMP_UEQ:    // True if unordered or equal
    case FCmpInst::FCMP_UGE:    // True if unordered, greater than, or equal
    case FCmpInst::FCMP_ULE:    // True if unordered, less than, or equal
      return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty, 1));
    case FCmpInst::FCMP_OGT:    // True if ordered and greater than
    case FCmpInst::FCMP_OLT:    // True if ordered and less than
    case FCmpInst::FCMP_ONE:    // True if ordered and operands are unequal
      return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty, 0));

    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  if (isa<UndefValue>(Op1))                  // fcmp pred X, undef -> undef
    return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));

  // Handle fcmp with constant RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::PHI:
        if (Instruction *NV = FoldOpIntoPhi(I))
          return NV;
        break;
      case Instruction::Select:
        // If either operand of the select is a constant, we can fold the
        // comparison into the select arms, which will cause one to be
        // constant folded and the select turned into a bitwise or.
        // NOTE: these locals intentionally shadow the outer Op1.
        Value *Op1 = 0, *Op2 = 0;
        if (LHSI->hasOneUse()) {
          if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
            // Fold the known value into the constant operand.
            Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
            // Insert a new FCmp of the other select operand.
            Op2 = InsertNewInstBefore(new FCmpInst(I.getPredicate(),
                                                   LHSI->getOperand(2), RHSC,
                                                   I.getName()), I);
          } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
            // Fold the known value into the constant operand.
            Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
            // Insert a new FCmp of the other select operand.
            Op1 = InsertNewInstBefore(new FCmpInst(I.getPredicate(),
                                                   LHSI->getOperand(1), RHSC,
                                                   I.getName()), I);
          }
        }

        if (Op1)
          return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
        break;
      }
  }

  return Changed ? &I : 0;
}
5301
/// visitICmpInst - Top-level simplification driver for integer compares.
/// Tries, in order: trivial identities (X==X, undef), known-distinct pointer
/// operands, lowering of i1 compares to bitwise ops, folds against a
/// ConstantInt RHS (predicate canonicalization, known-bits range pruning,
/// delegation to visitICmpInstWithInstAndIntCst), folds against other
/// constant RHS forms (GEP/PHI/select/malloc), GEP and cast stripping, and
/// finally equality-only algebraic identities on xor/sub/and.  Returns the
/// replacement instruction, &I if I was mutated in place, or 0 if unchanged.
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
  // Canonicalize operand order first (presumably moves constants to the
  // RHS; the folds below rely on that) -- NOTE(review): confirm against
  // SimplifyCompare's definition, which is outside this chunk.
  bool Changed = SimplifyCompare(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  const Type *Ty = Op0->getType();

  // icmp X, X -> true/false depending only on the predicate.
  if (Op0 == Op1)
    return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
                                                   isTrueWhenEqual(I)));

  if (isa<UndefValue>(Op1))                  // X icmp undef -> undef
    return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));

  // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
  // addresses never equal each other!  We already know that Op0 != Op1
  // (the X==X case above), so the compare folds to a constant.
  if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
       isa<ConstantPointerNull>(Op0)) &&
      (isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
       isa<ConstantPointerNull>(Op1)))
    return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
                                                   !isTrueWhenEqual(I)));

  // icmp's with boolean values can always be turned into bitwise operations.
  if (Ty == Type::Int1Ty) {
    switch (I.getPredicate()) {
    default: assert(0 && "Invalid icmp instruction!");
    case ICmpInst::ICMP_EQ: {               // icmp eq bool %A, %B -> ~(A^B)
      Instruction *Xor = BinaryOperator::createXor(Op0, Op1, I.getName()+"tmp");
      InsertNewInstBefore(Xor, I);
      return BinaryOperator::createNot(Xor);
    }
    case ICmpInst::ICMP_NE:                 // icmp ne bool %A, %B -> A^B
      return BinaryOperator::createXor(Op0, Op1);

    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_SGT:
      std::swap(Op0, Op1);                  // Change icmp gt -> icmp lt
      // FALL THROUGH
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_SLT: {              // icmp lt bool %A, %B -> ~A & B
      Instruction *Not = BinaryOperator::createNot(Op0, I.getName()+"tmp");
      InsertNewInstBefore(Not, I);
      return BinaryOperator::createAnd(Not, Op1);
    }
    case ICmpInst::ICMP_UGE:
    case ICmpInst::ICMP_SGE:
      std::swap(Op0, Op1);                  // Change icmp ge -> icmp le
      // FALL THROUGH
    case ICmpInst::ICMP_ULE:
    case ICmpInst::ICMP_SLE: {              // icmp le bool %A, %B -> ~A | B
      Instruction *Not = BinaryOperator::createNot(Op0, I.getName()+"tmp");
      InsertNewInstBefore(Not, I);
      return BinaryOperator::createOr(Not, Op1);
    }
    }
  }

  // See if we are doing a comparison between a constant and an instruction that
  // can be folded into the comparison.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    Value *A, *B;

    // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
    if (I.isEquality() && CI->isNullValue() &&
        match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
      // (icmp cond A B) if cond is equality
      return new ICmpInst(I.getPredicate(), A, B);
    }

    // Fold compares against the extreme values of the integer range into
    // constants or equality tests.  MIN/MAX here are relative to the
    // signedness given to isMinValue/isMaxValue (false=unsigned, true=signed).
    switch (I.getPredicate()) {
    default: break;
    case ICmpInst::ICMP_ULT:                        // A <u MIN -> FALSE
      if (CI->isMinValue(false))
        return ReplaceInstUsesWith(I, ConstantInt::getFalse());
      if (CI->isMaxValue(false))                    // A <u MAX -> A != MAX
        return new ICmpInst(ICmpInst::ICMP_NE, Op0,Op1);
      if (isMinValuePlusOne(CI,false))              // A <u MIN+1 -> A == MIN
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, SubOne(CI));
      // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
      if (CI->isMinValue(true))
        return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
                            ConstantInt::getAllOnesValue(Op0->getType()));

      break;

    case ICmpInst::ICMP_SLT:
      if (CI->isMinValue(true))                    // A <s MIN -> FALSE
        return ReplaceInstUsesWith(I, ConstantInt::getFalse());
      if (CI->isMaxValue(true))                    // A <s MAX -> A != MAX
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (isMinValuePlusOne(CI,true))              // A <s MIN+1 -> A == MIN
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, SubOne(CI));
      break;

    case ICmpInst::ICMP_UGT:
      if (CI->isMaxValue(false))                   // A >u MAX -> FALSE
        return ReplaceInstUsesWith(I, ConstantInt::getFalse());
      if (CI->isMinValue(false))                   // A >u MIN -> A != MIN
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (isMaxValueMinusOne(CI, false))           // A >u MAX-1 -> A == MAX
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, AddOne(CI));

      // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
      if (CI->isMaxValue(true))
        return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
                            ConstantInt::getNullValue(Op0->getType()));
      break;

    case ICmpInst::ICMP_SGT:
      if (CI->isMaxValue(true))                    // A >s MAX -> FALSE
        return ReplaceInstUsesWith(I, ConstantInt::getFalse());
      if (CI->isMinValue(true))                    // A >s MIN -> A != MIN
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (isMaxValueMinusOne(CI, true))            // A >s MAX-1 -> A == MAX
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, AddOne(CI));
      break;

    case ICmpInst::ICMP_ULE:
      if (CI->isMaxValue(false))                   // A <=u MAX -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue());
      if (CI->isMinValue(false))                   // A <=u MIN -> A == MIN
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
      if (isMaxValueMinusOne(CI,false))            // A <=u MAX-1 -> A != MAX
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, AddOne(CI));
      break;

    case ICmpInst::ICMP_SLE:
      if (CI->isMaxValue(true))                    // A <=s MAX -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue());
      if (CI->isMinValue(true))                    // A <=s MIN -> A == MIN
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
      if (isMaxValueMinusOne(CI,true))             // A <=s MAX-1 -> A != MAX
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, AddOne(CI));
      break;

    case ICmpInst::ICMP_UGE:
      if (CI->isMinValue(false))                   // A >=u MIN -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue());
      if (CI->isMaxValue(false))                   // A >=u MAX -> A == MAX
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
      if (isMinValuePlusOne(CI,false))             // A >=u MIN+1 -> A != MIN
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, SubOne(CI));
      break;

    case ICmpInst::ICMP_SGE:
      if (CI->isMinValue(true))                    // A >=s MIN -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue());
      if (CI->isMaxValue(true))                    // A >=s MAX -> A == MAX
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
      if (isMinValuePlusOne(CI,true))              // A >=s MIN+1 -> A != MIN
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, SubOne(CI));
      break;
    }

    // If we still have a icmp le or icmp ge instruction, turn it into the
    // appropriate icmp lt or icmp gt instruction.  Since the border cases have
    // already been handled above, this requires little checking.
    //
    switch (I.getPredicate()) {
    default: break;
    case ICmpInst::ICMP_ULE:
      return new ICmpInst(ICmpInst::ICMP_ULT, Op0, AddOne(CI));
    case ICmpInst::ICMP_SLE:
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0, AddOne(CI));
    case ICmpInst::ICMP_UGE:
      return new ICmpInst( ICmpInst::ICMP_UGT, Op0, SubOne(CI));
    case ICmpInst::ICMP_SGE:
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0, SubOne(CI));
    }

    // See if we can fold the comparison based on bits known to be zero or one
    // in the input.  If this comparison is a normal comparison, it demands all
    // bits, if it is a sign bit comparison, it only demands the sign bit.

    bool UnusedBit;
    bool isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);

    uint32_t BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    // SimplifyDemandedBits may rewrite Op0 in place; if it did, return &I so
    // the worklist revisits the (changed) compare.
    if (SimplifyDemandedBits(Op0,
                             isSignBit ? APInt::getSignBit(BitWidth)
                                       : APInt::getAllOnesValue(BitWidth),
                             KnownZero, KnownOne, 0))
      return &I;

    // Given the known and unknown bits, compute a range that the LHS could be
    // in.
    if ((KnownOne | KnownZero) != 0) {
      // Compute the Min, Max and RHS values based on the known bits. For the
      // EQ and NE we use unsigned values.
      APInt Min(BitWidth, 0), Max(BitWidth, 0);
      const APInt& RHSVal = CI->getValue();
      if (ICmpInst::isSignedPredicate(I.getPredicate())) {
        ComputeSignedMinMaxValuesFromKnownBits(Ty, KnownZero, KnownOne, Min,
                                               Max);
      } else {
        ComputeUnsignedMinMaxValuesFromKnownBits(Ty, KnownZero, KnownOne, Min,
                                                 Max);
      }
      // If [Min, Max] provably excludes (or includes) RHSVal, the compare is
      // a constant.  LE/GE were canonicalized to LT/GT above, so only the six
      // predicates below can occur here.
      switch (I.getPredicate()) {  // LE/GE have been folded already.
      default: assert(0 && "Unknown icmp opcode!");
      case ICmpInst::ICMP_EQ:
        if (Max.ult(RHSVal) || Min.ugt(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getFalse());
        break;
      case ICmpInst::ICMP_NE:
        if (Max.ult(RHSVal) || Min.ugt(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getTrue());
        break;
      case ICmpInst::ICMP_ULT:
        if (Max.ult(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getTrue());
        if (Min.uge(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getFalse());
        break;
      case ICmpInst::ICMP_UGT:
        if (Min.ugt(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getTrue());
        if (Max.ule(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getFalse());
        break;
      case ICmpInst::ICMP_SLT:
        if (Max.slt(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getTrue());
        if (Min.sgt(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getFalse());
        break;
      case ICmpInst::ICMP_SGT:
        if (Min.sgt(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getTrue());
        if (Max.sle(RHSVal))
          return ReplaceInstUsesWith(I, ConstantInt::getFalse());
        break;
      }
    }

    // Since the RHS is a ConstantInt (CI), if the left hand side is an
    // instruction, see if that instruction also has constants so that the
    // instruction can be folded into the icmp
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
        return Res;
  }

  // Handle icmp with constant (but not simple integer constant) RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::GetElementPtr:
        if (RHSC->isNullValue()) {
          // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
          bool isAllZeros = true;
          for (unsigned i = 1, e = LHSI->getNumOperands(); i != e; ++i)
            if (!isa<Constant>(LHSI->getOperand(i)) ||
                !cast<Constant>(LHSI->getOperand(i))->isNullValue()) {
              isAllZeros = false;
              break;
            }
          if (isAllZeros)
            return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
                    Constant::getNullValue(LHSI->getOperand(0)->getType()));
        }
        break;

      case Instruction::PHI:
        if (Instruction *NV = FoldOpIntoPhi(I))
          return NV;
        break;
      case Instruction::Select: {
        // If either operand of the select is a constant, we can fold the
        // comparison into the select arms, which will cause one to be
        // constant folded and the select turned into a bitwise or.
        Value *Op1 = 0, *Op2 = 0;
        if (LHSI->hasOneUse()) {
          if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
            // Fold the known value into the constant operand.
            Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
            // Insert a new ICmp of the other select operand.
            Op2 = InsertNewInstBefore(new ICmpInst(I.getPredicate(),
                                                   LHSI->getOperand(2), RHSC,
                                                   I.getName()), I);
          } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
            // Fold the known value into the constant operand.
            Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
            // Insert a new ICmp of the other select operand.
            Op1 = InsertNewInstBefore(new ICmpInst(I.getPredicate(),
                                                   LHSI->getOperand(1), RHSC,
                                                   I.getName()), I);
          }
        }

        if (Op1)
          return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
        break;
      }
      case Instruction::Malloc:
        // If we have (malloc != null), and if the malloc has a single use, we
        // can assume it is successful and remove the malloc.
        if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
          AddToWorkList(LHSI);
          return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
                                                         !isTrueWhenEqual(I)));
        }
        break;
      }
  }

  // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
  if (User *GEP = dyn_castGetElementPtr(Op0))
    if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (User *GEP = dyn_castGetElementPtr(Op1))
    if (Instruction *NI = FoldGEPICmp(GEP, Op0,
                           ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;

  // Test to see if the operands of the icmp are casted versions of other
  // values.  If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
    if (isa<PointerType>(Op0->getType()) &&
        (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
      // We keep moving the cast from the left operand over to the right
      // operand, where it can often be eliminated completely.
      Op0 = CI->getOperand(0);

      // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
      // so eliminate it as well.
      if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
        Op1 = CI2->getOperand(0);

      // If Op1 is a constant, we can fold the cast into the constant.
      if (Op0->getType() != Op1->getType()) {
        if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
          Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
        } else {
          // Otherwise, cast the RHS right before the icmp
          Op1 = InsertBitCastBefore(Op1, Op0->getType(), I);
        }
      }
      return new ICmpInst(I.getPredicate(), Op0, Op1);
    }
  }

  if (isa<CastInst>(Op0)) {
    // Handle the special case of: icmp (cast bool to X), <cst>
    // This comes up when you have code like
    //   int X = A < B;
    //   if (X) ...
    // For generality, we handle any zero-extension of any operand comparison
    // with a constant or another cast from the same type.
    if (isa<ConstantInt>(Op1) || isa<CastInst>(Op1))
      if (Instruction *R = visitICmpInstWithCastAndCast(I))
        return R;
  }

  // Equality-only algebraic identities on xor/sub/and operands.
  if (I.isEquality()) {
    Value *A, *B, *C, *D;
    if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
      if (A == Op1 || B == Op1) {    // (A^B) == A  ->  B == 0
        Value *OtherVal = A == Op1 ? B : A;
        return new ICmpInst(I.getPredicate(), OtherVal,
                            Constant::getNullValue(A->getType()));
      }

      if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
        // A^c1 == C^c2 --> A == C^(c1^c2)
        if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
          if (ConstantInt *C2 = dyn_cast<ConstantInt>(D))
            if (Op1->hasOneUse()) {
              Constant *NC = ConstantInt::get(C1->getValue() ^ C2->getValue());
              Instruction *Xor = BinaryOperator::createXor(C, NC, "tmp");
              return new ICmpInst(I.getPredicate(), A,
                                  InsertNewInstBefore(Xor, I));
            }

        // A^B == A^D -> B == D
        if (A == C) return new ICmpInst(I.getPredicate(), B, D);
        if (A == D) return new ICmpInst(I.getPredicate(), B, C);
        if (B == C) return new ICmpInst(I.getPredicate(), A, D);
        if (B == D) return new ICmpInst(I.getPredicate(), A, C);
      }
    }

    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        (A == Op0 || B == Op0)) {
      // A == (A^B)  ->  B == 0
      Value *OtherVal = A == Op0 ? B : A;
      return new ICmpInst(I.getPredicate(), OtherVal,
                          Constant::getNullValue(A->getType()));
    }
    if (match(Op0, m_Sub(m_Value(A), m_Value(B))) && A == Op1) {
      // (A-B) == A  ->  B == 0
      return new ICmpInst(I.getPredicate(), B,
                          Constant::getNullValue(B->getType()));
    }
    if (match(Op1, m_Sub(m_Value(A), m_Value(B))) && A == Op0) {
      // A == (A-B)  ->  B == 0
      return new ICmpInst(I.getPredicate(), B,
                          Constant::getNullValue(B->getType()));
    }

    // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_And(m_Value(C), m_Value(D)))) {
      Value *X = 0, *Y = 0, *Z = 0;

      // Identify the shared operand Z; X and Y are the remaining operands.
      if (A == C) {
        X = B; Y = D; Z = A;
      } else if (A == D) {
        X = B; Y = C; Z = A;
      } else if (B == C) {
        X = A; Y = D; Z = B;
      } else if (B == D) {
        X = A; Y = C; Z = B;
      }

      if (X) {   // Build (X^Y) & Z
        Op1 = InsertNewInstBefore(BinaryOperator::createXor(X, Y, "tmp"), I);
        Op1 = InsertNewInstBefore(BinaryOperator::createAnd(Op1, Z, "tmp"), I);
        I.setOperand(0, Op1);
        I.setOperand(1, Constant::getNullValue(Op1->getType()));
        return &I;
      }
    }
  }
  // Return &I if any in-place change (including SimplifyCompare) was made,
  // 0 if the instruction is unchanged.
  return Changed ? &I : 0;
}
5731
5732
/// FoldICmpDivCst - Fold "icmp pred ([su]div X, DivRHS), CmpRHS" where DivRHS
/// and CmpRHS are both known to be integer constants.  The division is solved
/// for X, turning the compare into an equivalent range check on X (half-open
/// interval [LoBound, HiBound)).  Returns the replacement instruction, or 0
/// if the fold does not apply (mixed signedness, or divide by zero).
Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                          ConstantInt *DivRHS) {
  ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
  const APInt &CmpRHSV = CmpRHS->getValue();

  // FIXME: If the operand types don't match the type of the divide
  // then don't attempt this transform. The code below doesn't have the
  // logic to deal with a signed divide and an unsigned compare (and
  // vice versa). This is because (x /s C1) <s C2  produces different
  // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
  // (x /u C1) <u C2.  Simply casting the operands and result won't
  // work. :(  The if statement below tests that condition and bails
  // if it finds it.
  bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
  if (!ICI.isEquality() && DivIsSigned != ICI.isSignedPredicate())
    return 0;
  if (DivRHS->isZero())
    return 0; // The ProdOV computation fails on divide by zero.

  // Compute Prod = CI * DivRHS. We are essentially solving an equation
  // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
  // C2 (CI). By solving for X we can turn this into a range check
  // instead of computing a divide.
  ConstantInt *Prod = Multiply(CmpRHS, DivRHS);

  // Determine if the product overflows by seeing if the product is
  // not equal to the divide. Make sure we do the same kind of divide
  // as in the LHS instruction that we're folding.
  bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
                 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;

  // Get the ICmp opcode
  ICmpInst::Predicate Pred = ICI.getPredicate();

  // Figure out the interval that is being checked.  For example, a comparison
  // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
  // Compute this interval based on the constants involved and the signedness of
  // the compare/divide.  This computes a half-open interval, keeping track of
  // whether either value in the interval overflows.  After analysis each
  // overflow variable is set to 0 if its corresponding bound variable is valid,
  // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
  int LoOverflow = 0, HiOverflow = 0;
  ConstantInt *LoBound = 0, *HiBound = 0;


  // Three cases: unsigned divide, signed divide by positive, signed divide
  // by negative.  (Divisor == 0 was rejected above.)
  if (!DivIsSigned) {  // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow)
      HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, false);
  } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
    if (CmpRHSV == 0) {       // (X / pos) op 0
      // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
      LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
      HiBound = DivRHS;
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / pos) op pos
      LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
      HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, true);
    } else {                       // (X / pos) op neg
      // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
      Constant *DivRHSH = ConstantExpr::getNeg(SubOne(DivRHS));
      LoOverflow = AddWithOverflow(LoBound, Prod,
                                   cast<ConstantInt>(DivRHSH), true) ? -1 : 0;
      HiBound = AddOne(Prod);
      HiOverflow = ProdOV ? -1 : 0;
    }
  } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
    if (CmpRHSV == 0) {       // (X / neg) op 0
      // e.g. X/-5 op 0  --> [-4, 5)
      LoBound = AddOne(DivRHS);
      HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
      if (HiBound == DivRHS) {     // -INTMIN = INTMIN (negation wrapped)
        HiOverflow = 1;            // [INTMIN+1, overflow)
        HiBound = 0;               // e.g. X/INTMIN = 0 --> X > INTMIN
      }
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow = AddWithOverflow(LoBound, Prod, AddOne(DivRHS), true) ?-1:0;
      HiBound = AddOne(Prod);
    } else {                       // (X / neg) op neg
      // e.g. X/-5 op -3  --> [15, 20)
      LoBound = Prod;
      LoOverflow = HiOverflow = ProdOV ? 1 : 0;
      HiBound = Subtract(Prod, DivRHS);
    }

    // Dividing by a negative swaps the condition.  LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Emit the range check on X implied by the interval and any overflow.
  Value *X = DivI->getOperand(0);
  switch (Pred) {
  default: assert(0 && "Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse());
    else if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, LoBound);
    else if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, HiBound);
    else
      return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue());
    else if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, LoBound);
    else if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, HiBound);
    else
      return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1)   // Low bound is greater than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue());
    if (LoOverflow == -1)   // Low bound is less than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse());
    return new ICmpInst(Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1)       // High bound greater than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse());
    else if (HiOverflow == -1)  // High bound less than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue());
    // X > HiBound-1 is expressed as X >= HiBound (half-open upper bound).
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
    else
      return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
  }
}
5874
5875
5876/// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
5877///
5878Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
5879 Instruction *LHSI,
5880 ConstantInt *RHS) {
5881 const APInt &RHSV = RHS->getValue();
5882
5883 switch (LHSI->getOpcode()) {
5884 case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
5885 if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
5886 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
5887 // fold the xor.
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00005888 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
5889 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005890 Value *CompareVal = LHSI->getOperand(0);
5891
5892 // If the sign bit of the XorCST is not set, there is no change to
5893 // the operation, just stop using the Xor.
5894 if (!XorCST->getValue().isNegative()) {
5895 ICI.setOperand(0, CompareVal);
5896 AddToWorkList(LHSI);
5897 return &ICI;
5898 }
5899
5900 // Was the old condition true if the operand is positive?
5901 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
5902
5903 // If so, the new one isn't.
5904 isTrueIfPositive ^= true;
5905
5906 if (isTrueIfPositive)
5907 return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal, SubOne(RHS));
5908 else
5909 return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal, AddOne(RHS));
5910 }
5911 }
5912 break;
5913 case Instruction::And: // (icmp pred (and X, AndCST), RHS)
5914 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
5915 LHSI->getOperand(0)->hasOneUse()) {
5916 ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
5917
5918 // If the LHS is an AND of a truncating cast, we can widen the
5919 // and/compare to be the input width without changing the value
5920 // produced, eliminating a cast.
5921 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
5922 // We can do this transformation if either the AND constant does not
5923 // have its sign bit set or if it is an equality comparison.
5924 // Extending a relational comparison when we're checking the sign
5925 // bit would not work.
5926 if (Cast->hasOneUse() &&
Anton Korobeynikov6a4a9332008-02-20 12:07:57 +00005927 (ICI.isEquality() ||
5928 (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005929 uint32_t BitWidth =
5930 cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
5931 APInt NewCST = AndCST->getValue();
5932 NewCST.zext(BitWidth);
5933 APInt NewCI = RHSV;
5934 NewCI.zext(BitWidth);
5935 Instruction *NewAnd =
5936 BinaryOperator::createAnd(Cast->getOperand(0),
5937 ConstantInt::get(NewCST),LHSI->getName());
5938 InsertNewInstBefore(NewAnd, ICI);
5939 return new ICmpInst(ICI.getPredicate(), NewAnd,
5940 ConstantInt::get(NewCI));
5941 }
5942 }
5943
5944 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
5945 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
5946 // happens a LOT in code produced by the C front-end, for bitfield
5947 // access.
5948 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
5949 if (Shift && !Shift->isShift())
5950 Shift = 0;
5951
5952 ConstantInt *ShAmt;
5953 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
5954 const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
5955 const Type *AndTy = AndCST->getType(); // Type of the and.
5956
5957 // We can fold this as long as we can't shift unknown bits
5958 // into the mask. This can only happen with signed shift
5959 // rights, as they sign-extend.
5960 if (ShAmt) {
5961 bool CanFold = Shift->isLogicalShift();
5962 if (!CanFold) {
5963 // To test for the bad case of the signed shr, see if any
5964 // of the bits shifted in could be tested after the mask.
5965 uint32_t TyBits = Ty->getPrimitiveSizeInBits();
5966 int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
5967
5968 uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
5969 if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
5970 AndCST->getValue()) == 0)
5971 CanFold = true;
5972 }
5973
5974 if (CanFold) {
5975 Constant *NewCst;
5976 if (Shift->getOpcode() == Instruction::Shl)
5977 NewCst = ConstantExpr::getLShr(RHS, ShAmt);
5978 else
5979 NewCst = ConstantExpr::getShl(RHS, ShAmt);
5980
5981 // Check to see if we are shifting out any of the bits being
5982 // compared.
5983 if (ConstantExpr::get(Shift->getOpcode(), NewCst, ShAmt) != RHS) {
5984 // If we shifted bits out, the fold is not going to work out.
5985 // As a special case, check to see if this means that the
5986 // result is always true or false now.
5987 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
5988 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse());
5989 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
5990 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue());
5991 } else {
5992 ICI.setOperand(1, NewCst);
5993 Constant *NewAndCST;
5994 if (Shift->getOpcode() == Instruction::Shl)
5995 NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
5996 else
5997 NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
5998 LHSI->setOperand(1, NewAndCST);
5999 LHSI->setOperand(0, Shift->getOperand(0));
6000 AddToWorkList(Shift); // Shift is dead.
6001 AddUsesToWorkList(ICI);
6002 return &ICI;
6003 }
6004 }
6005 }
6006
6007 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The later is
6008 // preferable because it allows the C<<Y expression to be hoisted out
6009 // of a loop if Y is invariant and X is not.
6010 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
6011 ICI.isEquality() && !Shift->isArithmeticShift() &&
6012 isa<Instruction>(Shift->getOperand(0))) {
6013 // Compute C << Y.
6014 Value *NS;
6015 if (Shift->getOpcode() == Instruction::LShr) {
6016 NS = BinaryOperator::createShl(AndCST,
6017 Shift->getOperand(1), "tmp");
6018 } else {
6019 // Insert a logical shift.
6020 NS = BinaryOperator::createLShr(AndCST,
6021 Shift->getOperand(1), "tmp");
6022 }
6023 InsertNewInstBefore(cast<Instruction>(NS), ICI);
6024
6025 // Compute X & (C << Y).
6026 Instruction *NewAnd =
6027 BinaryOperator::createAnd(Shift->getOperand(0), NS, LHSI->getName());
6028 InsertNewInstBefore(NewAnd, ICI);
6029
6030 ICI.setOperand(0, NewAnd);
6031 return &ICI;
6032 }
6033 }
6034 break;
6035
6036 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
6037 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6038 if (!ShAmt) break;
6039
6040 uint32_t TypeBits = RHSV.getBitWidth();
6041
6042 // Check that the shift amount is in range. If not, don't perform
6043 // undefined shifts. When the shift is visited it will be
6044 // simplified.
6045 if (ShAmt->uge(TypeBits))
6046 break;
6047
6048 if (ICI.isEquality()) {
6049 // If we are comparing against bits always shifted out, the
6050 // comparison cannot succeed.
6051 Constant *Comp =
6052 ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt), ShAmt);
6053 if (Comp != RHS) {// Comparing against a bit that we know is zero.
6054 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6055 Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE);
6056 return ReplaceInstUsesWith(ICI, Cst);
6057 }
6058
6059 if (LHSI->hasOneUse()) {
6060 // Otherwise strength reduce the shift into an and.
6061 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6062 Constant *Mask =
6063 ConstantInt::get(APInt::getLowBitsSet(TypeBits, TypeBits-ShAmtVal));
6064
6065 Instruction *AndI =
6066 BinaryOperator::createAnd(LHSI->getOperand(0),
6067 Mask, LHSI->getName()+".mask");
6068 Value *And = InsertNewInstBefore(AndI, ICI);
6069 return new ICmpInst(ICI.getPredicate(), And,
6070 ConstantInt::get(RHSV.lshr(ShAmtVal)));
6071 }
6072 }
6073
6074 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
6075 bool TrueIfSigned = false;
6076 if (LHSI->hasOneUse() &&
6077 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
6078 // (X << 31) <s 0 --> (X&1) != 0
6079 Constant *Mask = ConstantInt::get(APInt(TypeBits, 1) <<
6080 (TypeBits-ShAmt->getZExtValue()-1));
6081 Instruction *AndI =
6082 BinaryOperator::createAnd(LHSI->getOperand(0),
6083 Mask, LHSI->getName()+".mask");
6084 Value *And = InsertNewInstBefore(AndI, ICI);
6085
6086 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
6087 And, Constant::getNullValue(And->getType()));
6088 }
6089 break;
6090 }
6091
6092 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
6093 case Instruction::AShr: {
Chris Lattner5ee84f82008-03-21 05:19:58 +00006094 // Only handle equality comparisons of shift-by-constant.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006095 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
Chris Lattner5ee84f82008-03-21 05:19:58 +00006096 if (!ShAmt || !ICI.isEquality()) break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006097
Chris Lattner5ee84f82008-03-21 05:19:58 +00006098 // Check that the shift amount is in range. If not, don't perform
6099 // undefined shifts. When the shift is visited it will be
6100 // simplified.
6101 uint32_t TypeBits = RHSV.getBitWidth();
6102 if (ShAmt->uge(TypeBits))
6103 break;
6104
6105 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006106
Chris Lattner5ee84f82008-03-21 05:19:58 +00006107 // If we are comparing against bits always shifted out, the
6108 // comparison cannot succeed.
6109 APInt Comp = RHSV << ShAmtVal;
6110 if (LHSI->getOpcode() == Instruction::LShr)
6111 Comp = Comp.lshr(ShAmtVal);
6112 else
6113 Comp = Comp.ashr(ShAmtVal);
6114
6115 if (Comp != RHSV) { // Comparing against a bit that we know is zero.
6116 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6117 Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE);
6118 return ReplaceInstUsesWith(ICI, Cst);
6119 }
6120
6121 // Otherwise, check to see if the bits shifted out are known to be zero.
6122 // If so, we can compare against the unshifted value:
6123 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
Evan Chengfb9292a2008-04-23 00:38:06 +00006124 if (LHSI->hasOneUse() &&
6125 MaskedValueIsZero(LHSI->getOperand(0),
Chris Lattner5ee84f82008-03-21 05:19:58 +00006126 APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
6127 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
6128 ConstantExpr::getShl(RHS, ShAmt));
6129 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006130
Evan Chengfb9292a2008-04-23 00:38:06 +00006131 if (LHSI->hasOneUse()) {
Chris Lattner5ee84f82008-03-21 05:19:58 +00006132 // Otherwise strength reduce the shift into an and.
6133 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
6134 Constant *Mask = ConstantInt::get(Val);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006135
Chris Lattner5ee84f82008-03-21 05:19:58 +00006136 Instruction *AndI =
6137 BinaryOperator::createAnd(LHSI->getOperand(0),
6138 Mask, LHSI->getName()+".mask");
6139 Value *And = InsertNewInstBefore(AndI, ICI);
6140 return new ICmpInst(ICI.getPredicate(), And,
6141 ConstantExpr::getShl(RHS, ShAmt));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006142 }
6143 break;
6144 }
6145
6146 case Instruction::SDiv:
6147 case Instruction::UDiv:
6148 // Fold: icmp pred ([us]div X, C1), C2 -> range test
6149 // Fold this div into the comparison, producing a range check.
6150 // Determine, based on the divide type, what the range is being
6151 // checked. If there is an overflow on the low or high side, remember
6152 // it, otherwise compute the range [low, hi) bounding the new value.
6153 // See: InsertRangeTest above for the kinds of replacements possible.
6154 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
6155 if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
6156 DivRHS))
6157 return R;
6158 break;
Nick Lewycky0185bbf2008-02-03 16:33:09 +00006159
6160 case Instruction::Add:
6161 // Fold: icmp pred (add, X, C1), C2
6162
6163 if (!ICI.isEquality()) {
6164 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6165 if (!LHSC) break;
6166 const APInt &LHSV = LHSC->getValue();
6167
6168 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
6169 .subtract(LHSV);
6170
6171 if (ICI.isSignedPredicate()) {
6172 if (CR.getLower().isSignBit()) {
6173 return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
6174 ConstantInt::get(CR.getUpper()));
6175 } else if (CR.getUpper().isSignBit()) {
6176 return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
6177 ConstantInt::get(CR.getLower()));
6178 }
6179 } else {
6180 if (CR.getLower().isMinValue()) {
6181 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
6182 ConstantInt::get(CR.getUpper()));
6183 } else if (CR.getUpper().isMinValue()) {
6184 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
6185 ConstantInt::get(CR.getLower()));
6186 }
6187 }
6188 }
6189 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006190 }
6191
6192 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
6193 if (ICI.isEquality()) {
6194 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6195
6196 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
6197 // the second operand is a constant, simplify a bit.
6198 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
6199 switch (BO->getOpcode()) {
6200 case Instruction::SRem:
6201 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
6202 if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
6203 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
6204 if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
6205 Instruction *NewRem =
6206 BinaryOperator::createURem(BO->getOperand(0), BO->getOperand(1),
6207 BO->getName());
6208 InsertNewInstBefore(NewRem, ICI);
6209 return new ICmpInst(ICI.getPredicate(), NewRem,
6210 Constant::getNullValue(BO->getType()));
6211 }
6212 }
6213 break;
6214 case Instruction::Add:
6215 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
6216 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6217 if (BO->hasOneUse())
6218 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
6219 Subtract(RHS, BOp1C));
6220 } else if (RHSV == 0) {
6221 // Replace ((add A, B) != 0) with (A != -B) if A or B is
6222 // efficiently invertible, or if the add has just this one use.
6223 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
6224
6225 if (Value *NegVal = dyn_castNegVal(BOp1))
6226 return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
6227 else if (Value *NegVal = dyn_castNegVal(BOp0))
6228 return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
6229 else if (BO->hasOneUse()) {
6230 Instruction *Neg = BinaryOperator::createNeg(BOp1);
6231 InsertNewInstBefore(Neg, ICI);
6232 Neg->takeName(BO);
6233 return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
6234 }
6235 }
6236 break;
6237 case Instruction::Xor:
6238 // For the xor case, we can xor two constants together, eliminating
6239 // the explicit xor.
6240 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
6241 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
6242 ConstantExpr::getXor(RHS, BOC));
6243
6244 // FALLTHROUGH
6245 case Instruction::Sub:
6246 // Replace (([sub|xor] A, B) != 0) with (A != B)
6247 if (RHSV == 0)
6248 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
6249 BO->getOperand(1));
6250 break;
6251
6252 case Instruction::Or:
6253 // If bits are being or'd in that are not present in the constant we
6254 // are comparing against, then the comparison could never succeed!
6255 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
6256 Constant *NotCI = ConstantExpr::getNot(RHS);
6257 if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
6258 return ReplaceInstUsesWith(ICI, ConstantInt::get(Type::Int1Ty,
6259 isICMP_NE));
6260 }
6261 break;
6262
6263 case Instruction::And:
6264 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6265 // If bits are being compared against that are and'd out, then the
6266 // comparison can never succeed!
6267 if ((RHSV & ~BOC->getValue()) != 0)
6268 return ReplaceInstUsesWith(ICI, ConstantInt::get(Type::Int1Ty,
6269 isICMP_NE));
6270
6271 // If we have ((X & C) == C), turn it into ((X & C) != 0).
6272 if (RHS == BOC && RHSV.isPowerOf2())
6273 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
6274 ICmpInst::ICMP_NE, LHSI,
6275 Constant::getNullValue(RHS->getType()));
6276
6277 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
6278 if (isSignBit(BOC)) {
6279 Value *X = BO->getOperand(0);
6280 Constant *Zero = Constant::getNullValue(X->getType());
6281 ICmpInst::Predicate pred = isICMP_NE ?
6282 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
6283 return new ICmpInst(pred, X, Zero);
6284 }
6285
6286 // ((X & ~7) == 0) --> X < 8
6287 if (RHSV == 0 && isHighOnes(BOC)) {
6288 Value *X = BO->getOperand(0);
6289 Constant *NegX = ConstantExpr::getNeg(BOC);
6290 ICmpInst::Predicate pred = isICMP_NE ?
6291 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
6292 return new ICmpInst(pred, X, NegX);
6293 }
6294 }
6295 default: break;
6296 }
6297 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
6298 // Handle icmp {eq|ne} <intrinsic>, intcst.
6299 if (II->getIntrinsicID() == Intrinsic::bswap) {
6300 AddToWorkList(II);
6301 ICI.setOperand(0, II->getOperand(1));
6302 ICI.setOperand(1, ConstantInt::get(RHSV.byteSwap()));
6303 return &ICI;
6304 }
6305 }
6306 } else { // Not a ICMP_EQ/ICMP_NE
6307 // If the LHS is a cast from an integral value of the same size,
6308 // then since we know the RHS is a constant, try to simlify.
6309 if (CastInst *Cast = dyn_cast<CastInst>(LHSI)) {
6310 Value *CastOp = Cast->getOperand(0);
6311 const Type *SrcTy = CastOp->getType();
6312 uint32_t SrcTySize = SrcTy->getPrimitiveSizeInBits();
6313 if (SrcTy->isInteger() &&
6314 SrcTySize == Cast->getType()->getPrimitiveSizeInBits()) {
6315 // If this is an unsigned comparison, try to make the comparison use
6316 // smaller constant values.
6317 if (ICI.getPredicate() == ICmpInst::ICMP_ULT && RHSV.isSignBit()) {
6318 // X u< 128 => X s> -1
6319 return new ICmpInst(ICmpInst::ICMP_SGT, CastOp,
6320 ConstantInt::get(APInt::getAllOnesValue(SrcTySize)));
6321 } else if (ICI.getPredicate() == ICmpInst::ICMP_UGT &&
6322 RHSV == APInt::getSignedMaxValue(SrcTySize)) {
6323 // X u> 127 => X s< 0
6324 return new ICmpInst(ICmpInst::ICMP_SLT, CastOp,
6325 Constant::getNullValue(SrcTy));
6326 }
6327 }
6328 }
6329 }
6330 return 0;
6331}
6332
/// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
/// We only handle extending casts so far.
///
/// Folds comparisons whose LHS is a ptrtoint/zext/sext cast:
///   * ptrtoint compares become pointer compares when the integer type is
///     exactly pointer-sized (no bits lost),
///   * ext(x) pred ext(y) with matching extension kinds compares x and y
///     directly, adjusting the predicate's signedness as needed,
///   * ext(x) pred C compares x against the truncated constant when C
///     round-trips through the narrow type, and otherwise folds the whole
///     compare to a constant or a sign test.
/// Returns a replacement instruction, or null if no fold applies.
Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
  const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
  Value *LHSCIOp        = LHSCI->getOperand(0);
  const Type *SrcTy     = LHSCIOp->getType();
  const Type *DestTy    = LHSCI->getType();
  Value *RHSCIOp;

  // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
  // integer type is the same size as the pointer type.
  if (LHSCI->getOpcode() == Instruction::PtrToInt &&
      getTargetData().getPointerSizeInBits() ==
         cast<IntegerType>(DestTy)->getBitWidth()) {
    Value *RHSOp = 0;
    if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
      // Constant RHS: convert it back to a pointer so both sides match.
      RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
    } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
      RHSOp = RHSC->getOperand(0);
      // If the pointer types don't match, insert a bitcast.
      if (LHSCIOp->getType() != RHSOp->getType())
        RHSOp = InsertBitCastBefore(RHSOp, LHSCIOp->getType(), ICI);
    }

    if (RHSOp)
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
  }

  // The code below only handles extension cast instructions, so far.
  // Enforce this.
  if (LHSCI->getOpcode() != Instruction::ZExt &&
      LHSCI->getOpcode() != Instruction::SExt)
    return 0;

  bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
  bool isSignedCmp = ICI.isSignedPredicate();

  if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
    // Not an extension from the same type?
    RHSCIOp = CI->getOperand(0);
    if (RHSCIOp->getType() != LHSCIOp->getType())
      return 0;

    // If the signedness of the two casts doesn't agree (i.e. one is a sext
    // and the other is a zext), then we can't handle this.
    if (CI->getOpcode() != LHSCI->getOpcode())
      return 0;

    // Deal with equality cases early: eq/ne is unaffected by the extension
    // kind, so compare the narrow operands directly.
    if (ICI.isEquality())
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);

    // A signed comparison of sign extended values simplifies into a
    // signed comparison.
    if (isSignedCmp && isSignedExt)
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);

    // The other three cases all fold into an unsigned comparison.
    return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
  }

  // If we aren't dealing with a constant on the RHS, exit early.
  ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
  if (!CI)
    return 0;

  // Compute the constant that would happen if we truncated to SrcTy then
  // reextended to DestTy.
  Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
  Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy);

  // If the re-extended constant didn't change...
  if (Res2 == CI) {
    // Make sure that sign of the Cmp and the sign of the Cast are the same.
    // For example, we might have:
    //   %A = sext short %X to uint
    //   %B = icmp ugt uint %A, 1330
    // It is incorrect to transform this into
    //   %B = icmp ugt short %X, 1330
    // because %A may have negative value.
    //
    // However, it is OK if SrcTy is bool (See cast-set.ll testcase)
    // OR operation is EQ/NE.
    if (isSignedExt == isSignedCmp || SrcTy == Type::Int1Ty || ICI.isEquality())
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
    else
      return 0;
  }

  // The re-extended constant changed so the constant cannot be represented
  // in the shorter type.  Consequently, we cannot emit a simple comparison.

  // First, handle some easy cases.  We know the result cannot be equal at this
  // point so handle the ICI.isEquality() cases.
  if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
    return ReplaceInstUsesWith(ICI, ConstantInt::getFalse());
  if (ICI.getPredicate() == ICmpInst::ICMP_NE)
    return ReplaceInstUsesWith(ICI, ConstantInt::getTrue());

  // Evaluate the comparison for LT (we invert for GT below).  LE and GE cases
  // should have been folded away previously and not enter in here.
  Value *Result;
  if (isSignedCmp) {
    // We're performing a signed comparison.
    if (cast<ConstantInt>(CI)->getValue().isNegative())
      Result = ConstantInt::getFalse();          // X < (small) --> false
    else
      Result = ConstantInt::getTrue();           // X < (large) --> true
  } else {
    // We're performing an unsigned comparison.
    if (isSignedExt) {
      // We're performing an unsigned comp with a sign extended value.
      // This is true if the input is >= 0. [aka >s -1]
      Constant *NegOne = ConstantInt::getAllOnesValue(SrcTy);
      Result = InsertNewInstBefore(new ICmpInst(ICmpInst::ICMP_SGT, LHSCIOp,
                                   NegOne, ICI.getName()), ICI);
    } else {
      // Unsigned extend & unsigned compare -> always true.
      Result = ConstantInt::getTrue();
    }
  }

  // Finally, return the value computed.  For GT predicates the LT result
  // computed above is logically inverted.
  if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
      ICI.getPredicate() == ICmpInst::ICMP_SLT) {
    return ReplaceInstUsesWith(ICI, Result);
  } else {
    assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
            ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
           "ICmp should be folded!");
    if (Constant *CI = dyn_cast<Constant>(Result))
      return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
    else
      return BinaryOperator::createNot(Result);
  }
}
6470
6471Instruction *InstCombiner::visitShl(BinaryOperator &I) {
6472 return commonShiftTransforms(I);
6473}
6474
6475Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
6476 return commonShiftTransforms(I);
6477}
6478
6479Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
Chris Lattnere3c504f2007-12-06 01:59:46 +00006480 if (Instruction *R = commonShiftTransforms(I))
6481 return R;
6482
6483 Value *Op0 = I.getOperand(0);
6484
6485 // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
6486 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
6487 if (CSI->isAllOnesValue())
6488 return ReplaceInstUsesWith(I, CSI);
6489
6490 // See if we can turn a signed shr into an unsigned shr.
6491 if (MaskedValueIsZero(Op0,
6492 APInt::getSignBit(I.getType()->getPrimitiveSizeInBits())))
6493 return BinaryOperator::createLShr(Op0, I.getOperand(1));
6494
6495 return 0;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006496}
6497
6498Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
6499 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
6500 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6501
6502 // shl X, 0 == X and shr X, 0 == X
6503 // shl 0, X == 0 and shr 0, X == 0
6504 if (Op1 == Constant::getNullValue(Op1->getType()) ||
6505 Op0 == Constant::getNullValue(Op0->getType()))
6506 return ReplaceInstUsesWith(I, Op0);
6507
6508 if (isa<UndefValue>(Op0)) {
6509 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
6510 return ReplaceInstUsesWith(I, Op0);
6511 else // undef << X -> 0, undef >>u X -> 0
6512 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
6513 }
6514 if (isa<UndefValue>(Op1)) {
6515 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
6516 return ReplaceInstUsesWith(I, Op0);
6517 else // X << undef, X >>u undef -> 0
6518 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
6519 }
6520
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006521 // Try to fold constant and into select arguments.
6522 if (isa<Constant>(Op0))
6523 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
6524 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
6525 return R;
6526
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006527 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
6528 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
6529 return Res;
6530 return 0;
6531}
6532
/// FoldShiftByConstant - Fold the shift instruction I, known to shift Op0 by
/// the constant amount Op1.  Handles, in order: demanded-bits simplification,
/// over-wide shift amounts, distributing shl over mul, folding into
/// select/phi operands, hoisting a shift above a trunc, pulling a shift
/// through a single-use binary operator, and combining shift-of-shift pairs.
/// Returns a replacement instruction (or &I if I was mutated in place), or
/// null if nothing applies.
Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                               BinaryOperator &I) {
  bool isLeftShift = I.getOpcode() == Instruction::Shl;

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  uint32_t TypeBits = Op0->getType()->getPrimitiveSizeInBits();
  APInt KnownZero(TypeBits, 0), KnownOne(TypeBits, 0);
  if (SimplifyDemandedBits(&I, APInt::getAllOnesValue(TypeBits),
                           KnownZero, KnownOne))
    return &I;

  // shl uint X, 32 = 0 and shr ubyte Y, 9 = 0, ... just don't eliminate shr
  // of a signed value.  An over-wide ashr is clamped to TypeBits-1 instead,
  // which replicates the sign bit across the whole value.
  //
  if (Op1->uge(TypeBits)) {
    if (I.getOpcode() != Instruction::AShr)
      return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
    else {
      I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
      return &I;
    }
  }

  // ((X*C1) << C2) == (X * (C1 << C2))
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
    if (BO->getOpcode() == Instruction::Mul && isLeftShift)
      if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
        return BinaryOperator::createMul(BO->getOperand(0),
                                         ConstantExpr::getShl(BOOp, Op1));

  // Try to fold constant and into select arguments.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = FoldOpIntoSelect(I, SI, this))
      return R;
  // Likewise, fold the shift into each incoming value of a phi.
  if (isa<PHINode>(Op0))
    if (Instruction *NV = FoldOpIntoPhi(I))
      return NV;

  // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
  if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
    Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
    // If 'shift2' is an ashr, we would have to get the sign bit into a funny
    // place.  Don't try to do this transformation in this case.  Also, we
    // require that the input operand is a shift-by-constant so that we have
    // confidence that the shifts will get folded together.  We could do this
    // xform in more cases, but it is unlikely to be profitable.
    if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
        isa<ConstantInt>(TrOp->getOperand(1))) {
      // Okay, we'll do this xform.  Make the shift of shift.
      Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
      Instruction *NSh = BinaryOperator::create(I.getOpcode(), TrOp, ShAmt,
                                                I.getName());
      InsertNewInstBefore(NSh, I); // (shift2 (shift1 & 0x00FF), c2)

      // For logical shifts, the truncation has the effect of making the high
      // part of the register be zeros.  Emulate this by inserting an AND to
      // clear the top bits as needed.  This 'and' will usually be zapped by
      // other xforms later if dead.
      unsigned SrcSize = TrOp->getType()->getPrimitiveSizeInBits();
      unsigned DstSize = TI->getType()->getPrimitiveSizeInBits();
      APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));

      // The mask we constructed says what the trunc would do if occurring
      // between the shifts.  We want to know the effect *after* the second
      // shift.  We know that it is a logical shift by a constant, so adjust the
      // mask as appropriate.
      if (I.getOpcode() == Instruction::Shl)
        MaskV <<= Op1->getZExtValue();
      else {
        assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
        MaskV = MaskV.lshr(Op1->getZExtValue());
      }

      Instruction *And = BinaryOperator::createAnd(NSh, ConstantInt::get(MaskV),
                                                   TI->getName());
      InsertNewInstBefore(And, I); // shift1 & 0x00FF

      // Return the value truncated to the interesting size.
      return new TruncInst(And, I.getType());
    }
  }

  if (Op0->hasOneUse()) {
    if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
      // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
      Value *V1, *V2;
      ConstantInt *CC;
      switch (Op0BO->getOpcode()) {
      default: break;
      case Instruction::Add:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor: {
        // These operators commute.
        // Turn (Y + (X >> C)) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
            match(Op0BO->getOperand(1),
                  m_Shr(m_Value(V1), m_ConstantInt(CC))) && CC == Op1) {
          Instruction *YS = BinaryOperator::createShl(
                                      Op0BO->getOperand(0), Op1,
                                      Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *X =
            BinaryOperator::create(Op0BO->getOpcode(), YS, V1,
                                   Op0BO->getOperand(1)->getName());
          InsertNewInstBefore(X, I);  // (X + (Y << C))
          uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
          // The mask clears the bits the original >> would have discarded.
          return BinaryOperator::createAnd(X, ConstantInt::get(
                     APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
        }

        // Turn (Y + ((X >> C) & CC)) << C  ->  ((X & (CC << C)) + (Y << C))
        Value *Op0BOOp1 = Op0BO->getOperand(1);
        if (isLeftShift && Op0BOOp1->hasOneUse() &&
            match(Op0BOOp1,
                  m_And(m_Shr(m_Value(V1), m_Value(V2)),m_ConstantInt(CC))) &&
            cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse() &&
            V2 == Op1) {
          Instruction *YS = BinaryOperator::createShl(
                                                   Op0BO->getOperand(0), Op1,
                                                   Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *XM =
            BinaryOperator::createAnd(V1, ConstantExpr::getShl(CC, Op1),
                                      V1->getName()+".mask");
          InsertNewInstBefore(XM, I); // X & (CC << C)

          return BinaryOperator::create(Op0BO->getOpcode(), YS, XM);
        }
      }

      // FALL THROUGH.  Sub is not commutative, so only the operand-0 forms
      // of the patterns above apply.
      case Instruction::Sub: {
        // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0),
                  m_Shr(m_Value(V1), m_ConstantInt(CC))) && CC == Op1) {
          Instruction *YS = BinaryOperator::createShl(
                                                   Op0BO->getOperand(1), Op1,
                                                   Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *X =
            BinaryOperator::create(Op0BO->getOpcode(), V1, YS,
                                   Op0BO->getOperand(0)->getName());
          InsertNewInstBefore(X, I);  // (X + (Y << C))
          uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
          return BinaryOperator::createAnd(X, ConstantInt::get(
                     APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
        }

        // Turn (((X >> C)&CC) + Y) << C  ->  (X + (Y << C)) & (CC << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0),
                  m_And(m_Shr(m_Value(V1), m_Value(V2)),
                        m_ConstantInt(CC))) && V2 == Op1 &&
            cast<BinaryOperator>(Op0BO->getOperand(0))
                ->getOperand(0)->hasOneUse()) {
          Instruction *YS = BinaryOperator::createShl(
                                                     Op0BO->getOperand(1), Op1,
                                                     Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *XM =
            BinaryOperator::createAnd(V1, ConstantExpr::getShl(CC, Op1),
                                      V1->getName()+".mask");
          InsertNewInstBefore(XM, I); // X & (CC << C)

          return BinaryOperator::create(Op0BO->getOpcode(), XM, YS);
        }

        break;
      }
      }


      // If the operand is an bitwise operator with a constant RHS, and the
      // shift is the only use, we can pull it out of the shift.
      if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
        bool isValid = true;     // Valid only for And, Or, Xor
        bool highBitSet = false; // Transform if high bit of constant set?

        switch (Op0BO->getOpcode()) {
        default: isValid = false; break;   // Do not perform transform!
        case Instruction::Add:
          isValid = isLeftShift;           // add only distributes over shl
          break;
        case Instruction::Or:
        case Instruction::Xor:
          highBitSet = false;
          break;
        case Instruction::And:
          highBitSet = true;
          break;
        }

        // If this is a signed shift right, and the high bit is modified
        // by the logical operation, do not perform the transformation.
        // The highBitSet boolean indicates the value of the high bit of
        // the constant which would cause it to be modified for this
        // operation.
        //
        if (isValid && I.getOpcode() == Instruction::AShr)
          isValid = Op0C->getValue()[TypeBits-1] == highBitSet;

        if (isValid) {
          // (bop X, C1) shift C2 --> bop (X shift C2), (C1 shift C2)
          Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);

          Instruction *NewShift =
            BinaryOperator::create(I.getOpcode(), Op0BO->getOperand(0), Op1);
          InsertNewInstBefore(NewShift, I);
          NewShift->takeName(Op0BO);

          return BinaryOperator::create(Op0BO->getOpcode(), NewShift,
                                        NewRHS);
        }
      }
    }
  }

  // Find out if this is a shift of a shift by a constant.
  BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
  if (ShiftOp && !ShiftOp->isShift())
    ShiftOp = 0;

  if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
    ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
    uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
    uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
    assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
    if (ShiftAmt1 == 0) return 0;  // Will be simplified in the future.
    Value *X = ShiftOp->getOperand(0);

    uint32_t AmtSum = ShiftAmt1+ShiftAmt2;   // Fold into one big shift.
    if (AmtSum > TypeBits)
      AmtSum = TypeBits;

    const IntegerType *Ty = cast<IntegerType>(I.getType());

    // Check for (X << c1) << c2  and  (X >> c1) >> c2
    if (I.getOpcode() == ShiftOp->getOpcode()) {
      return BinaryOperator::create(I.getOpcode(), X,
                                    ConstantInt::get(Ty, AmtSum));
    } else if (ShiftOp->getOpcode() == Instruction::LShr &&
               I.getOpcode() == Instruction::AShr) {
      // ((X >>u C1) >>s C2) -> (X >>u (C1+C2))  since C1 != 0.
      // The lshr already cleared the sign bit, so the ashr is logical.
      return BinaryOperator::createLShr(X, ConstantInt::get(Ty, AmtSum));
    } else if (ShiftOp->getOpcode() == Instruction::AShr &&
               I.getOpcode() == Instruction::LShr) {
      // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
      Instruction *Shift =
        BinaryOperator::createAShr(X, ConstantInt::get(Ty, AmtSum));
      InsertNewInstBefore(Shift, I);

      // Mask off the sign bits the ashr replicated but the lshr would clear.
      APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
      return BinaryOperator::createAnd(Shift, ConstantInt::get(Mask));
    }

    // Okay, if we get here, one shift must be left, and the other shift must be
    // right.  See if the amounts are equal.
    if (ShiftAmt1 == ShiftAmt2) {
      // If we have ((X >>? C) << C), turn this into X & (-1 << C).
      if (I.getOpcode() == Instruction::Shl) {
        APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
        return BinaryOperator::createAnd(X, ConstantInt::get(Mask));
      }
      // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
      if (I.getOpcode() == Instruction::LShr) {
        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
        return BinaryOperator::createAnd(X, ConstantInt::get(Mask));
      }
      // We can simplify ((X << C) >>s C) into a trunc + sext.
      // NOTE: we could do this for any C, but that would make 'unusual' integer
      // types.  For now, just stick to ones well-supported by the code
      // generators.
      const Type *SExtType = 0;
      switch (Ty->getBitWidth() - ShiftAmt1) {
      case 1 :
      case 8 :
      case 16 :
      case 32 :
      case 64 :
      case 128:
        SExtType = IntegerType::get(Ty->getBitWidth() - ShiftAmt1);
        break;
      default: break;
      }
      if (SExtType) {
        Instruction *NewTrunc = new TruncInst(X, SExtType, "sext");
        InsertNewInstBefore(NewTrunc, I);
        return new SExtInst(NewTrunc, Ty);
      }
      // Otherwise, we can't handle it yet.
    } else if (ShiftAmt1 < ShiftAmt2) {
      uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;

      // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
      if (I.getOpcode() == Instruction::Shl) {
        assert(ShiftOp->getOpcode() == Instruction::LShr ||
               ShiftOp->getOpcode() == Instruction::AShr);
        Instruction *Shift =
          BinaryOperator::createShl(X, ConstantInt::get(Ty, ShiftDiff));
        InsertNewInstBefore(Shift, I);

        APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::createAnd(Shift, ConstantInt::get(Mask));
      }

      // (X << C1) >>u C2  --> X >>u (C2-C1) & (-1 >> C2)
      if (I.getOpcode() == Instruction::LShr) {
        assert(ShiftOp->getOpcode() == Instruction::Shl);
        Instruction *Shift =
          BinaryOperator::createLShr(X, ConstantInt::get(Ty, ShiftDiff));
        InsertNewInstBefore(Shift, I);

        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::createAnd(Shift, ConstantInt::get(Mask));
      }

      // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
    } else {
      assert(ShiftAmt2 < ShiftAmt1);
      uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;

      // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
      if (I.getOpcode() == Instruction::Shl) {
        assert(ShiftOp->getOpcode() == Instruction::LShr ||
               ShiftOp->getOpcode() == Instruction::AShr);
        Instruction *Shift =
          BinaryOperator::create(ShiftOp->getOpcode(), X,
                                 ConstantInt::get(Ty, ShiftDiff));
        InsertNewInstBefore(Shift, I);

        APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::createAnd(Shift, ConstantInt::get(Mask));
      }

      // (X << C1) >>u C2  --> X << (C1-C2) & (-1 >> C2)
      if (I.getOpcode() == Instruction::LShr) {
        assert(ShiftOp->getOpcode() == Instruction::Shl);
        Instruction *Shift =
          BinaryOperator::createShl(X, ConstantInt::get(Ty, ShiftDiff));
        InsertNewInstBefore(Shift, I);

        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::createAnd(Shift, ConstantInt::get(Mask));
      }

      // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
    }
  }
  return 0;
}
6885
6886
6887/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
6888/// expression. If so, decompose it, returning some value X, such that Val is
6889/// X*Scale+Offset.
6890///
6891static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
6892 int &Offset) {
6893 assert(Val->getType() == Type::Int32Ty && "Unexpected allocation size type!");
6894 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
6895 Offset = CI->getZExtValue();
Chris Lattnerc59171a2007-10-12 05:30:59 +00006896 Scale = 0;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006897 return ConstantInt::get(Type::Int32Ty, 0);
Chris Lattnerc59171a2007-10-12 05:30:59 +00006898 } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
6899 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
6900 if (I->getOpcode() == Instruction::Shl) {
6901 // This is a value scaled by '1 << the shift amt'.
6902 Scale = 1U << RHS->getZExtValue();
6903 Offset = 0;
6904 return I->getOperand(0);
6905 } else if (I->getOpcode() == Instruction::Mul) {
6906 // This value is scaled by 'RHS'.
6907 Scale = RHS->getZExtValue();
6908 Offset = 0;
6909 return I->getOperand(0);
6910 } else if (I->getOpcode() == Instruction::Add) {
6911 // We have X+C. Check to see if we really have (X*C2)+C1,
6912 // where C1 is divisible by C2.
6913 unsigned SubScale;
6914 Value *SubVal =
6915 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
6916 Offset += RHS->getZExtValue();
6917 Scale = SubScale;
6918 return SubVal;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006919 }
6920 }
6921 }
6922
6923 // Otherwise, we can't look past this.
6924 Scale = 1;
6925 Offset = 0;
6926 return Val;
6927}
6928
6929
/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
/// try to eliminate the cast by moving the type information into the alloc.
///
/// CI is the bitcast of AI's result; AI is the malloc/alloca being cast.
/// Returns a replacement for CI, or null if the transform does not apply.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocationInst &AI) {
  const PointerType *PTy = cast<PointerType>(CI.getType());

  // Remove any uses of AI that are dead.
  assert(!CI.use_empty() && "Dead instructions should be removed earlier!");

  for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (isInstructionTriviallyDead(User)) {
      // Advance past any additional uses of AI by this same instruction, so
      // UI stays valid after the instruction is erased below.
      while (UI != E && *UI == User)
        ++UI; // If this instruction uses AI more than once, don't break UI.

      ++NumDeadInst;
      DOUT << "IC: DCE: " << *User;
      EraseInstFromFunction(*User);
    }
  }

  // Get the type really allocated and the type casted to.
  const Type *AllocElTy = AI.getAllocatedType();
  const Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;

  // Never promote to a type with weaker alignment than the allocation.
  unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return 0;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;

  uint64_t AllocElTySize = TD->getABITypeSize(AllocElTy);
  uint64_t CastElTySize = TD->getABITypeSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return 0;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  int ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.  Both the scaled part and the constant-offset part of the
  // allocated byte count must divide evenly by the new element size.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return 0;

  // Number of new-type elements contributed per decomposed unit.
  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = 0;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    // If the allocation size is constant, form a constant mul expression
    Amt = ConstantInt::get(Type::Int32Ty, Scale);
    if (isa<ConstantInt>(NumElements))
      Amt = Multiply(cast<ConstantInt>(NumElements), cast<ConstantInt>(Amt));
    // otherwise multiply the amount and the number of elements
    else if (Scale != 1) {
      Instruction *Tmp = BinaryOperator::createMul(Amt, NumElements, "tmp");
      Amt = InsertNewInstBefore(Tmp, AI);
    }
  }

  // Fold the constant-offset portion (converted to new-type elements) into
  // the element count.
  if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(Type::Int32Ty, Offset, true);
    Instruction *Tmp = BinaryOperator::createAdd(Amt, Off, "tmp");
    Amt = InsertNewInstBefore(Tmp, AI);
  }

  // Rebuild the allocation with the cast-to element type, preserving the
  // malloc-vs-alloca kind and the original alignment.
  AllocationInst *New;
  if (isa<MallocInst>(AI))
    New = new MallocInst(CastElTy, Amt, AI.getAlignment());
  else
    New = new AllocaInst(CastElTy, Amt, AI.getAlignment());
  InsertNewInstBefore(New, AI);
  New->takeName(&AI);

  // If the allocation has multiple uses, insert a cast and change all things
  // that used it to use the new cast.  This will also hack on CI, but it will
  // die soon.
  if (!AI.hasOneUse()) {
    AddUsesToWorkList(AI);
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    CastInst *NewCast = new BitCastInst(New, AI.getType(), "tmpcast");
    InsertNewInstBefore(NewCast, AI);
    AI.replaceAllUsesWith(NewCast);
  }
  return ReplaceInstUsesWith(CI, New);
}
7024
/// CanEvaluateInDifferentType - Return true if we can take the specified value
/// and return it as type Ty without inserting any new casts and without
/// changing the computed value.  This is used by code that tries to decide
/// whether promoting or shrinking integer operations to wider or smaller types
/// will allow us to eliminate a truncate or extend.
///
/// This is a truncation operation if Ty is smaller than V->getType(), or an
/// extension operation if Ty is larger.
///
/// CastOpc is the cast opcode (Trunc/ZExt/SExt) we are trying to eliminate.
/// NumCastsRemoved is incremented once for each input cast that the
/// conversion would make redundant; callers use it to judge profitability.
bool InstCombiner::CanEvaluateInDifferentType(Value *V, const IntegerType *Ty,
                                              unsigned CastOpc,
                                              int &NumCastsRemoved) {
  // We can always evaluate constants in another type.
  if (isa<ConstantInt>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  const IntegerType *OrigTy = cast<IntegerType>(V->getType());

  // If this is an extension or truncate, we can often eliminate it.
  if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
    // If this is a cast from the destination type, we can trivially eliminate
    // it, and this will remove a cast overall.
    if (I->getOperand(0)->getType() == Ty) {
      // If the first operand is itself a cast, and is eliminable, do not count
      // this as an eliminable cast.  We would prefer to eliminate those two
      // casts first.
      if (!isa<CastInst>(I->getOperand(0)))
        ++NumCastsRemoved;
      return true;
    }
  }

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    // Both operands must themselves be evaluable in the new type.
    return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                      NumCastsRemoved) &&
           CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
                                      NumCastsRemoved);

  case Instruction::Mul:
    // A multiply can be truncated by truncating its operands.
    // (Truncation only: widening a mul would change the high bits.)
    return Ty->getBitWidth() < OrigTy->getBitWidth() &&
           CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                      NumCastsRemoved) &&
           CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
                                      NumCastsRemoved);

  case Instruction::Shl:
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    // The shift amount must still be in range for the narrower type.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t BitWidth = Ty->getBitWidth();
      if (BitWidth < OrigTy->getBitWidth() &&
          CI->getLimitedValue(BitWidth) < BitWidth)
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved);
    }
    break;
  case Instruction::LShr:
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t OrigBitWidth = OrigTy->getBitWidth();
      uint32_t BitWidth = Ty->getBitWidth();
      if (BitWidth < OrigBitWidth &&
          MaskedValueIsZero(I->getOperand(0),
            APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
          CI->getLimitedValue(BitWidth) < BitWidth) {
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved);
      }
    }
    break;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    // If this is the same kind of case as our original (e.g. zext+zext), we
    // can safely replace it.  Note that replacing it does not reduce the number
    // of casts in the input.
    if (I->getOpcode() == CastOpc)
      return true;

    break;
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
7127
7128/// EvaluateInDifferentType - Given an expression that
7129/// CanEvaluateInDifferentType returns true for, actually insert the code to
7130/// evaluate the expression.
7131Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
7132 bool isSigned) {
7133 if (Constant *C = dyn_cast<Constant>(V))
7134 return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
7135
7136 // Otherwise, it must be an instruction.
7137 Instruction *I = cast<Instruction>(V);
7138 Instruction *Res = 0;
7139 switch (I->getOpcode()) {
7140 case Instruction::Add:
7141 case Instruction::Sub:
Nick Lewyckyc52646a2008-01-22 05:08:48 +00007142 case Instruction::Mul:
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007143 case Instruction::And:
7144 case Instruction::Or:
7145 case Instruction::Xor:
7146 case Instruction::AShr:
7147 case Instruction::LShr:
7148 case Instruction::Shl: {
7149 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
7150 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
7151 Res = BinaryOperator::create((Instruction::BinaryOps)I->getOpcode(),
7152 LHS, RHS, I->getName());
7153 break;
7154 }
7155 case Instruction::Trunc:
7156 case Instruction::ZExt:
7157 case Instruction::SExt:
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007158 // If the source type of the cast is the type we're trying for then we can
Chris Lattneref70bb82007-08-02 06:11:14 +00007159 // just return the source. There's no need to insert it because it is not
7160 // new.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007161 if (I->getOperand(0)->getType() == Ty)
7162 return I->getOperand(0);
7163
Chris Lattneref70bb82007-08-02 06:11:14 +00007164 // Otherwise, must be the same type of case, so just reinsert a new one.
7165 Res = CastInst::create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),
7166 Ty, I->getName());
7167 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007168 default:
7169 // TODO: Can handle more cases here.
7170 assert(0 && "Unreachable!");
7171 break;
7172 }
7173
7174 return InsertNewInstBefore(Res, *I);
7175}
7176
7177/// @brief Implement the transforms common to all CastInst visitors.
7178Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
7179 Value *Src = CI.getOperand(0);
7180
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007181 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
7182 // eliminate it now.
7183 if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
7184 if (Instruction::CastOps opc =
7185 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
7186 // The first cast (CSrc) is eliminable so we need to fix up or replace
7187 // the second cast (CI). CSrc will then have a good chance of being dead.
7188 return CastInst::create(opc, CSrc->getOperand(0), CI.getType());
7189 }
7190 }
7191
7192 // If we are casting a select then fold the cast into the select
7193 if (SelectInst *SI = dyn_cast<SelectInst>(Src))
7194 if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
7195 return NV;
7196
7197 // If we are casting a PHI then fold the cast into the PHI
7198 if (isa<PHINode>(Src))
7199 if (Instruction *NV = FoldOpIntoPhi(CI))
7200 return NV;
7201
7202 return 0;
7203}
7204
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
///
/// Looks through getelementptr instructions feeding the cast: a zero-offset
/// GEP can be bypassed entirely, and a constant-offset GEP of a bitcast can
/// sometimes be rebuilt as a GEP on the original (pre-bitcast) pointer.
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset, turn
    // this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices()) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      AddToWorkList(GEP);
      CI.setOperand(0, GEP->getOperand(0));
      return &CI;
    }

    // If the GEP has a single use, and the base pointer is a bitcast, and the
    // GEP computes a constant offset, see if we can convert these three
    // instructions into fewer.  This typically happens with unions and other
    // non-type-safe code.
    if (GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
      if (GEP->hasAllConstantIndices()) {
        // We are guaranteed to get a constant from EmitGEPOffset.
        ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP, CI, *this));
        int64_t Offset = OffsetV->getSExtValue();

        // Get the base pointer input of the bitcast, and the type it points to.
        Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
        const Type *GEPIdxTy =
          cast<PointerType>(OrigBase->getType())->getElementType();
        if (GEPIdxTy->isSized()) {
          SmallVector<Value*, 8> NewIndices;

          // Start with the index over the outer type.  Note that the type size
          // might be zero (even if the offset isn't zero) if the indexed type
          // is something like [0 x {int, int}]
          const Type *IntPtrTy = TD->getIntPtrType();
          int64_t FirstIdx = 0;
          if (int64_t TySize = TD->getABITypeSize(GEPIdxTy)) {
            FirstIdx = Offset/TySize;
            Offset %= TySize;

            // Handle silly modulus not returning values in [0..TySize):
            // C division/remainder round toward zero, so a negative Offset
            // must be normalized into range by borrowing one element.
            if (Offset < 0) {
              --FirstIdx;
              Offset += TySize;
              assert(Offset >= 0);
            }
            assert((uint64_t)Offset < (uint64_t)TySize &&"Out of range offset");
          }

          NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

          // Index into the types.  If we fail, set OrigBase to null.
          while (Offset) {
            if (const StructType *STy = dyn_cast<StructType>(GEPIdxTy)) {
              const StructLayout *SL = TD->getStructLayout(STy);
              if (Offset < (int64_t)SL->getSizeInBytes()) {
                unsigned Elt = SL->getElementContainingOffset(Offset);
                NewIndices.push_back(ConstantInt::get(Type::Int32Ty, Elt));

                Offset -= SL->getElementOffset(Elt);
                GEPIdxTy = STy->getElementType(Elt);
              } else {
                // Otherwise, we can't index into this, bail out.
                Offset = 0;
                OrigBase = 0;
              }
            } else if (isa<ArrayType>(GEPIdxTy) || isa<VectorType>(GEPIdxTy)) {
              const SequentialType *STy = cast<SequentialType>(GEPIdxTy);
              // Zero-sized elements (e.g. empty structs) get a zero index so
              // the loop can still descend into the element type.
              if (uint64_t EltSize = TD->getABITypeSize(STy->getElementType())){
                NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
                Offset %= EltSize;
              } else {
                NewIndices.push_back(ConstantInt::get(IntPtrTy, 0));
              }
              GEPIdxTy = STy->getElementType();
            } else {
              // Otherwise, we can't index into this, bail out.
              Offset = 0;
              OrigBase = 0;
            }
          }
          if (OrigBase) {
            // If we were able to index down into an element, create the GEP
            // and bitcast the result.  This eliminates one bitcast, potentially
            // two.
            Instruction *NGEP = GetElementPtrInst::Create(OrigBase,
                                                          NewIndices.begin(),
                                                          NewIndices.end(), "");
            InsertNewInstBefore(NGEP, CI);
            NGEP->takeName(GEP);

            if (isa<BitCastInst>(CI))
              return new BitCastInst(NGEP, CI.getType());
            assert(isa<PtrToIntInst>(CI));
            return new PtrToIntInst(NGEP, CI.getType());
          }
        }
      }
    }
  }

  return commonCastTransforms(CI);
}
7310
7311
7312
7313/// Only the TRUNC, ZEXT, SEXT, and BITCAST can both operand and result as
7314/// integer types. This function implements the common transforms for all those
7315/// cases.
7316/// @brief Implement the transforms common to CastInst with integer operands
7317Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
7318 if (Instruction *Result = commonCastTransforms(CI))
7319 return Result;
7320
7321 Value *Src = CI.getOperand(0);
7322 const Type *SrcTy = Src->getType();
7323 const Type *DestTy = CI.getType();
7324 uint32_t SrcBitSize = SrcTy->getPrimitiveSizeInBits();
7325 uint32_t DestBitSize = DestTy->getPrimitiveSizeInBits();
7326
7327 // See if we can simplify any instructions used by the LHS whose sole
7328 // purpose is to compute bits we don't care about.
7329 APInt KnownZero(DestBitSize, 0), KnownOne(DestBitSize, 0);
7330 if (SimplifyDemandedBits(&CI, APInt::getAllOnesValue(DestBitSize),
7331 KnownZero, KnownOne))
7332 return &CI;
7333
7334 // If the source isn't an instruction or has more than one use then we
7335 // can't do anything more.
7336 Instruction *SrcI = dyn_cast<Instruction>(Src);
7337 if (!SrcI || !Src->hasOneUse())
7338 return 0;
7339
7340 // Attempt to propagate the cast into the instruction for int->int casts.
7341 int NumCastsRemoved = 0;
7342 if (!isa<BitCastInst>(CI) &&
7343 CanEvaluateInDifferentType(SrcI, cast<IntegerType>(DestTy),
Chris Lattneref70bb82007-08-02 06:11:14 +00007344 CI.getOpcode(), NumCastsRemoved)) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007345 // If this cast is a truncate, evaluting in a different type always
Chris Lattneref70bb82007-08-02 06:11:14 +00007346 // eliminates the cast, so it is always a win. If this is a zero-extension,
7347 // we need to do an AND to maintain the clear top-part of the computation,
7348 // so we require that the input have eliminated at least one cast. If this
7349 // is a sign extension, we insert two new casts (to do the extension) so we
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007350 // require that two casts have been eliminated.
7351 bool DoXForm;
7352 switch (CI.getOpcode()) {
7353 default:
7354 // All the others use floating point so we shouldn't actually
7355 // get here because of the check above.
7356 assert(0 && "Unknown cast type");
7357 case Instruction::Trunc:
7358 DoXForm = true;
7359 break;
7360 case Instruction::ZExt:
7361 DoXForm = NumCastsRemoved >= 1;
7362 break;
7363 case Instruction::SExt:
7364 DoXForm = NumCastsRemoved >= 2;
7365 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00007366 }
7367
7368 if (DoXForm) {
7369 Value *Res = EvaluateInDifferentType(SrcI, DestTy,
7370 CI.getOpcode() == Instruction::SExt);
7371 assert(Res->getType() == DestTy);
7372 switch (CI.getOpcode()) {
7373 default: assert(0 && "Unknown cast type!");
7374 case Instruction::Trunc:
7375 case Instruction::BitCast:
7376 // Just replace this cast with the result.
7377 return ReplaceInstUsesWith(CI, Res);
7378 case Instruction::ZExt: {
7379 // We need to emit an AND to clear the high bits.
7380 assert(SrcBitSize < DestBitSize && "Not a zext?");
7381 Constant *C = ConstantInt::get(APInt::getLowBitsSet(DestBitSize,
7382 SrcBitSize));
7383 return BinaryOperator::createAnd(Res, C);
7384 }
7385 case Instruction::SExt:
7386 // We need to emit a cast to truncate, then a cast to sext.
7387 return CastInst::create(Instruction::SExt,
7388 InsertCastBefore(Instruction::Trunc, Res, Src->getType(),
7389 CI), DestTy);
7390 }
7391 }
7392 }
7393
7394 Value *Op0 = SrcI->getNumOperands() > 0 ? SrcI->getOperand(0) : 0;
7395 Value *Op1 = SrcI->getNumOperands() > 1 ? SrcI->getOperand(1) : 0;
7396
7397 switch (SrcI->getOpcode()) {
7398 case Instruction::Add:
7399 case Instruction::Mul:
7400 case Instruction::And:
7401 case Instruction::Or:
7402 case Instruction::Xor:
7403 // If we are discarding information, rewrite.
7404 if (DestBitSize <= SrcBitSize && DestBitSize != 1) {
7405 // Don't insert two casts if they cannot be eliminated. We allow
7406 // two casts to be inserted if the sizes are the same. This could
7407 // only be converting signedness, which is a noop.
7408 if (DestBitSize == SrcBitSize ||
7409 !ValueRequiresCast(CI.getOpcode(), Op1, DestTy,TD) ||
7410 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
7411 Instruction::CastOps opcode = CI.getOpcode();
7412 Value *Op0c = InsertOperandCastBefore(opcode, Op0, DestTy, SrcI);
7413 Value *Op1c = InsertOperandCastBefore(opcode, Op1, DestTy, SrcI);
7414 return BinaryOperator::create(
7415 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
7416 }
7417 }
7418
7419 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
7420 if (isa<ZExtInst>(CI) && SrcBitSize == 1 &&
7421 SrcI->getOpcode() == Instruction::Xor &&
7422 Op1 == ConstantInt::getTrue() &&
7423 (!Op0->hasOneUse() || !isa<CmpInst>(Op0))) {
7424 Value *New = InsertOperandCastBefore(Instruction::ZExt, Op0, DestTy, &CI);
7425 return BinaryOperator::createXor(New, ConstantInt::get(CI.getType(), 1));
7426 }
7427 break;
7428 case Instruction::SDiv:
7429 case Instruction::UDiv:
7430 case Instruction::SRem:
7431 case Instruction::URem:
7432 // If we are just changing the sign, rewrite.
7433 if (DestBitSize == SrcBitSize) {
7434 // Don't insert two casts if they cannot be eliminated. We allow
7435 // two casts to be inserted if the sizes are the same. This could
7436 // only be converting signedness, which is a noop.
7437 if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) ||
7438 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
7439 Value *Op0c = InsertOperandCastBefore(Instruction::BitCast,
7440 Op0, DestTy, SrcI);
7441 Value *Op1c = InsertOperandCastBefore(Instruction::BitCast,
7442 Op1, DestTy, SrcI);
7443 return BinaryOperator::create(
7444 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
7445 }
7446 }
7447 break;
7448
7449 case Instruction::Shl:
7450 // Allow changing the sign of the source operand. Do not allow
7451 // changing the size of the shift, UNLESS the shift amount is a
7452 // constant. We must not change variable sized shifts to a smaller
7453 // size, because it is undefined to shift more bits out than exist
7454 // in the value.
7455 if (DestBitSize == SrcBitSize ||
7456 (DestBitSize < SrcBitSize && isa<Constant>(Op1))) {
7457 Instruction::CastOps opcode = (DestBitSize == SrcBitSize ?
7458 Instruction::BitCast : Instruction::Trunc);
7459 Value *Op0c = InsertOperandCastBefore(opcode, Op0, DestTy, SrcI);
7460 Value *Op1c = InsertOperandCastBefore(opcode, Op1, DestTy, SrcI);
7461 return BinaryOperator::createShl(Op0c, Op1c);
7462 }
7463 break;
7464 case Instruction::AShr:
7465 // If this is a signed shr, and if all bits shifted in are about to be
7466 // truncated off, turn it into an unsigned shr to allow greater
7467 // simplifications.
7468 if (DestBitSize < SrcBitSize &&
7469 isa<ConstantInt>(Op1)) {
7470 uint32_t ShiftAmt = cast<ConstantInt>(Op1)->getLimitedValue(SrcBitSize);
7471 if (SrcBitSize > ShiftAmt && SrcBitSize-ShiftAmt >= DestBitSize) {
7472 // Insert the new logical shift right.
7473 return BinaryOperator::createLShr(Op0, Op1);
7474 }
7475 }
7476 break;
7477 }
7478 return 0;
7479}
7480
/// visitTrunc - Combine trunc instructions, beyond the common int-cast
/// transforms: shrink a truncated lshr when the shifted-in bits are known
/// zero, and turn 'trunc (lshr X, Y) to i1' into a single-bit test.
Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonIntCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  const Type *Ty = CI.getType();
  uint32_t DestBitWidth = Ty->getPrimitiveSizeInBits();
  uint32_t SrcBitWidth = cast<IntegerType>(Src->getType())->getBitWidth();

  if (Instruction *SrcI = dyn_cast<Instruction>(Src)) {
    switch (SrcI->getOpcode()) {
    default: break;
    case Instruction::LShr:
      // We can shrink lshr to something smaller if we know the bits shifted in
      // are already zeros.
      if (ConstantInt *ShAmtV = dyn_cast<ConstantInt>(SrcI->getOperand(1))) {
        uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);

        // Get a mask for the bits shifting in: the ShAmt bits just above the
        // truncated result.
        APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
        Value* SrcIOp0 = SrcI->getOperand(0);
        if (SrcI->hasOneUse() && MaskedValueIsZero(SrcIOp0, Mask)) {
          if (ShAmt >= DestBitWidth)        // All zeros.
            return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty));

          // Okay, we can shrink this.  Truncate the input, then return a new
          // shift.
          Value *V1 = InsertCastBefore(Instruction::Trunc, SrcIOp0, Ty, CI);
          Value *V2 = InsertCastBefore(Instruction::Trunc, SrcI->getOperand(1),
                                       Ty, CI);
          return BinaryOperator::createLShr(V1, V2);
        }
      } else {     // This is a variable shr.

        // Turn 'trunc (lshr X, Y) to bool' into '(X & (1 << Y)) != 0'.  This is
        // more LLVM instructions, but allows '1 << Y' to be hoisted if
        // loop-invariant and CSE'd.
        if (CI.getType() == Type::Int1Ty && SrcI->hasOneUse()) {
          Value *One = ConstantInt::get(SrcI->getType(), 1);

          Value *V = InsertNewInstBefore(
              BinaryOperator::createShl(One, SrcI->getOperand(1),
                                     "tmp"), CI);
          V = InsertNewInstBefore(BinaryOperator::createAnd(V,
                                                            SrcI->getOperand(0),
                                                            "tmp"), CI);
          Value *Zero = Constant::getNullValue(V->getType());
          return new ICmpInst(ICmpInst::ICMP_NE, V, Zero);
        }
      }
      break;
    }
  }

  return 0;
}
7537
/// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
/// in order to eliminate the icmp.
///
/// If DoXform is false, only report (by returning ICI) whether a transform
/// would apply, without creating any instructions.  Returns null when no
/// transform applies.
Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                             bool DoXform) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
    const APInt &Op1CV = Op1C->getValue();

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
        (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
      if (!DoXform) return ICI;

      // Shift the sign bit of the operand down into bit zero.
      Value *In = ICI->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getPrimitiveSizeInBits()-1);
      In = InsertNewInstBefore(BinaryOperator::createLShr(In, Sh,
                                                      In->getName()+".lobit"),
                               CI);
      // Widen/narrow the bit to the zext's destination type if needed.
      if (In->getType() != CI.getType())
        In = CastInst::createIntegerCast(In, CI.getType(),
                                         false/*ZExt*/, "tmp", &CI);

      // For the >s -1 form, invert the low bit.
      if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = InsertNewInstBefore(BinaryOperator::createXor(In, One,
                                                        In->getName()+".not"),
                                 CI);
      }

      return ReplaceInstUsesWith(CI, In);
    }



    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
        // This only works for EQ and NE
        ICI->isEquality()) {
      // If Op1C some other power of two, convert:
      // Use known-bits analysis to check that X can have at most one set bit.
      uint32_t BitWidth = Op1C->getType()->getBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      APInt TypeMask(APInt::getAllOnesValue(BitWidth));
      ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);

      APInt KnownZeroMask(~KnownZero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoXform) return ICI;

        bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
        if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Type::Int1Ty, isNE);
          Res = ConstantExpr::getZExt(Res, CI.getType());
          return ReplaceInstUsesWith(CI, Res);
        }

        uint32_t ShiftAmt = KnownZeroMask.logBase2();
        Value *In = ICI->getOperand(0);
        if (ShiftAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = InsertNewInstBefore(BinaryOperator::createLShr(In,
                  ConstantInt::get(In->getType(), ShiftAmt),
                                                   In->getName()+".lobit"), CI);
        }

        if ((Op1CV != 0) == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = BinaryOperator::createXor(In, One, "tmp");
          InsertNewInstBefore(cast<Instruction>(In), CI);
        }

        if (CI.getType() == In->getType())
          return ReplaceInstUsesWith(CI, In);
        else
          return CastInst::createIntegerCast(In, CI.getType(), false/*ZExt*/);
      }
    }
  }

  return 0;
}
7632
/// visitZExt - Simplify a zext instruction: run the shared integer-cast
/// transforms, then attempt zext-specific folds:
///   zext(trunc x)        --> x & lowbitmask   (when the outer type == x's type)
///   zext(icmp ...)       --> bit tests via transformZExtICmp
///   zext(or icmp, icmp)  --> or(zext icmp, zext icmp) when profitable
/// Returns a replacement instruction, or 0 if no simplification applied.
Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If one of the common conversion will work ..
  if (Instruction *Result = commonIntCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);

  // If this is a cast of a cast
  if (CastInst *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    // If this is a TRUNC followed by a ZEXT then we are dealing with integral
    // types and if the sizes are just right we can convert this into a logical
    // 'and' which will be much cheaper than the pair of casts.
    if (isa<TruncInst>(CSrc)) {
      // Get the sizes of the types involved
      Value *A = CSrc->getOperand(0);
      uint32_t SrcSize = A->getType()->getPrimitiveSizeInBits();
      uint32_t MidSize = CSrc->getType()->getPrimitiveSizeInBits();
      uint32_t DstSize = CI.getType()->getPrimitiveSizeInBits();
      // If we're actually extending zero bits and the trunc is a no-op
      // (i.e. the zext's destination type is exactly the pre-trunc type),
      // the trunc+zext pair just clears the high bits.
      if (MidSize < DstSize && SrcSize == DstSize) {
        // Replace both of the casts with an And of the type mask, keeping
        // only the MidSize low bits that survived the truncate.
        APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
        Constant *AndConst = ConstantInt::get(AndValue);
        Instruction *And = 
          BinaryOperator::createAnd(CSrc->getOperand(0), AndConst);
        // Unfortunately, if the type changed, we need to cast it back.
        // NOTE(review): with SrcSize == DstSize this branch looks
        // unreachable for integer types — presumably kept defensively.
        if (And->getType() != CI.getType()) {
          And->setName(CSrc->getName()+".mask");
          InsertNewInstBefore(And, CI);
          And = CastInst::createIntegerCast(And, CI.getType(), false/*ZExt*/);
        }
        return And;
      }
    }
  }

  // zext of a comparison: turn it into shift/mask arithmetic where possible.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(ICI, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) will be transformed.  The DoXform=false probe calls
    // only query feasibility; they do not mutate the IR.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      Value *LCast = InsertCastBefore(Instruction::ZExt, LHS, CI.getType(), CI);
      Value *RCast = InsertCastBefore(Instruction::ZExt, RHS, CI.getType(), CI);
      return BinaryOperator::create(Instruction::Or, LCast, RCast);
    }
  }

  return 0;
}
7689
/// visitSExt - Simplify a sext instruction: run the shared integer-cast
/// transforms, then fold sign-bit tests into arithmetic shifts:
///   sext (x <s  0) --> ashr x, bitwidth-1        (all-ones iff sign bit set)
///   sext (x >s -1) --> not (ashr x, bitwidth-1)  (all-ones iff sign bit clear)
/// Returns a replacement instruction, or 0 if no simplification applied.
Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  if (Instruction *I = commonIntCastTransforms(CI))
    return I;
  
  Value *Src = CI.getOperand(0);
  
  // sext (x <s 0) -> ashr x, 31 -> all ones if signed
  // sext (x >s -1) -> ashr x, 31 -> all ones if not signed
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src)) {
    // If the compare is against a constant, see if it is a sign-bit test
    // that we can replace with a shift (avoiding the comparison entirely).
    if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
      const APInt &Op1CV = Op1C->getValue();
      
      // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
      // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
      if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
          (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())){
        Value *In = ICI->getOperand(0);
        // Shift the sign bit down into bit 0, replicating it across the word.
        Value *Sh = ConstantInt::get(In->getType(),
                                     In->getType()->getPrimitiveSizeInBits()-1);
        In = InsertNewInstBefore(BinaryOperator::createAShr(In, Sh,
                                                        In->getName()+".lobit"),
                                 CI);
        // Widen/narrow with a sign-extending cast if the widths differ.
        if (In->getType() != CI.getType())
          In = CastInst::createIntegerCast(In, CI.getType(),
                                           true/*SExt*/, "tmp", &CI);
        
        // For the SGT -1 form the predicate is inverted, so invert the mask.
        if (ICI->getPredicate() == ICmpInst::ICMP_SGT)
          In = InsertNewInstBefore(BinaryOperator::createNot(In,
                                                    In->getName()+".not"), CI);
        
        return ReplaceInstUsesWith(CI, In);
      }
    }
  }
      
  return 0;
}
7730
Chris Lattnerdf7e8402008-01-27 05:29:54 +00007731/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
7732/// in the specified FP type without changing its value.
Chris Lattner5e0610f2008-04-20 00:41:09 +00007733static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
Chris Lattnerdf7e8402008-01-27 05:29:54 +00007734 APFloat F = CFP->getValueAPF();
7735 if (F.convert(Sem, APFloat::rmNearestTiesToEven) == APFloat::opOK)
Chris Lattner5e0610f2008-04-20 00:41:09 +00007736 return ConstantFP::get(F);
Chris Lattnerdf7e8402008-01-27 05:29:54 +00007737 return 0;
7738}
7739
7740/// LookThroughFPExtensions - If this is an fp extension instruction, look
7741/// through it until we get the source value.
7742static Value *LookThroughFPExtensions(Value *V) {
7743 if (Instruction *I = dyn_cast<Instruction>(V))
7744 if (I->getOpcode() == Instruction::FPExt)
7745 return LookThroughFPExtensions(I->getOperand(0));
7746
7747 // If this value is a constant, return the constant in the smallest FP type
7748 // that can accurately represent it. This allows us to turn
7749 // (float)((double)X+2.0) into x+2.0f.
7750 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
7751 if (CFP->getType() == Type::PPC_FP128Ty)
7752 return V; // No constant folding of this.
7753 // See if the value can be truncated to float and then reextended.
Chris Lattner5e0610f2008-04-20 00:41:09 +00007754 if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle))
Chris Lattnerdf7e8402008-01-27 05:29:54 +00007755 return V;
7756 if (CFP->getType() == Type::DoubleTy)
7757 return V; // Won't shrink.
Chris Lattner5e0610f2008-04-20 00:41:09 +00007758 if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble))
Chris Lattnerdf7e8402008-01-27 05:29:54 +00007759 return V;
7760 // Don't try to shrink to various long double types.
7761 }
7762
7763 return V;
7764}
7765
/// visitFPTrunc - Simplify an fptrunc by narrowing the computation feeding it:
/// fptrunc(binop (fpext x), (fpext y)) can be done as binop on the narrow
/// operands when both fit in the destination type.  Returns a replacement
/// instruction, or 0 if no simplification applied.
Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
  if (Instruction *I = commonCastTransforms(CI))
    return I;
  
  // If we have fptrunc(add (fpextend x), (fpextend y)), where x and y are
  // smaller than the destination type, we can eliminate the truncate by doing
  // the add as the smaller type. This applies to add/sub/mul/div as well as
  // many builtins (sqrt, etc).
  BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
  if (OpI && OpI->hasOneUse()) {
    switch (OpI->getOpcode()) {
    default: break;
    // All five FP-capable opcodes below share one fall-through body.
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::FDiv:
    case Instruction::FRem:
      const Type *SrcTy = OpI->getType();
      // Strip fpext chains (and shrink FP constants) on both operands.
      Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
      Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
      // Only profitable when BOTH operands are narrower than the op's type.
      if (LHSTrunc->getType() != SrcTy &&
          RHSTrunc->getType() != SrcTy) {
        unsigned DstSize = CI.getType()->getPrimitiveSizeInBits();
        // If the source types were both smaller than the destination type of
        // the cast, do this xform.
        if (LHSTrunc->getType()->getPrimitiveSizeInBits() <= DstSize &&
            RHSTrunc->getType()->getPrimitiveSizeInBits() <= DstSize) {
          // Re-extend each operand to the (narrower) destination type and
          // perform the operation there, eliminating the outer fptrunc.
          LHSTrunc = InsertCastBefore(Instruction::FPExt, LHSTrunc,
                                      CI.getType(), CI);
          RHSTrunc = InsertCastBefore(Instruction::FPExt, RHSTrunc,
                                      CI.getType(), CI);
          return BinaryOperator::create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
        }
      }
      break;  
    }
  }
  return 0;
}
7805
7806Instruction *InstCombiner::visitFPExt(CastInst &CI) {
7807 return commonCastTransforms(CI);
7808}
7809
7810Instruction *InstCombiner::visitFPToUI(CastInst &CI) {
7811 return commonCastTransforms(CI);
7812}
7813
7814Instruction *InstCombiner::visitFPToSI(CastInst &CI) {
7815 return commonCastTransforms(CI);
7816}
7817
7818Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
7819 return commonCastTransforms(CI);
7820}
7821
7822Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
7823 return commonCastTransforms(CI);
7824}
7825
7826Instruction *InstCombiner::visitPtrToInt(CastInst &CI) {
7827 return commonPointerCastTransforms(CI);
7828}
7829
/// visitIntToPtr - Simplify an inttoptr.  After the common cast transforms,
/// try to rewrite inttoptr of pointer arithmetic into a GEP so later passes
/// see type-safe addressing:
///   inttoptr(add (ptrtoint x), cst) --> gep x, cst/size
///   inttoptr(add x, cst)            --> gep (inttoptr x), cst/size
/// Both require cst to be evenly divisible by the pointee's ABI size.
/// Returns a replacement instruction, or 0 if no simplification applied.
Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
  if (Instruction *I = commonCastTransforms(CI))
    return I;
  
  const Type *DestPointee = cast<PointerType>(CI.getType())->getElementType();
  if (!DestPointee->isSized()) return 0;

  // If this is inttoptr(add (ptrtoint x), cst), try to turn this into a GEP.
  ConstantInt *Cst;
  Value *X;
  if (match(CI.getOperand(0), m_Add(m_Cast<PtrToIntInst>(m_Value(X)),
                                    m_ConstantInt(Cst)))) {
    // If the source and destination operands have the same type, see if this
    // is a single-index GEP.
    if (X->getType() == CI.getType()) {
      // Get the size of the pointee type.
      uint64_t Size = TD->getABITypeSize(DestPointee);
      
      // Convert the constant to intptr type.
      // NOTE: in this era APInt::sextOrTrunc mutates Offset in place.
      APInt Offset = Cst->getValue();
      Offset.sextOrTrunc(TD->getPointerSizeInBits());
      
      // If Offset is evenly divisible by Size, we can do this xform.
      // (signed srem/sdiv so negative offsets index backwards correctly).
      if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){
        Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size));
        return GetElementPtrInst::Create(X, ConstantInt::get(Offset));
      }
    }
    // TODO: Could handle other cases, e.g. where add is indexing into field of
    // struct etc.
  } else if (CI.getOperand(0)->hasOneUse() &&
             match(CI.getOperand(0), m_Add(m_Value(X), m_ConstantInt(Cst)))) {
    // Otherwise, if this is inttoptr(add x, cst), try to turn this into an
    // "inttoptr+GEP" instead of "add+intptr".
    
    // Get the size of the pointee type.
    uint64_t Size = TD->getABITypeSize(DestPointee);
    
    // Convert the constant to intptr type (in-place sextOrTrunc, as above).
    APInt Offset = Cst->getValue();
    Offset.sextOrTrunc(TD->getPointerSizeInBits());
    
    // If Offset is evenly divisible by Size, we can do this xform.
    if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){
      Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size));
      
      // Cast the non-constant part to the pointer type first, then GEP.
      Instruction *P = InsertNewInstBefore(new IntToPtrInst(X, CI.getType(),
                                                            "tmp"), CI);
      return GetElementPtrInst::Create(P, ConstantInt::get(Offset), "tmp");
    }
  }
  return 0;
}
7883
/// visitBitCast - Simplify a bitcast.  Dispatches to the int/pointer/common
/// cast transforms, removes no-op casts, rewrites pointer bitcasts as
/// "gep x, 0, 0, ..." where the element types line up, retypes casted
/// allocations, and pushes bitcasts through one-use shufflevectors.
/// Returns a replacement instruction, or 0 if no simplification applied.
Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  const Type *SrcTy = Src->getType();
  const Type *DestTy = CI.getType();

  if (SrcTy->isInteger() && DestTy->isInteger()) {
    if (Instruction *Result = commonIntCastTransforms(CI))
      return Result;
  } else if (isa<PointerType>(SrcTy)) {
    if (Instruction *I = commonPointerCastTransforms(CI))
      return I;
  } else {
    if (Instruction *Result = commonCastTransforms(CI))
      return Result;
  }


  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return ReplaceInstUsesWith(CI, Src);

  if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
    // A bitcast TO a pointer must be FROM a pointer, so cast<> is safe here.
    const PointerType *SrcPTy = cast<PointerType>(SrcTy);
    const Type *DstElTy = DstPTy->getElementType();
    const Type *SrcElTy = SrcPTy->getElementType();
    
    // If the address spaces don't match, don't eliminate the bitcast, which is
    // required for changing types.
    if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
      return 0;
    
    // If we are casting a malloc or alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;
    
    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    Constant *ZeroUInt = Constant::getNullValue(Type::Int32Ty);
    unsigned NumZeros = 0;
    // Walk down through leading struct/array members (index 0 each step)
    // looking for the destination element type.
    while (SrcElTy != DstElTy && 
           isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
           SrcElTy->getNumContainedTypes() /* not "{}" */) {
      SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
      return GetElementPtrInst::Create(Src, Idxs.begin(), Idxs.end(), "", 
                                       ((Instruction*) NULL));
    }
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
    if (SVI->hasOneUse()) {
      // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
      // a bitconvert to a vector with the same # elts.
      if (isa<VectorType>(DestTy) && 
          cast<VectorType>(DestTy)->getNumElements() ==
                SVI->getType()->getNumElements()) {
        CastInst *Tmp;
        // If either of the operands is a cast from CI.getType(), then
        // evaluating the shuffle in the casted destination's type will allow
        // us to eliminate at least one cast.
        if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) && 
             Tmp->getOperand(0)->getType() == DestTy) ||
            ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) && 
             Tmp->getOperand(0)->getType() == DestTy)) {
          Value *LHS = InsertOperandCastBefore(Instruction::BitCast,
                                               SVI->getOperand(0), DestTy, &CI);
          Value *RHS = InsertOperandCastBefore(Instruction::BitCast,
                                               SVI->getOperand(1), DestTy, &CI);
          // Return a new shuffle vector. Use the same element ID's, as we
          // know the vector types match #elts.
          return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
        }
      }
    }
  }
  return 0;
}
7972
7973/// GetSelectFoldableOperands - We want to turn code that looks like this:
7974/// %C = or %A, %B
7975/// %D = select %cond, %C, %A
7976/// into:
7977/// %C = select %cond, %B, 0
7978/// %D = or %A, %C
7979///
7980/// Assuming that the specified instruction is an operand to the select, return
7981/// a bitmask indicating which operands of this instruction are foldable if they
7982/// equal the other incoming value of the select.
7983///
7984static unsigned GetSelectFoldableOperands(Instruction *I) {
7985 switch (I->getOpcode()) {
7986 case Instruction::Add:
7987 case Instruction::Mul:
7988 case Instruction::And:
7989 case Instruction::Or:
7990 case Instruction::Xor:
7991 return 3; // Can fold through either operand.
7992 case Instruction::Sub: // Can only fold on the amount subtracted.
7993 case Instruction::Shl: // Can only fold on the shift amount.
7994 case Instruction::LShr:
7995 case Instruction::AShr:
7996 return 1;
7997 default:
7998 return 0; // Cannot fold
7999 }
8000}
8001
8002/// GetSelectFoldableConstant - For the same transformation as the previous
8003/// function, return the identity constant that goes into the select.
8004static Constant *GetSelectFoldableConstant(Instruction *I) {
8005 switch (I->getOpcode()) {
8006 default: assert(0 && "This cannot happen!"); abort();
8007 case Instruction::Add:
8008 case Instruction::Sub:
8009 case Instruction::Or:
8010 case Instruction::Xor:
8011 case Instruction::Shl:
8012 case Instruction::LShr:
8013 case Instruction::AShr:
8014 return Constant::getNullValue(I->getType());
8015 case Instruction::And:
8016 return Constant::getAllOnesValue(I->getType());
8017 case Instruction::Mul:
8018 return ConstantInt::get(I->getType(), 1);
8019 }
8020}
8021
/// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
/// have the same opcode and only one use each. Try to simplify this.
/// For casts: (select c, (cast x), (cast y)) --> cast (select c, x, y).
/// For binops sharing an operand: hoist the shared operand out of the select,
/// e.g. (select c, (op X, Y), (op X, Z)) --> op X, (select c, Y, Z).
/// Returns the replacement instruction, or 0 if no simplification applies.
Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
                                          Instruction *FI) {
  if (TI->getNumOperands() == 1) {
    // If this is a non-volatile load or a cast from the same type,
    // merge.
    if (TI->isCast()) {
      // Both casts must consume the same source type for the fold to be legal.
      if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
        return 0;
    } else {
      return 0;  // unknown unary op.
    }

    // Fold this by inserting a select from the input values.
    SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
                                           FI->getOperand(0), SI.getName()+".v");
    InsertNewInstBefore(NewSI, SI);
    return CastInst::create(Instruction::CastOps(TI->getOpcode()), NewSI, 
                            TI->getType());
  }

  // Only handle binary operators here.
  if (!isa<BinaryOperator>(TI))
    return 0;

  // Figure out if the operations have any operands in common.
  // The first two cases match same-position operands (legal for any binop);
  // the cross-position cases are only tried when the op is commutative.
  Value *MatchOp, *OtherOpT, *OtherOpF;
  bool MatchIsOpZero;
  if (TI->getOperand(0) == FI->getOperand(0)) {
    MatchOp = TI->getOperand(0);
    OtherOpT = TI->getOperand(1);
    OtherOpF = FI->getOperand(1);
    MatchIsOpZero = true;
  } else if (TI->getOperand(1) == FI->getOperand(1)) {
    MatchOp = TI->getOperand(1);
    OtherOpT = TI->getOperand(0);
    OtherOpF = FI->getOperand(0);
    MatchIsOpZero = false;
  } else if (!TI->isCommutative()) {
    return 0;
  } else if (TI->getOperand(0) == FI->getOperand(1)) {
    MatchOp = TI->getOperand(0);
    OtherOpT = TI->getOperand(1);
    OtherOpF = FI->getOperand(0);
    MatchIsOpZero = true;
  } else if (TI->getOperand(1) == FI->getOperand(0)) {
    MatchOp = TI->getOperand(1);
    OtherOpT = TI->getOperand(0);
    OtherOpF = FI->getOperand(1);
    // Operand position differs between TI and FI here; commutativity (checked
    // above) makes either placement of MatchOp in the rebuilt binop correct.
    MatchIsOpZero = true;
  } else {
    return 0;
  }

  // If we reach here, they do have operations in common.
  SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
                                         OtherOpF, SI.getName()+".v");
  InsertNewInstBefore(NewSI, SI);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
    if (MatchIsOpZero)
      return BinaryOperator::create(BO->getOpcode(), MatchOp, NewSI);
    else
      return BinaryOperator::create(BO->getOpcode(), NewSI, MatchOp);
  }
  assert(0 && "Shouldn't get here");
  return 0;
}
8091
/// visitSelectInst - Simplify 'select' instructions.  Folds are attempted in
/// this order: constant/undef/duplicate-arm trivia; boolean selects rewritten
/// as and/or; selects between two integer constants (zext/shift forms);
/// selects whose condition compares the two arms themselves; distributing a
/// select over two one-use instructions with a common operand; folding the
/// select into a foldable operand of one arm; and finally canonicalizing
/// away a 'not' in the condition by swapping the arms.  The ordering matters:
/// each fold assumes the earlier, cheaper ones did not fire.
Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
  Value *CondVal = SI.getCondition();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  // select true, X, Y -> X
  // select false, X, Y -> Y
  if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
    return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);

  // select C, X, X -> X
  if (TrueVal == FalseVal)
    return ReplaceInstUsesWith(SI, TrueVal);

  if (isa<UndefValue>(TrueVal))   // select C, undef, X -> X
    return ReplaceInstUsesWith(SI, FalseVal);
  if (isa<UndefValue>(FalseVal))   // select C, X, undef -> X
    return ReplaceInstUsesWith(SI, TrueVal);
  if (isa<UndefValue>(CondVal)) {  // select undef, X, Y -> X or Y
    // Prefer the constant arm so the result may fold further.
    if (isa<Constant>(TrueVal))
      return ReplaceInstUsesWith(SI, TrueVal);
    else
      return ReplaceInstUsesWith(SI, FalseVal);
  }

  // Boolean-typed selects are really logic ops.
  if (SI.getType() == Type::Int1Ty) {
    if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
      if (C->getZExtValue()) {
        // Change: A = select B, true, C --> A = or B, C
        return BinaryOperator::createOr(CondVal, FalseVal);
      } else {
        // Change: A = select B, false, C --> A = and !B, C
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::createNot(CondVal,
                                             "not."+CondVal->getName()), SI);
        return BinaryOperator::createAnd(NotCond, FalseVal);
      }
    } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
      if (C->getZExtValue() == false) {
        // Change: A = select B, C, false --> A = and B, C
        return BinaryOperator::createAnd(CondVal, TrueVal);
      } else {
        // Change: A = select B, C, true --> A = or !B, C
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::createNot(CondVal,
                                             "not."+CondVal->getName()), SI);
        return BinaryOperator::createOr(NotCond, TrueVal);
      }
    }

    // select a, b, a -> a&b
    // select a, a, b -> a|b
    if (CondVal == TrueVal)
      return BinaryOperator::createOr(CondVal, FalseVal);
    else if (CondVal == FalseVal)
      return BinaryOperator::createAnd(CondVal, TrueVal);
  }

  // Selecting between two integer constants?
  if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
    if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
      // select C, 1, 0 -> zext C to int
      if (FalseValC->isZero() && TrueValC->getValue() == 1) {
        return CastInst::create(Instruction::ZExt, CondVal, SI.getType());
      } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
        // select C, 0, 1 -> zext !C to int
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::createNot(CondVal,
                                               "not."+CondVal->getName()), SI);
        return CastInst::create(Instruction::ZExt, NotCond, SI.getType());
      }

      // FIXME: Turn select 0/-1 and -1/0 into sext from condition!

      if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {

        // (x <s 0) ? -1 : 0 -> ashr x, 31
        if (TrueValC->isAllOnesValue() && FalseValC->isZero())
          if (ConstantInt *CmpCst = dyn_cast<ConstantInt>(IC->getOperand(1))) {
            if (IC->getPredicate() == ICmpInst::ICMP_SLT && CmpCst->isZero()) {
              // The comparison constant and the result are not neccessarily the
              // same width. Make an all-ones value by inserting a AShr.
              Value *X = IC->getOperand(0);
              uint32_t Bits = X->getType()->getPrimitiveSizeInBits();
              Constant *ShAmt = ConstantInt::get(X->getType(), Bits-1);
              Instruction *SRA = BinaryOperator::create(Instruction::AShr, X,
                                                        ShAmt, "ones");
              InsertNewInstBefore(SRA, SI);

              // Finally, convert to the type of the select RHS. We figure out
              // if this requires a SExt, Trunc or BitCast based on the sizes.
              Instruction::CastOps opc = Instruction::BitCast;
              uint32_t SRASize = SRA->getType()->getPrimitiveSizeInBits();
              uint32_t SISize = SI.getType()->getPrimitiveSizeInBits();
              if (SRASize < SISize)
                opc = Instruction::SExt;
              else if (SRASize > SISize)
                opc = Instruction::Trunc;
              return CastInst::create(opc, SRA, SI.getType());
            }
          }


        // If one of the constants is zero (we know they can't both be) and we
        // have an icmp instruction with zero, and we have an 'and' with the
        // non-constant value, eliminate this whole mess. This corresponds to
        // cases like this: ((X & 27) ? 27 : 0)
        if (TrueValC->isZero() || FalseValC->isZero())
          if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
              cast<Constant>(IC->getOperand(1))->isNullValue())
            if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
              if (ICA->getOpcode() == Instruction::And &&
                  isa<ConstantInt>(ICA->getOperand(1)) &&
                  (ICA->getOperand(1) == TrueValC ||
                   ICA->getOperand(1) == FalseValC) &&
                  isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
                // Okay, now we know that everything is set up, we just don't
                // know whether we have a icmp_ne or icmp_eq and whether the
                // true or false val is the zero.
                bool ShouldNotVal = !TrueValC->isZero();
                ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
                Value *V = ICA;
                if (ShouldNotVal)
                  V = InsertNewInstBefore(BinaryOperator::create(
                                  Instruction::Xor, V, ICA->getOperand(1)), SI);
                return ReplaceInstUsesWith(SI, V);
              }
      }
    }

  // See if we are selecting two values based on a comparison of the two values.
  if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
    if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
      // Transform (X == Y) ? X : Y -> Y
      if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
        // This is not safe in general for floating point:
        // consider X== -0, Y== +0.
        // It becomes safe if either operand is a nonzero constant.
        ConstantFP *CFPt, *CFPf;
        if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
              !CFPt->getValueAPF().isZero()) ||
            ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
             !CFPf->getValueAPF().isZero()))
          return ReplaceInstUsesWith(SI, FalseVal);
      }
      // Transform (X != Y) ? X : Y -> X
      if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX/ABS/etc.

    } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
      // Transform (X == Y) ? Y : X -> X
      if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
        // This is not safe in general for floating point:
        // consider X== -0, Y== +0.
        // It becomes safe if either operand is a nonzero constant.
        ConstantFP *CFPt, *CFPf;
        if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
              !CFPt->getValueAPF().isZero()) ||
            ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
             !CFPf->getValueAPF().isZero()))
          return ReplaceInstUsesWith(SI, FalseVal);
      }
      // Transform (X != Y) ? Y : X -> Y
      if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX/ABS/etc.
    }
  }

  // See if we are selecting two values based on a comparison of the two values.
  // Integer equality is unconditional (no signed-zero concerns as with FP).
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal)) {
    if (ICI->getOperand(0) == TrueVal && ICI->getOperand(1) == FalseVal) {
      // Transform (X == Y) ? X : Y -> Y
      if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
        return ReplaceInstUsesWith(SI, FalseVal);
      // Transform (X != Y) ? X : Y -> X
      if (ICI->getPredicate() == ICmpInst::ICMP_NE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX/ABS/etc.

    } else if (ICI->getOperand(0) == FalseVal && ICI->getOperand(1) == TrueVal){
      // Transform (X == Y) ? Y : X -> X
      if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
        return ReplaceInstUsesWith(SI, FalseVal);
      // Transform (X != Y) ? Y : X -> Y
      if (ICI->getPredicate() == ICmpInst::ICMP_NE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX/ABS/etc.
    }
  }

  // If both arms are one-use instructions, try to merge them.
  if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
    if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
      if (TI->hasOneUse() && FI->hasOneUse()) {
        Instruction *AddOp = 0, *SubOp = 0;

        // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
        if (TI->getOpcode() == FI->getOpcode())
          if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
            return IV;

        // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
        // even legal for FP.
        if (TI->getOpcode() == Instruction::Sub &&
            FI->getOpcode() == Instruction::Add) {
          AddOp = FI; SubOp = TI;
        } else if (FI->getOpcode() == Instruction::Sub &&
                   TI->getOpcode() == Instruction::Add) {
          AddOp = TI; SubOp = FI;
        }

        if (AddOp) {
          Value *OtherAddOp = 0;
          if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
            OtherAddOp = AddOp->getOperand(1);
          } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
            OtherAddOp = AddOp->getOperand(0);
          }

          if (OtherAddOp) {
            // So at this point we know we have (Y -> OtherAddOp):
            // select C, (add X, Y), (sub X, Z)
            Value *NegVal; // Compute -Z
            if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
              NegVal = ConstantExpr::getNeg(C);
            } else {
              NegVal = InsertNewInstBefore(
                    BinaryOperator::createNeg(SubOp->getOperand(1), "tmp"), SI);
            }

            Value *NewTrueOp = OtherAddOp;
            Value *NewFalseOp = NegVal;
            // Keep Y on the arm that originally held the add.
            if (AddOp != TI)
              std::swap(NewTrueOp, NewFalseOp);
            Instruction *NewSel =
              SelectInst::Create(CondVal, NewTrueOp,NewFalseOp,SI.getName()+".p");

            NewSel = InsertNewInstBefore(NewSel, SI);
            return BinaryOperator::createAdd(SubOp->getOperand(0), NewSel);
          }
        }
      }

  // See if we can fold the select into one of our operands.
  if (SI.getType()->isInteger()) {
    // See the comment above GetSelectFoldableOperands for a description of the
    // transformation we are doing here.
    if (Instruction *TVI = dyn_cast<Instruction>(TrueVal))
      if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
          !isa<Constant>(FalseVal))
        if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
          unsigned OpToFold = 0;
          if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
            OpToFold = 1;
          } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
            OpToFold = 2;
          }

          if (OpToFold) {
            // Replace the foldable operand with the op's identity constant
            // on the false arm, then hoist the op out of the select.
            Constant *C = GetSelectFoldableConstant(TVI);
            Instruction *NewSel =
              SelectInst::Create(SI.getCondition(), TVI->getOperand(2-OpToFold), C);
            InsertNewInstBefore(NewSel, SI);
            NewSel->takeName(TVI);
            if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
              return BinaryOperator::create(BO->getOpcode(), FalseVal, NewSel);
            else {
              assert(0 && "Unknown instruction!!");
            }
          }
        }

    // Mirror image of the fold above, for the false arm.
    if (Instruction *FVI = dyn_cast<Instruction>(FalseVal))
      if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
          !isa<Constant>(TrueVal))
        if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
          unsigned OpToFold = 0;
          if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
            OpToFold = 1;
          } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
            OpToFold = 2;
          }

          if (OpToFold) {
            Constant *C = GetSelectFoldableConstant(FVI);
            Instruction *NewSel =
              SelectInst::Create(SI.getCondition(), C, FVI->getOperand(2-OpToFold));
            InsertNewInstBefore(NewSel, SI);
            NewSel->takeName(FVI);
            if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
              return BinaryOperator::create(BO->getOpcode(), TrueVal, NewSel);
            else
              assert(0 && "Unknown instruction!!");
          }
        }
  }

  // Canonicalize: select (not C), X, Y --> select C, Y, X.
  if (BinaryOperator::isNot(CondVal)) {
    SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
    SI.setOperand(1, FalseVal);
    SI.setOperand(2, TrueVal);
    return &SI;
  }

  return 0;
}
8399
Dan Gohman2d648bb2008-04-10 18:43:06 +00008400/// EnforceKnownAlignment - If the specified pointer points to an object that
8401/// we control, modify the object's alignment to PrefAlign. This isn't
8402/// often possible though. If alignment is important, a more reliable approach
8403/// is to simply align all global variables and allocation instructions to
8404/// their preferred alignment from the beginning.
8405///
8406static unsigned EnforceKnownAlignment(Value *V,
8407 unsigned Align, unsigned PrefAlign) {
Chris Lattner47cf3452007-08-09 19:05:49 +00008408
Dan Gohman2d648bb2008-04-10 18:43:06 +00008409 User *U = dyn_cast<User>(V);
8410 if (!U) return Align;
8411
8412 switch (getOpcode(U)) {
8413 default: break;
8414 case Instruction::BitCast:
8415 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
8416 case Instruction::GetElementPtr: {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008417 // If all indexes are zero, it is just the alignment of the base pointer.
8418 bool AllZeroOperands = true;
Dan Gohman2d648bb2008-04-10 18:43:06 +00008419 for (unsigned i = 1, e = U->getNumOperands(); i != e; ++i)
8420 if (!isa<Constant>(U->getOperand(i)) ||
8421 !cast<Constant>(U->getOperand(i))->isNullValue()) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008422 AllZeroOperands = false;
8423 break;
8424 }
Chris Lattner47cf3452007-08-09 19:05:49 +00008425
8426 if (AllZeroOperands) {
8427 // Treat this like a bitcast.
Dan Gohman2d648bb2008-04-10 18:43:06 +00008428 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
Chris Lattner47cf3452007-08-09 19:05:49 +00008429 }
Dan Gohman2d648bb2008-04-10 18:43:06 +00008430 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008431 }
Dan Gohman2d648bb2008-04-10 18:43:06 +00008432 }
8433
8434 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
8435 // If there is a large requested alignment and we can, bump up the alignment
8436 // of the global.
8437 if (!GV->isDeclaration()) {
8438 GV->setAlignment(PrefAlign);
8439 Align = PrefAlign;
8440 }
8441 } else if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
8442 // If there is a requested alignment and if this is an alloca, round up. We
8443 // don't do this for malloc, because some systems can't respect the request.
8444 if (isa<AllocaInst>(AI)) {
8445 AI->setAlignment(PrefAlign);
8446 Align = PrefAlign;
8447 }
8448 }
8449
8450 return Align;
8451}
8452
8453/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
8454/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
8455/// and it is more than the alignment of the ultimate object, see if we can
8456/// increase the alignment of the ultimate object, making this check succeed.
8457unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
8458 unsigned PrefAlign) {
8459 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
8460 sizeof(PrefAlign) * CHAR_BIT;
8461 APInt Mask = APInt::getAllOnesValue(BitWidth);
8462 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
8463 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
8464 unsigned TrailZ = KnownZero.countTrailingOnes();
8465 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
8466
8467 if (PrefAlign > Align)
8468 Align = EnforceKnownAlignment(V, Align, PrefAlign);
8469
8470 // We don't need to make any adjustment.
8471 return Align;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008472}
8473
/// SimplifyMemTransfer - Simplify a memcpy/memmove intrinsic: raise its
/// alignment when we can prove a better one, and replace small constant-size
/// transfers (1/2/4/8 bytes) with a single load+store pair.  Returns the
/// (possibly modified) instruction to requeue, or null if nothing changed.
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  // Operand 1 is the dest pointer, operand 2 the source pointer.
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment()->getZExtValue();

  // If we proved a better alignment than the intrinsic declares, record it
  // and requeue the instruction.
  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(Type::Int32Ty, MinAlign));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.  A non-constant length can't be simplified further.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  // NOTE(review): getZExtValue() returns a 64-bit value truncated into
  // 'unsigned' here; presumably fine since sizes > 8 bail out below, but a
  // length like 2^32 would wrap to 0 — verify against callers.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  Type *NewPtrTy = PointerType::getUnqual(IntegerType::get(Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than the
  // integer datatype.
  if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
    const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
    if (SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isFirstClassType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      // Only use the unwrapped type if we reached a first-class (loadable)
      // type of the right size.
      if (SrcETy->isFirstClassType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  // Emit: bitcast src/dest to NewPtrTy, then an aligned load followed by an
  // aligned store, all inserted before the intrinsic.
  Value *Src = InsertBitCastBefore(MI->getOperand(2), NewPtrTy, *MI);
  Value *Dest = InsertBitCastBefore(MI->getOperand(1), NewPtrTy, *MI);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008550
/// SimplifyMemSet - Simplify a memset intrinsic: raise its alignment when we
/// can prove a better one, and replace small constant-size fills
/// (1/2/4/8 bytes) with a single integer store of the replicated fill byte.
/// Returns the (possibly modified) instruction to requeue, or null.
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  // If we proved a better destination alignment than the intrinsic declares,
  // record it and requeue.
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment()->getZExtValue() < Alignment) {
    MI->setAlignment(ConstantInt::get(Type::Int32Ty, Alignment));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || FillC->getType() != Type::Int8Ty)
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment()->getZExtValue();

  // If the length is zero, this is a no-op
  if (Len == 0) return MI; // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(Len*8);  // n=1 -> i8.

    // Cast the destination to a pointer to the matching integer type.
    Value *Dest = MI->getDest();
    Dest = InsertBitCastBefore(Dest, PointerType::getUnqual(ITy), *MI);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.  Multiplying the byte by
    // 0x0101010101010101 replicates it into every byte of the wider integer.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill), Dest, false,
                                      Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
8591
8592
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      // NOTE(review): this inner 'CI' shadows the CallInst parameter; the
      // branch body is intentionally empty (the transform was never written).
      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          // Pick the memcpy variant matching the length operand's width.
          Intrinsic::ID MemCpyID;
          if (CI.getOperand(3)->getType() == Type::Int32Ty)
            MemCpyID = Intrinsic::memcpy_i32;
          else
            MemCpyID = Intrinsic::memcpy_i64;
          // Rewrite the callee in place; operands stay compatible.
          CI.setOperand(0, Intrinsic::getDeclaration(M, MemCpyID));
          Changed = true;
        }
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  } else {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse2_loadu_dq:
      // Turn PPC lvx     -> load if the pointer is known aligned.
      // Turn X86 loadups -> load if the pointer is known aligned.
      if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
        Value *Ptr = InsertBitCastBefore(II->getOperand(1),
                                         PointerType::getUnqual(II->getType()),
                                         CI);
        return new LoadInst(Ptr);
      }
      break;
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
      // Turn stvx -> store if the pointer is known aligned.
      // Operand 1 is the value, operand 2 the pointer.
      if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
        const Type *OpPtrTy =
          PointerType::getUnqual(II->getOperand(1)->getType());
        Value *Ptr = InsertBitCastBefore(II->getOperand(2), OpPtrTy, CI);
        return new StoreInst(II->getOperand(1), Ptr);
      }
      break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      // Turn X86 storeu -> store if the pointer is known aligned.
      // Operand 1 is the pointer, operand 2 the value (opposite of stvx).
      if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
        const Type *OpPtrTy =
          PointerType::getUnqual(II->getOperand(2)->getType());
        Value *Ptr = InsertBitCastBefore(II->getOperand(1), OpPtrTy, CI);
        return new StoreInst(II->getOperand(2), Ptr);
      }
      break;

    case Intrinsic::x86_sse_cvttss2si: {
      // These intrinsics only demands the 0th element of its input vector. If
      // we can simplify the input based on that, do so now.
      uint64_t UndefElts;
      if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), 1,
                                                UndefElts)) {
        II->setOperand(1, V);
        return II;
      }
      break;
    }

    case Intrinsic::ppc_altivec_vperm:
      // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
      if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
        assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

        // Check that all of the elements are integer constants or undefs.
        bool AllEltsOk = true;
        for (unsigned i = 0; i != 16; ++i) {
          if (!isa<ConstantInt>(Mask->getOperand(i)) &&
              !isa<UndefValue>(Mask->getOperand(i))) {
            AllEltsOk = false;
            break;
          }
        }

        if (AllEltsOk) {
          // Cast the input vectors to byte vectors.
          Value *Op0 =InsertBitCastBefore(II->getOperand(1),Mask->getType(),CI);
          Value *Op1 =InsertBitCastBefore(II->getOperand(2),Mask->getType(),CI);
          Value *Result = UndefValue::get(Op0->getType());

          // Only extract each element once.
          Value *ExtractedElts[32];
          memset(ExtractedElts, 0, sizeof(ExtractedElts));

          for (unsigned i = 0; i != 16; ++i) {
            // Undef mask elements leave the corresponding result element undef.
            if (isa<UndefValue>(Mask->getOperand(i)))
              continue;
            unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
            Idx &= 31;  // Match the hardware behavior.

            // Indices 0-15 select from Op0, 16-31 from Op1; cache extracts.
            if (ExtractedElts[Idx] == 0) {
              Instruction *Elt =
                new ExtractElementInst(Idx < 16 ? Op0 : Op1, Idx&15, "tmp");
              InsertNewInstBefore(Elt, CI);
              ExtractedElts[Idx] = Elt;
            }

            // Insert this value into the result vector.
            Result = InsertElementInst::Create(Result, ExtractedElts[Idx], i, "tmp");
            InsertNewInstBefore(cast<Instruction>(Result), CI);
          }
          return CastInst::create(Instruction::BitCast, Result, CI.getType());
        }
      }
      break;

    case Intrinsic::stackrestore: {
      // If the save is right next to the restore, remove the restore.  This can
      // happen when variable allocas are DCE'd.
      if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
        if (SS->getIntrinsicID() == Intrinsic::stacksave) {
          BasicBlock::iterator BI = SS;
          if (&*++BI == II)
            return EraseInstFromFunction(CI);
        }
      }

      // Scan down this block to see if there is another stack restore in the
      // same block without an intervening call/alloca.
      BasicBlock::iterator BI = II;
      TerminatorInst *TI = II->getParent()->getTerminator();
      bool CannotRemove = false;
      for (++BI; &*BI != TI; ++BI) {
        if (isa<AllocaInst>(BI)) {
          CannotRemove = true;
          break;
        }
        if (isa<CallInst>(BI)) {
          if (!isa<IntrinsicInst>(BI)) {
            CannotRemove = true;
            break;
          }
          // If there is a stackrestore below this one, remove this one.
          // NOTE(review): this erases the restore when *any* intrinsic call
          // follows, not only a stackrestore — the comment suggests a missing
          // getIntrinsicID() == Intrinsic::stackrestore check; verify.
          return EraseInstFromFunction(CI);
        }
      }

      // If the stack restore is in a return/unwind block and if there are no
      // allocas or calls between the restore and the return, nuke the restore.
      if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
        return EraseInstFromFunction(CI);
      break;
    }
    }
  }

  // Fall through to the generic call-site improvements.
  return visitCallSite(II);
}
8787
8788// InvokeInst simplification
8789//
8790Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
8791 return visitCallSite(&II);
8792}
8793
Dale Johannesen96021832008-04-25 21:16:07 +00008794/// isSafeToEliminateVarargsCast - If this cast does not affect the value
8795/// passed through the varargs area, we can eliminate the use of the cast.
Dale Johannesen35615462008-04-23 18:34:37 +00008796static bool isSafeToEliminateVarargsCast(const CallSite CS,
8797 const CastInst * const CI,
8798 const TargetData * const TD,
8799 const int ix) {
8800 if (!CI->isLosslessCast())
8801 return false;
8802
8803 // The size of ByVal arguments is derived from the type, so we
8804 // can't change to a type with a different size. If the size were
8805 // passed explicitly we could avoid this check.
8806 if (!CS.paramHasAttr(ix, ParamAttr::ByVal))
8807 return true;
8808
8809 const Type* SrcTy =
8810 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
8811 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
8812 if (!SrcTy->isSized() || !DstTy->isSized())
8813 return false;
8814 if (TD->getABITypeSize(SrcTy) != TD->getABITypeSize(DstTy))
8815 return false;
8816 return true;
8817}
8818
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    if (CalleeF->getCallingConv() != CS.getCallingConv()) {
      Instruction *OldCall = CS.getInstruction();
      // If the call and callee calling conventions don't match, this call must
      // be unreachable, as the call is undefined.  Mark it by inserting a
      // store of 'true' to undef, since we can't modify the CFG here.
      new StoreInst(ConstantInt::getTrue(),
                    UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
                    OldCall);
      if (!OldCall->use_empty())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))   // Not worth removing an invoke here.
        return EraseInstFromFunction(*OldCall);
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(),
                  UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
                  CS.getInstruction());

    if (!CS.getInstruction()->use_empty())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  // Calls through a bitcast of an init.trampoline result get special
  // handling: fold the trampoline's captured argument into the call.
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    // 'ix' is the attribute index of the first vararg argument.
    // NOTE(review): isa<InvokeInst>(Callee) tests the called *value*, not the
    // call-site instruction — presumably isa<InvokeInst>(CS.getInstruction())
    // was intended (invoke has 3 operands before the args, call has 1);
    // verify before relying on the invoke path.
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      // Strip casts that don't change the value passed in the varargs area.
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Returning the instruction requeues it; null means no change was made.
  return Changed ? CS.getInstruction() : 0;
}
8894
8895// transformConstExprCastCall - If the callee is a constexpr cast of a function,
8896// attempt to move the cast to the arguments of the call/invoke.
8897//
8898bool InstCombiner::transformConstExprCastCall(CallSite CS) {
8899 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
8900 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
8901 if (CE->getOpcode() != Instruction::BitCast ||
8902 !isa<Function>(CE->getOperand(0)))
8903 return false;
8904 Function *Callee = cast<Function>(CE->getOperand(0));
8905 Instruction *Caller = CS.getInstruction();
Chris Lattner1c8733e2008-03-12 17:45:29 +00008906 const PAListPtr &CallerPAL = CS.getParamAttrs();
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008907
8908 // Okay, this is a cast from a function to a different type. Unless doing so
8909 // would cause a type conversion of one of our arguments, change this call to
8910 // be a direct call with arguments casted to the appropriate types.
8911 //
8912 const FunctionType *FT = Callee->getFunctionType();
8913 const Type *OldRetTy = Caller->getType();
8914
Devang Pateld091d322008-03-11 18:04:06 +00008915 if (isa<StructType>(FT->getReturnType()))
8916 return false; // TODO: Handle multiple return values.
8917
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008918 // Check to see if we are changing the return type...
8919 if (OldRetTy != FT->getReturnType()) {
8920 if (Callee->isDeclaration() && !Caller->use_empty() &&
8921 // Conversion is ok if changing from pointer to int of same size.
8922 !(isa<PointerType>(FT->getReturnType()) &&
8923 TD->getIntPtrType() == OldRetTy))
8924 return false; // Cannot transform this return value.
8925
Duncan Sands5c489582008-01-06 10:12:28 +00008926 if (!Caller->use_empty() &&
Duncan Sands5c489582008-01-06 10:12:28 +00008927 // void -> non-void is handled specially
Duncan Sands4ced1f82008-01-13 08:02:44 +00008928 FT->getReturnType() != Type::VoidTy &&
8929 !CastInst::isCastable(FT->getReturnType(), OldRetTy))
Duncan Sands5c489582008-01-06 10:12:28 +00008930 return false; // Cannot transform this return value.
8931
Chris Lattner1c8733e2008-03-12 17:45:29 +00008932 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
8933 ParameterAttributes RAttrs = CallerPAL.getParamAttrs(0);
Duncan Sandsdbe97dc2008-01-07 17:16:06 +00008934 if (RAttrs & ParamAttr::typeIncompatible(FT->getReturnType()))
8935 return false; // Attribute not compatible with transformed value.
8936 }
Duncan Sandsc849e662008-01-06 18:27:01 +00008937
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008938 // If the callsite is an invoke instruction, and the return value is used by
8939 // a PHI node in a successor, we cannot change the return type of the call
8940 // because there is no place to put the cast instruction (without breaking
8941 // the critical edge). Bail out in this case.
8942 if (!Caller->use_empty())
8943 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
8944 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
8945 UI != E; ++UI)
8946 if (PHINode *PN = dyn_cast<PHINode>(*UI))
8947 if (PN->getParent() == II->getNormalDest() ||
8948 PN->getParent() == II->getUnwindDest())
8949 return false;
8950 }
8951
8952 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
8953 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
8954
8955 CallSite::arg_iterator AI = CS.arg_begin();
8956 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
8957 const Type *ParamTy = FT->getParamType(i);
8958 const Type *ActTy = (*AI)->getType();
Duncan Sands5c489582008-01-06 10:12:28 +00008959
8960 if (!CastInst::isCastable(ActTy, ParamTy))
Duncan Sandsc849e662008-01-06 18:27:01 +00008961 return false; // Cannot transform this parameter value.
8962
Chris Lattner1c8733e2008-03-12 17:45:29 +00008963 if (CallerPAL.getParamAttrs(i + 1) & ParamAttr::typeIncompatible(ParamTy))
8964 return false; // Attribute not compatible with transformed value.
Duncan Sands5c489582008-01-06 10:12:28 +00008965
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008966 ConstantInt *c = dyn_cast<ConstantInt>(*AI);
Duncan Sands5c489582008-01-06 10:12:28 +00008967 // Some conversions are safe even if we do not have a body.
8968 // Either we can cast directly, or we can upconvert the argument
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008969 bool isConvertible = ActTy == ParamTy ||
8970 (isa<PointerType>(ParamTy) && isa<PointerType>(ActTy)) ||
8971 (ParamTy->isInteger() && ActTy->isInteger() &&
8972 ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()) ||
8973 (c && ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()
8974 && c->getValue().isStrictlyPositive());
8975 if (Callee->isDeclaration() && !isConvertible) return false;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008976 }
8977
8978 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
8979 Callee->isDeclaration())
Chris Lattner1c8733e2008-03-12 17:45:29 +00008980 return false; // Do not delete arguments unless we have a function body.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008981
Chris Lattner1c8733e2008-03-12 17:45:29 +00008982 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
8983 !CallerPAL.isEmpty())
Duncan Sandsc849e662008-01-06 18:27:01 +00008984 // In this case we have more arguments than the new function type, but we
Duncan Sands4ced1f82008-01-13 08:02:44 +00008985 // won't be dropping them. Check that these extra arguments have attributes
8986 // that are compatible with being a vararg call argument.
Chris Lattner1c8733e2008-03-12 17:45:29 +00008987 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
8988 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
Duncan Sands4ced1f82008-01-13 08:02:44 +00008989 break;
Chris Lattner1c8733e2008-03-12 17:45:29 +00008990 ParameterAttributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
Duncan Sands4ced1f82008-01-13 08:02:44 +00008991 if (PAttrs & ParamAttr::VarArgsIncompatible)
8992 return false;
8993 }
Duncan Sandsc849e662008-01-06 18:27:01 +00008994
Dan Gohmanf17a25c2007-07-18 16:29:46 +00008995 // Okay, we decided that this is a safe thing to do: go ahead and start
8996 // inserting cast instructions as necessary...
8997 std::vector<Value*> Args;
8998 Args.reserve(NumActualArgs);
Chris Lattner1c8733e2008-03-12 17:45:29 +00008999 SmallVector<ParamAttrsWithIndex, 8> attrVec;
Duncan Sandsc849e662008-01-06 18:27:01 +00009000 attrVec.reserve(NumCommonArgs);
9001
9002 // Get any return attributes.
Chris Lattner1c8733e2008-03-12 17:45:29 +00009003 ParameterAttributes RAttrs = CallerPAL.getParamAttrs(0);
Duncan Sandsc849e662008-01-06 18:27:01 +00009004
9005 // If the return value is not being used, the type may not be compatible
9006 // with the existing attributes. Wipe out any problematic attributes.
Duncan Sandsdbe97dc2008-01-07 17:16:06 +00009007 RAttrs &= ~ParamAttr::typeIncompatible(FT->getReturnType());
Duncan Sandsc849e662008-01-06 18:27:01 +00009008
9009 // Add the new return attributes.
9010 if (RAttrs)
9011 attrVec.push_back(ParamAttrsWithIndex::get(0, RAttrs));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009012
9013 AI = CS.arg_begin();
9014 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
9015 const Type *ParamTy = FT->getParamType(i);
9016 if ((*AI)->getType() == ParamTy) {
9017 Args.push_back(*AI);
9018 } else {
9019 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
9020 false, ParamTy, false);
9021 CastInst *NewCast = CastInst::create(opcode, *AI, ParamTy, "tmp");
9022 Args.push_back(InsertNewInstBefore(NewCast, *Caller));
9023 }
Duncan Sandsc849e662008-01-06 18:27:01 +00009024
9025 // Add any parameter attributes.
Chris Lattner1c8733e2008-03-12 17:45:29 +00009026 if (ParameterAttributes PAttrs = CallerPAL.getParamAttrs(i + 1))
Duncan Sandsc849e662008-01-06 18:27:01 +00009027 attrVec.push_back(ParamAttrsWithIndex::get(i + 1, PAttrs));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009028 }
9029
9030 // If the function takes more arguments than the call was taking, add them
9031 // now...
9032 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
9033 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
9034
9035 // If we are removing arguments to the function, emit an obnoxious warning...
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00009036 if (FT->getNumParams() < NumActualArgs) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009037 if (!FT->isVarArg()) {
9038 cerr << "WARNING: While resolving call to function '"
9039 << Callee->getName() << "' arguments were dropped!\n";
9040 } else {
9041 // Add all of the arguments in their promoted form to the arg list...
9042 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
9043 const Type *PTy = getPromotedType((*AI)->getType());
9044 if (PTy != (*AI)->getType()) {
9045 // Must promote to pass through va_arg area!
9046 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI, false,
9047 PTy, false);
9048 Instruction *Cast = CastInst::create(opcode, *AI, PTy, "tmp");
9049 InsertNewInstBefore(Cast, *Caller);
9050 Args.push_back(Cast);
9051 } else {
9052 Args.push_back(*AI);
9053 }
Duncan Sandsc849e662008-01-06 18:27:01 +00009054
Duncan Sands4ced1f82008-01-13 08:02:44 +00009055 // Add any parameter attributes.
Chris Lattner1c8733e2008-03-12 17:45:29 +00009056 if (ParameterAttributes PAttrs = CallerPAL.getParamAttrs(i + 1))
Duncan Sands4ced1f82008-01-13 08:02:44 +00009057 attrVec.push_back(ParamAttrsWithIndex::get(i + 1, PAttrs));
9058 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009059 }
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00009060 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009061
9062 if (FT->getReturnType() == Type::VoidTy)
9063 Caller->setName(""); // Void type should not have a name.
9064
Chris Lattner1c8733e2008-03-12 17:45:29 +00009065 const PAListPtr &NewCallerPAL = PAListPtr::get(attrVec.begin(),attrVec.end());
Duncan Sandsc849e662008-01-06 18:27:01 +00009066
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009067 Instruction *NC;
9068 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
Gabor Greifd6da1d02008-04-06 20:25:17 +00009069 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
9070 Args.begin(), Args.end(), Caller->getName(), Caller);
Reid Spencer6b0b09a2007-07-30 19:53:57 +00009071 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
Duncan Sandsc849e662008-01-06 18:27:01 +00009072 cast<InvokeInst>(NC)->setParamAttrs(NewCallerPAL);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009073 } else {
Gabor Greifd6da1d02008-04-06 20:25:17 +00009074 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
9075 Caller->getName(), Caller);
Duncan Sandsf5588dc2007-11-27 13:23:08 +00009076 CallInst *CI = cast<CallInst>(Caller);
9077 if (CI->isTailCall())
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009078 cast<CallInst>(NC)->setTailCall();
Duncan Sandsf5588dc2007-11-27 13:23:08 +00009079 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
Duncan Sandsc849e662008-01-06 18:27:01 +00009080 cast<CallInst>(NC)->setParamAttrs(NewCallerPAL);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009081 }
9082
9083 // Insert a cast of the return type as necessary.
9084 Value *NV = NC;
Duncan Sands5c489582008-01-06 10:12:28 +00009085 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009086 if (NV->getType() != Type::VoidTy) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009087 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
Duncan Sands5c489582008-01-06 10:12:28 +00009088 OldRetTy, false);
9089 NV = NC = CastInst::create(opcode, NC, OldRetTy, "tmp");
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009090
9091 // If this is an invoke instruction, we should insert it after the first
9092 // non-phi, instruction in the normal successor block.
9093 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
9094 BasicBlock::iterator I = II->getNormalDest()->begin();
9095 while (isa<PHINode>(I)) ++I;
9096 InsertNewInstBefore(NC, *I);
9097 } else {
9098 // Otherwise, it's a call, just insert cast right after the call instr
9099 InsertNewInstBefore(NC, *Caller);
9100 }
9101 AddUsersToWorkList(*Caller);
9102 } else {
9103 NV = UndefValue::get(Caller->getType());
9104 }
9105 }
9106
9107 if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
9108 Caller->replaceAllUsesWith(NV);
9109 Caller->eraseFromParent();
9110 RemoveFromWorkList(Caller);
9111 return true;
9112}
9113
// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function,
// splicing in the trampoline's static chain value as the 'nest' argument.
// Returns the replacement instruction (the call site itself when it was
// mutated in place), or 0 when the caller was erased here.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const PAListPtr &Attrs = CS.getParamAttrs();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(ParamAttr::Nest))
    return 0;

  // The callee is known to be a bitcast of the init_trampoline intrinsic
  // call; dig out that intrinsic so we can read its operands.
  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  // Operand 2 of init_trampoline is the real (nested) function; strip any
  // pointer casts wrapped around it.
  Function *NestF =
    cast<Function>(IntrinsicInst::StripPointerCasts(Tramp->getOperand(2)));
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const PAListPtr &NestAttrs = NestF->getParamAttrs();
  if (!NestAttrs.isEmpty()) {
    // Attribute indices are 1-based (index 0 holds result attributes).
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    ParameterAttributes NestAttr = ParamAttr::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, ParamAttr::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttrs(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      // One extra slot for the chain argument we are about to insert.
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<ParamAttrsWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any function result attributes.
      if (ParameterAttributes Attr = Attrs.getParamAttrs(0))
        NewAttrs.push_back(ParamAttrsWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        // Loop structure is do/while so the chain can still be appended
        // when NestIdx is one past the last original argument.
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.  Operand 3 of
            // init_trampoline is the static chain value.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(ParamAttrsWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.  Arguments at or
          // beyond the nest position shift up by one.
          NewArgs.push_back(*I);
          if (ParameterAttributes Attr = Attrs.getParamAttrs(Idx))
            NewAttrs.push_back
              (ParamAttrsWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        // Same do/while shape as above, so appending works.
        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy =
        FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());
      Constant *NewCallee = NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF, PointerType::getUnqual(NewFTy));
      const PAListPtr &NewPAL = PAListPtr::get(NewAttrs.begin(),NewAttrs.end());

      // Build the replacement call/invoke, preserving calling convention,
      // tail-call flag and the new attribute list.
      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setParamAttrs(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setParamAttrs(NewPAL);
      }
      // The old call site is dead; splice in the replacement and erase it.
      if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      RemoveFromWorkList(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF : ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}
9263
/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(c,d)]
/// and if a/b/c/d and the add's all have a single use, turn this into two phi's
/// and a single binop.  Handles binary operators, compares (with matching
/// predicates) and two-operand GEPs.  Returns the new (uninserted) instruction
/// on success, or 0 if the transform does not apply.
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<GetElementPtrInst>(FirstInst) ||
         isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  // LHSVal/RHSVal track a common operand shared by every incoming value;
  // they are nulled out below as soon as a mismatch is seen.
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  const Type *LHSType = LHSVal->getType();
  const Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, all have one use, and all
  // kill their operands (i.e. the operands have one use).
  for (unsigned i = 0; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types or GEP's with different index types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return 0;

    // If they are CmpInst instructions, check their predicates
    if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
      if (cast<CmpInst>(I)->getPredicate() !=
          cast<CmpInst>(FirstInst)->getPredicate())
        return 0;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = 0;
    if (I->getOperand(1) != RHSVal) RHSVal = 0;
  }

  // Otherwise, this is safe to transform, determine if it is profitable.

  // If this is a GEP, and if the index (not the pointer) needs a PHI, bail out.
  // Indexes are often folded into load/store instructions, so we don't want to
  // hide them behind a phi.
  if (isa<GetElementPtrInst>(FirstInst) && RHSVal == 0)
    return 0;

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = 0, *NewRHS = 0;
  // A null LHSVal/RHSVal means that operand differed between incoming
  // values, so it needs its own phi node merging the per-edge operands.
  if (LHSVal == 0) {
    NewLHS = PHINode::Create(LHSType, FirstInst->getOperand(0)->getName()+".pn");
    NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (RHSVal == 0) {
    NewRHS = PHINode::Create(RHSType, FirstInst->getOperand(1)->getName()+".pn");
    NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.  Incoming value 0 was added above.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    if (NewLHS) {
      Value *NewInLHS =cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
      NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
    }
    if (NewRHS) {
      Value *NewInRHS =cast<Instruction>(PN.getIncomingValue(i))->getOperand(1);
      NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
    }
  }

  // Build the single replacement operation over the merged operands; the
  // caller is responsible for inserting it.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
    return BinaryOperator::create(BinOp->getOpcode(), LHSVal, RHSVal);
  else if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
    return CmpInst::create(CIOp->getOpcode(), CIOp->getPredicate(), LHSVal,
                           RHSVal);
  else {
    assert(isa<GetElementPtrInst>(FirstInst));
    return GetElementPtrInst::Create(LHSVal, RHSVal);
  }
}
9349
9350/// isSafeToSinkLoad - Return true if we know that it is safe sink the load out
9351/// of the block that defines it. This means that it must be obvious the value
9352/// of the load is not changed from the point of the load to the end of the
9353/// block it is in.
9354///
9355/// Finally, it is safe, but not profitable, to sink a load targetting a
9356/// non-address-taken alloca. Doing so will cause us to not promote the alloca
9357/// to a register.
9358static bool isSafeToSinkLoad(LoadInst *L) {
9359 BasicBlock::iterator BBI = L, E = L->getParent()->end();
9360
9361 for (++BBI; BBI != E; ++BBI)
9362 if (BBI->mayWriteToMemory())
9363 return false;
9364
9365 // Check for non-address taken alloca. If not address-taken already, it isn't
9366 // profitable to do this xform.
9367 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
9368 bool isAddressTaken = false;
9369 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
9370 UI != E; ++UI) {
9371 if (isa<LoadInst>(UI)) continue;
9372 if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
9373 // If storing TO the alloca, then the address isn't taken.
9374 if (SI->getOperand(1) == AI) continue;
9375 }
9376 isAddressTaken = true;
9377 break;
9378 }
9379
9380 if (!isAddressTaken)
9381 return false;
9382 }
9383
9384 return true;
9385}
9386
9387
// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
// operator and they all are only used by the PHI, PHI together their
// inputs, and do the operation once, to the result of the PHI.
// Handled operations: casts, binop/cmp with a constant RHS, loads that are
// safe to sink, and GEPs (delegated to FoldPHIArgBinOpIntoPHI).  Returns the
// new (uninserted) instruction, or 0 if the fold does not apply.
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = 0;     // Shared constant RHS for binop/cmp cases.
  const Type *CastSrcTy = 0;    // Shared source type for the cast case.
  bool isVolatile = false;      // Volatility of the first load, if any.
  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (ConstantOp == 0)
      return FoldPHIArgBinOpIntoPHI(PN);
  } else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst)) {
    isVolatile = LI->isVolatile();
    // We can't sink the load if the loaded value could be modified between the
    // load and the PHI.
    if (LI->getParent() != PN.getIncomingBlock(0) ||
        !isSafeToSinkLoad(LI))
      return 0;
  } else if (isa<GetElementPtrInst>(FirstInst)) {
    if (FirstInst->getNumOperands() == 2)
      return FoldPHIArgBinOpIntoPHI(PN);
    // Can't handle general GEPs yet.
    return 0;
  } else {
    return 0;  // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    if (!isa<Instruction>(PN.getIncomingValue(i))) return 0;
    Instruction *I = cast<Instruction>(PN.getIncomingValue(i));
    if (!I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return 0;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return 0;  // Cast operation must match.
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // We can't sink the load if the loaded value could be modified between
      // the load and the PHI.
      if (LI->isVolatile() != isVolatile ||
          LI->getParent() != PN.getIncomingBlock(i) ||
          !isSafeToSinkLoad(LI))
        return 0;

      // If the PHI is volatile and its block has multiple successors, sinking
      // it would remove a load of the volatile value from the path through the
      // other successor.
      if (isVolatile &&
          LI->getParent()->getTerminator()->getNumSuccessors() != 1)
        return 0;


    } else if (I->getOperand(1) != ConstantOp) {
      return 0;  // Binop/cmp must use the same constant RHS.
    }
  }

  // Okay, they are all the same operation.  Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getName()+".in");
  NewPN->reserveOperandSpace(PN.getNumOperands()/2);

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.  InVal is nulled out if the incoming
  // operands are not all identical.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = 0;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together.  This is really
    // common, so we handle it intelligently here for compile-time speed.
    // The speculatively-built phi was never inserted, so delete it directly.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst* FirstCI = dyn_cast<CastInst>(FirstInst))
    return CastInst::create(FirstCI->getOpcode(), PhiVal, PN.getType());
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
    return BinaryOperator::create(BinOp->getOpcode(), PhiVal, ConstantOp);
  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
    return CmpInst::create(CIOp->getOpcode(), CIOp->getPredicate(),
                           PhiVal, ConstantOp);
  assert(isa<LoadInst>(FirstInst) && "Unknown operation");

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile.  If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
      cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);

  return new LoadInst(PhiVal, "", isVolatile);
}
9502
9503/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
9504/// that is dead.
9505static bool DeadPHICycle(PHINode *PN,
9506 SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
9507 if (PN->use_empty()) return true;
9508 if (!PN->hasOneUse()) return false;
9509
9510 // Remember this node, and if we find the cycle, return.
9511 if (!PotentiallyDeadPHIs.insert(PN))
9512 return true;
Chris Lattneradf2e342007-08-28 04:23:55 +00009513
9514 // Don't scan crazily complex things.
9515 if (PotentiallyDeadPHIs.size() == 16)
9516 return false;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009517
9518 if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
9519 return DeadPHICycle(PU, PotentiallyDeadPHIs);
9520
9521 return false;
9522}
9523
Chris Lattner27b695d2007-11-06 21:52:06 +00009524/// PHIsEqualValue - Return true if this phi node is always equal to
9525/// NonPhiInVal. This happens with mutually cyclic phi nodes like:
9526/// z = some value; x = phi (y, z); y = phi (x, z)
9527static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
9528 SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
9529 // See if we already saw this PHI node.
9530 if (!ValueEqualPHIs.insert(PN))
9531 return true;
9532
9533 // Don't scan crazily complex things.
9534 if (ValueEqualPHIs.size() == 16)
9535 return false;
9536
9537 // Scan the operands to see if they are either phi nodes or are equal to
9538 // the value.
9539 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
9540 Value *Op = PN->getIncomingValue(i);
9541 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
9542 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
9543 return false;
9544 } else if (Op != NonPhiInVal)
9545 return false;
9546 }
9547
9548 return true;
9549}
9550
9551
// PHINode simplification
//
// Tries, in order: replacing a constant-valued phi, pulling a common
// operation through the phi, deleting dead phi cycles, deleting unused
// induction-variable phis, and collapsing phi cycles that always equal a
// single non-phi value.  Returns the replacement, or 0 if nothing applied.
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  // If LCSSA is around, don't mess with Phi nodes
  if (MustPreserveLCSSA) return 0;

  // A phi whose incoming values all agree can be replaced outright.
  if (Value *V = PN.hasConstantValue())
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it.  Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.use_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      // Seed the visited set with this phi so the walk can detect the cycle.
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi.  This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);".  Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->use_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block.  Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumOperandVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumOperandVals) {
      Value *NonPhiInVal = PN.getOperand(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values.  Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumOperandVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return ReplaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }
  return 0;
}
9629
9630static Value *InsertCastToIntPtrTy(Value *V, const Type *DTy,
9631 Instruction *InsertPoint,
9632 InstCombiner *IC) {
9633 unsigned PtrSize = DTy->getPrimitiveSizeInBits();
9634 unsigned VTySize = V->getType()->getPrimitiveSizeInBits();
9635 // We must cast correctly to the pointer type. Ensure that we
9636 // sign extend the integer value if it is smaller as this is
9637 // used for address computation.
9638 Instruction::CastOps opcode =
9639 (VTySize < PtrSize ? Instruction::SExt :
9640 (VTySize == PtrSize ? Instruction::BitCast : Instruction::Trunc));
9641 return IC->InsertCastBefore(opcode, V, DTy, *InsertPoint);
9642}
9643
9644
9645Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
9646 Value *PtrOp = GEP.getOperand(0);
9647 // Is it 'getelementptr %P, i32 0' or 'getelementptr %P'
9648 // If so, eliminate the noop.
9649 if (GEP.getNumOperands() == 1)
9650 return ReplaceInstUsesWith(GEP, PtrOp);
9651
9652 if (isa<UndefValue>(GEP.getOperand(0)))
9653 return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
9654
9655 bool HasZeroPointerIndex = false;
9656 if (Constant *C = dyn_cast<Constant>(GEP.getOperand(1)))
9657 HasZeroPointerIndex = C->isNullValue();
9658
9659 if (GEP.getNumOperands() == 2 && HasZeroPointerIndex)
9660 return ReplaceInstUsesWith(GEP, PtrOp);
9661
9662 // Eliminate unneeded casts for indices.
9663 bool MadeChange = false;
9664
9665 gep_type_iterator GTI = gep_type_begin(GEP);
9666 for (unsigned i = 1, e = GEP.getNumOperands(); i != e; ++i, ++GTI) {
9667 if (isa<SequentialType>(*GTI)) {
9668 if (CastInst *CI = dyn_cast<CastInst>(GEP.getOperand(i))) {
9669 if (CI->getOpcode() == Instruction::ZExt ||
9670 CI->getOpcode() == Instruction::SExt) {
9671 const Type *SrcTy = CI->getOperand(0)->getType();
9672 // We can eliminate a cast from i32 to i64 iff the target
9673 // is a 32-bit pointer target.
9674 if (SrcTy->getPrimitiveSizeInBits() >= TD->getPointerSizeInBits()) {
9675 MadeChange = true;
9676 GEP.setOperand(i, CI->getOperand(0));
9677 }
9678 }
9679 }
9680 // If we are using a wider index than needed for this platform, shrink it
9681 // to what we need. If the incoming value needs a cast instruction,
9682 // insert it. This explicit cast can make subsequent optimizations more
9683 // obvious.
9684 Value *Op = GEP.getOperand(i);
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00009685 if (TD->getTypeSizeInBits(Op->getType()) > TD->getPointerSizeInBits()) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009686 if (Constant *C = dyn_cast<Constant>(Op)) {
9687 GEP.setOperand(i, ConstantExpr::getTrunc(C, TD->getIntPtrType()));
9688 MadeChange = true;
9689 } else {
9690 Op = InsertCastBefore(Instruction::Trunc, Op, TD->getIntPtrType(),
9691 GEP);
9692 GEP.setOperand(i, Op);
9693 MadeChange = true;
9694 }
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00009695 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009696 }
9697 }
9698 if (MadeChange) return &GEP;
9699
9700 // If this GEP instruction doesn't move the pointer, and if the input operand
9701 // is a bitcast of another pointer, just replace the GEP with a bitcast of the
9702 // real input to the dest type.
Chris Lattnerc59171a2007-10-12 05:30:59 +00009703 if (GEP.hasAllZeroIndices()) {
9704 if (BitCastInst *BCI = dyn_cast<BitCastInst>(GEP.getOperand(0))) {
9705 // If the bitcast is of an allocation, and the allocation will be
9706 // converted to match the type of the cast, don't touch this.
9707 if (isa<AllocationInst>(BCI->getOperand(0))) {
9708 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
Chris Lattner551a5872007-10-12 18:05:47 +00009709 if (Instruction *I = visitBitCast(*BCI)) {
9710 if (I != BCI) {
9711 I->takeName(BCI);
9712 BCI->getParent()->getInstList().insert(BCI, I);
9713 ReplaceInstUsesWith(*BCI, I);
9714 }
Chris Lattnerc59171a2007-10-12 05:30:59 +00009715 return &GEP;
Chris Lattner551a5872007-10-12 18:05:47 +00009716 }
Chris Lattnerc59171a2007-10-12 05:30:59 +00009717 }
9718 return new BitCastInst(BCI->getOperand(0), GEP.getType());
9719 }
9720 }
9721
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009722 // Combine Indices - If the source pointer to this getelementptr instruction
9723 // is a getelementptr instruction, combine the indices of the two
9724 // getelementptr instructions into a single instruction.
9725 //
9726 SmallVector<Value*, 8> SrcGEPOperands;
9727 if (User *Src = dyn_castGetElementPtr(PtrOp))
9728 SrcGEPOperands.append(Src->op_begin(), Src->op_end());
9729
9730 if (!SrcGEPOperands.empty()) {
9731 // Note that if our source is a gep chain itself that we wait for that
9732 // chain to be resolved before we perform this transformation. This
9733 // avoids us creating a TON of code in some cases.
9734 //
9735 if (isa<GetElementPtrInst>(SrcGEPOperands[0]) &&
9736 cast<Instruction>(SrcGEPOperands[0])->getNumOperands() == 2)
9737 return 0; // Wait until our source is folded to completion.
9738
9739 SmallVector<Value*, 8> Indices;
9740
9741 // Find out whether the last index in the source GEP is a sequential idx.
9742 bool EndsWithSequential = false;
9743 for (gep_type_iterator I = gep_type_begin(*cast<User>(PtrOp)),
9744 E = gep_type_end(*cast<User>(PtrOp)); I != E; ++I)
9745 EndsWithSequential = !isa<StructType>(*I);
9746
9747 // Can we combine the two pointer arithmetics offsets?
9748 if (EndsWithSequential) {
9749 // Replace: gep (gep %P, long B), long A, ...
9750 // With: T = long A+B; gep %P, T, ...
9751 //
9752 Value *Sum, *SO1 = SrcGEPOperands.back(), *GO1 = GEP.getOperand(1);
9753 if (SO1 == Constant::getNullValue(SO1->getType())) {
9754 Sum = GO1;
9755 } else if (GO1 == Constant::getNullValue(GO1->getType())) {
9756 Sum = SO1;
9757 } else {
9758 // If they aren't the same type, convert both to an integer of the
9759 // target's pointer size.
9760 if (SO1->getType() != GO1->getType()) {
9761 if (Constant *SO1C = dyn_cast<Constant>(SO1)) {
9762 SO1 = ConstantExpr::getIntegerCast(SO1C, GO1->getType(), true);
9763 } else if (Constant *GO1C = dyn_cast<Constant>(GO1)) {
9764 GO1 = ConstantExpr::getIntegerCast(GO1C, SO1->getType(), true);
9765 } else {
Duncan Sandsf99fdc62007-11-01 20:53:16 +00009766 unsigned PS = TD->getPointerSizeInBits();
9767 if (TD->getTypeSizeInBits(SO1->getType()) == PS) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009768 // Convert GO1 to SO1's type.
9769 GO1 = InsertCastToIntPtrTy(GO1, SO1->getType(), &GEP, this);
9770
Duncan Sandsf99fdc62007-11-01 20:53:16 +00009771 } else if (TD->getTypeSizeInBits(GO1->getType()) == PS) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009772 // Convert SO1 to GO1's type.
9773 SO1 = InsertCastToIntPtrTy(SO1, GO1->getType(), &GEP, this);
9774 } else {
9775 const Type *PT = TD->getIntPtrType();
9776 SO1 = InsertCastToIntPtrTy(SO1, PT, &GEP, this);
9777 GO1 = InsertCastToIntPtrTy(GO1, PT, &GEP, this);
9778 }
9779 }
9780 }
9781 if (isa<Constant>(SO1) && isa<Constant>(GO1))
9782 Sum = ConstantExpr::getAdd(cast<Constant>(SO1), cast<Constant>(GO1));
9783 else {
9784 Sum = BinaryOperator::createAdd(SO1, GO1, PtrOp->getName()+".sum");
9785 InsertNewInstBefore(cast<Instruction>(Sum), GEP);
9786 }
9787 }
9788
9789 // Recycle the GEP we already have if possible.
9790 if (SrcGEPOperands.size() == 2) {
9791 GEP.setOperand(0, SrcGEPOperands[0]);
9792 GEP.setOperand(1, Sum);
9793 return &GEP;
9794 } else {
9795 Indices.insert(Indices.end(), SrcGEPOperands.begin()+1,
9796 SrcGEPOperands.end()-1);
9797 Indices.push_back(Sum);
9798 Indices.insert(Indices.end(), GEP.op_begin()+2, GEP.op_end());
9799 }
9800 } else if (isa<Constant>(*GEP.idx_begin()) &&
9801 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
9802 SrcGEPOperands.size() != 1) {
9803 // Otherwise we can do the fold if the first index of the GEP is a zero
9804 Indices.insert(Indices.end(), SrcGEPOperands.begin()+1,
9805 SrcGEPOperands.end());
9806 Indices.insert(Indices.end(), GEP.idx_begin()+1, GEP.idx_end());
9807 }
9808
9809 if (!Indices.empty())
Gabor Greifd6da1d02008-04-06 20:25:17 +00009810 return GetElementPtrInst::Create(SrcGEPOperands[0], Indices.begin(),
9811 Indices.end(), GEP.getName());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009812
9813 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(PtrOp)) {
9814 // GEP of global variable. If all of the indices for this GEP are
9815 // constants, we can promote this to a constexpr instead of an instruction.
9816
9817 // Scan for nonconstants...
9818 SmallVector<Constant*, 8> Indices;
9819 User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end();
9820 for (; I != E && isa<Constant>(*I); ++I)
9821 Indices.push_back(cast<Constant>(*I));
9822
9823 if (I == E) { // If they are all constants...
9824 Constant *CE = ConstantExpr::getGetElementPtr(GV,
9825 &Indices[0],Indices.size());
9826
9827 // Replace all uses of the GEP with the new constexpr...
9828 return ReplaceInstUsesWith(GEP, CE);
9829 }
9830 } else if (Value *X = getBitCastOperand(PtrOp)) { // Is the operand a cast?
9831 if (!isa<PointerType>(X->getType())) {
9832 // Not interesting. Source pointer must be a cast from pointer.
9833 } else if (HasZeroPointerIndex) {
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009834 // transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
9835 // into : GEP [10 x i8]* X, i32 0, ...
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009836 //
9837 // This occurs when the program declares an array extern like "int X[];"
9838 //
9839 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
9840 const PointerType *XTy = cast<PointerType>(X->getType());
9841 if (const ArrayType *XATy =
9842 dyn_cast<ArrayType>(XTy->getElementType()))
9843 if (const ArrayType *CATy =
9844 dyn_cast<ArrayType>(CPTy->getElementType()))
9845 if (CATy->getElementType() == XATy->getElementType()) {
9846 // At this point, we know that the cast source type is a pointer
9847 // to an array of the same type as the destination pointer
9848 // array. Because the array type is never stepped over (there
9849 // is a leading zero) we can fold the cast into this GEP.
9850 GEP.setOperand(0, X);
9851 return &GEP;
9852 }
9853 } else if (GEP.getNumOperands() == 2) {
9854 // Transform things like:
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009855 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
9856 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009857 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
9858 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
9859 if (isa<ArrayType>(SrcElTy) &&
Duncan Sandsf99fdc62007-11-01 20:53:16 +00009860 TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
9861 TD->getABITypeSize(ResElTy)) {
David Greene393be882007-09-04 15:46:09 +00009862 Value *Idx[2];
9863 Idx[0] = Constant::getNullValue(Type::Int32Ty);
9864 Idx[1] = GEP.getOperand(1);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009865 Value *V = InsertNewInstBefore(
Gabor Greifd6da1d02008-04-06 20:25:17 +00009866 GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName()), GEP);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009867 // V and GEP are both pointer types --> BitCast
9868 return new BitCastInst(V, GEP.getType());
9869 }
9870
9871 // Transform things like:
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009872 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009873 // (where tmp = 8*tmp2) into:
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009874 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009875
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009876 if (isa<ArrayType>(SrcElTy) && ResElTy == Type::Int8Ty) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009877 uint64_t ArrayEltSize =
Duncan Sandsf99fdc62007-11-01 20:53:16 +00009878 TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009879
9880 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
9881 // allow either a mul, shift, or constant here.
9882 Value *NewIdx = 0;
9883 ConstantInt *Scale = 0;
9884 if (ArrayEltSize == 1) {
9885 NewIdx = GEP.getOperand(1);
9886 Scale = ConstantInt::get(NewIdx->getType(), 1);
9887 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
9888 NewIdx = ConstantInt::get(CI->getType(), 1);
9889 Scale = CI;
9890 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
9891 if (Inst->getOpcode() == Instruction::Shl &&
9892 isa<ConstantInt>(Inst->getOperand(1))) {
9893 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
9894 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
9895 Scale = ConstantInt::get(Inst->getType(), 1ULL << ShAmtVal);
9896 NewIdx = Inst->getOperand(0);
9897 } else if (Inst->getOpcode() == Instruction::Mul &&
9898 isa<ConstantInt>(Inst->getOperand(1))) {
9899 Scale = cast<ConstantInt>(Inst->getOperand(1));
9900 NewIdx = Inst->getOperand(0);
9901 }
9902 }
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009903
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009904 // If the index will be to exactly the right offset with the scale taken
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009905 // out, perform the transformation. Note, we don't know whether Scale is
9906 // signed or not. We'll use unsigned version of division/modulo
9907 // operation after making sure Scale doesn't have the sign bit set.
9908 if (Scale && Scale->getSExtValue() >= 0LL &&
9909 Scale->getZExtValue() % ArrayEltSize == 0) {
9910 Scale = ConstantInt::get(Scale->getType(),
9911 Scale->getZExtValue() / ArrayEltSize);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009912 if (Scale->getZExtValue() != 1) {
9913 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
Wojciech Matyjewicz5b5ab532007-12-12 15:21:32 +00009914 false /*ZExt*/);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009915 Instruction *Sc = BinaryOperator::createMul(NewIdx, C, "idxscale");
9916 NewIdx = InsertNewInstBefore(Sc, GEP);
9917 }
9918
9919 // Insert the new GEP instruction.
David Greene393be882007-09-04 15:46:09 +00009920 Value *Idx[2];
9921 Idx[0] = Constant::getNullValue(Type::Int32Ty);
9922 Idx[1] = NewIdx;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009923 Instruction *NewGEP =
Gabor Greifd6da1d02008-04-06 20:25:17 +00009924 GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009925 NewGEP = InsertNewInstBefore(NewGEP, GEP);
9926 // The NewGEP must be pointer typed, so must the old one -> BitCast
9927 return new BitCastInst(NewGEP, GEP.getType());
9928 }
9929 }
9930 }
9931 }
9932
9933 return 0;
9934}
9935
9936Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
9937 // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00009938 if (AI.isArrayAllocation()) { // Check C != 1
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009939 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
9940 const Type *NewTy =
9941 ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
9942 AllocationInst *New = 0;
9943
9944 // Create and insert the replacement instruction...
9945 if (isa<MallocInst>(AI))
9946 New = new MallocInst(NewTy, 0, AI.getAlignment(), AI.getName());
9947 else {
9948 assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
9949 New = new AllocaInst(NewTy, 0, AI.getAlignment(), AI.getName());
9950 }
9951
9952 InsertNewInstBefore(New, AI);
9953
9954 // Scan to the end of the allocation instructions, to skip over a block of
9955 // allocas if possible...
9956 //
9957 BasicBlock::iterator It = New;
9958 while (isa<AllocationInst>(*It)) ++It;
9959
9960 // Now that I is pointing to the first non-allocation-inst in the block,
9961 // insert our getelementptr instruction...
9962 //
9963 Value *NullIdx = Constant::getNullValue(Type::Int32Ty);
David Greene393be882007-09-04 15:46:09 +00009964 Value *Idx[2];
9965 Idx[0] = NullIdx;
9966 Idx[1] = NullIdx;
Gabor Greifd6da1d02008-04-06 20:25:17 +00009967 Value *V = GetElementPtrInst::Create(New, Idx, Idx + 2,
9968 New->getName()+".sub", It);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009969
9970 // Now make everything use the getelementptr instead of the original
9971 // allocation.
9972 return ReplaceInstUsesWith(AI, V);
9973 } else if (isa<UndefValue>(AI.getArraySize())) {
9974 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
9975 }
Anton Korobeynikov8522e1c2008-02-20 11:26:25 +00009976 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009977
9978 // If alloca'ing a zero byte object, replace the alloca with a null pointer.
9979 // Note that we only do this for alloca's, because malloc should allocate and
9980 // return a unique pointer, even for a zero byte allocation.
9981 if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
Duncan Sandsf99fdc62007-11-01 20:53:16 +00009982 TD->getABITypeSize(AI.getAllocatedType()) == 0)
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009983 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
9984
9985 return 0;
9986}
9987
9988Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
9989 Value *Op = FI.getOperand(0);
9990
9991 // free undef -> unreachable.
9992 if (isa<UndefValue>(Op)) {
9993 // Insert a new store to null because we cannot modify the CFG here.
9994 new StoreInst(ConstantInt::getTrue(),
Christopher Lambbb2f2222007-12-17 01:12:55 +00009995 UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), &FI);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00009996 return EraseInstFromFunction(FI);
9997 }
9998
9999 // If we have 'free null' delete the instruction. This can happen in stl code
10000 // when lots of inlining happens.
10001 if (isa<ConstantPointerNull>(Op))
10002 return EraseInstFromFunction(FI);
10003
10004 // Change free <ty>* (cast <ty2>* X to <ty>*) into free <ty2>* X
10005 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op)) {
10006 FI.setOperand(0, CI->getOperand(0));
10007 return &FI;
10008 }
10009
10010 // Change free (gep X, 0,0,0,0) into free(X)
10011 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
10012 if (GEPI->hasAllZeroIndices()) {
10013 AddToWorkList(GEPI);
10014 FI.setOperand(0, GEPI->getOperand(0));
10015 return &FI;
10016 }
10017 }
10018
10019 // Change free(malloc) into nothing, if the malloc has a single use.
10020 if (MallocInst *MI = dyn_cast<MallocInst>(Op))
10021 if (MI->hasOneUse()) {
10022 EraseInstFromFunction(FI);
10023 return EraseInstFromFunction(*MI);
10024 }
10025
10026 return 0;
10027}
10028
10029
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  // The caller guarantees the pointer operand is a cast (either a CastInst
  // or a cast constant expression), so this cast<User> cannot fail.
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(CI)) {
    // Instead of loading constant c string, use corresponding integer value
    // directly if string length is small enough.
    // NOTE(review): getStringValue() presumably returns the initializer text
    // when CE->getOperand(0) is a constant-string global and "" otherwise --
    // confirm against the Constant API of this LLVM revision.
    const std::string &Str = CE->getOperand(0)->getStringValue();
    if (!Str.empty()) {
      unsigned len = Str.length();
      const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
      unsigned numBits = Ty->getPrimitiveSizeInBits();
      // Replace LI with immediate integer store.
      // Only fold when the loaded integer covers exactly the string plus its
      // terminating NUL: numBits/8 bytes == len + 1.
      if ((numBits >> 3) == len + 1) {
        APInt StrVal(numBits, 0);
        APInt SingleChar(numBits, 0);
        if (TD->isLittleEndian()) {
          // Little-endian: Str[0] must land in the low byte, so fold from the
          // last character down.  The terminating NUL ends up in the top
          // byte, which is already zero from the APInt(numBits, 0) init.
          for (signed i = len-1; i >= 0; i--) {
            SingleChar = (uint64_t) Str[i];
            StrVal = (StrVal << 8) | SingleChar;
          }
        } else {
          // Big-endian: fold characters in order, then shift in the NUL.
          for (unsigned i = 0; i < len; i++) {
            SingleChar = (uint64_t) Str[i];
            StrVal = (StrVal << 8) | SingleChar;
          }
          // Append NULL at the end.
          SingleChar = 0;
          StrVal = (StrVal << 8) | SingleChar;
        }
        Value *NL = ConstantInt::get(StrVal);
        return IC.ReplaceInstUsesWith(LI, NL);
      }
    }
  }

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
        isa<VectorType>(DestPTy)) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.  Note this rewrites CastOp/SrcTy/SrcPTy in place so the
      // size-equality check below sees the element type, not the array.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty);
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
           isa<VectorType>(SrcPTy)) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
          IC.getTargetData().getTypeSizeInBits(SrcPTy) ==
               IC.getTargetData().getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.  Volatility is preserved.
        Value *NewLoad = IC.InsertNewInstBefore(new LoadInst(CastOp,
                                                             CI->getName(),
                                                         LI.isVolatile()),LI);
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  // No simplification applies.
  return 0;
}
10108
10109/// isSafeToLoadUnconditionally - Return true if we know that executing a load
10110/// from this value cannot trap. If it is not obviously safe to load from the
10111/// specified pointer, we do a quick local scan of the basic block containing
10112/// ScanFrom, to determine if the address is already accessed.
10113static bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
Duncan Sands9b27dbe2007-09-19 10:10:31 +000010114 // If it is an alloca it is always safe to load from.
10115 if (isa<AllocaInst>(V)) return true;
10116
Duncan Sandse40a94a2007-09-19 10:25:38 +000010117 // If it is a global variable it is mostly safe to load from.
Duncan Sands9b27dbe2007-09-19 10:10:31 +000010118 if (const GlobalValue *GV = dyn_cast<GlobalVariable>(V))
Duncan Sandse40a94a2007-09-19 10:25:38 +000010119 // Don't try to evaluate aliases. External weak GV can be null.
Duncan Sands9b27dbe2007-09-19 10:10:31 +000010120 return !isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage();
Dan Gohmanf17a25c2007-07-18 16:29:46 +000010121
10122 // Otherwise, be a little bit agressive by scanning the local block where we
10123 // want to check to see if the pointer is already being loaded or stored
10124 // from/to. If so, the previous load or store would have already trapped,
10125 // so there is no harm doing an extra load (also, CSE will later eliminate
10126 // the load entirely).
10127 BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
10128
10129 while (BBI != E) {
10130 --BBI;
10131
10132 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
10133 if (LI->getOperand(0) == V) return true;
10134 } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI))
10135 if (SI->getOperand(1) == V) return true;
10136
10137 }
10138 return false;
10139}
10140
Chris Lattner0270a112007-08-11 18:48:48 +000010141/// GetUnderlyingObject - Trace through a series of getelementptrs and bitcasts
10142/// until we find the underlying object a pointer is referring to or something
10143/// we don't understand. Note that the returned pointer may be offset from the
10144/// input, because we ignore GEP indices.
10145static Value *GetUnderlyingObject(Value *Ptr) {
10146 while (1) {
10147 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
10148 if (CE->getOpcode() == Instruction::BitCast ||
10149 CE->getOpcode() == Instruction::GetElementPtr)
10150 Ptr = CE->getOperand(0);
10151 else
10152 return Ptr;
10153 } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr)) {
10154 Ptr = BCI->getOperand(0);
10155 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
10156 Ptr = GEP->getOperand(0);
10157 } else {
10158 return Ptr;
10159 }
10160 }
10161}
10162
// visitLoadInst - Simplify load instructions: improve alignment, fold loads
// through casts, forward stored/loaded values from the immediately preceding
// instruction, fold loads of constant memory, and rewrite loads of selected
// pointers into selects of loaded values.
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.  An alignment of 0 on the load means
  // "ABI alignment of the loaded type", so only overwrite when the proven
  // alignment is strictly better than what the load already claims.
  unsigned KnownAlign = GetOrEnforceKnownAlignment(Op);
  if (KnownAlign >
      (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
                                LI.getAlignment()))
    LI.setAlignment(KnownAlign);

  // load (cast X) --> cast (load X) iff safe
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  if (&LI.getParent()->front() != &LI) {
    BasicBlock::iterator BBI = &LI; --BBI;
    // If the instruction immediately before this is a store to the same
    // address, do a simple form of store->load forwarding.
    if (StoreInst *SI = dyn_cast<StoreInst>(BBI))
      if (SI->getOperand(1) == LI.getOperand(0))
        return ReplaceInstUsesWith(LI, SI->getOperand(0));
    // Likewise, reuse the result of an immediately preceding load of the
    // same address (trivial redundant-load elimination).
    if (LoadInst *LIB = dyn_cast<LoadInst>(BBI))
      if (LIB->getOperand(0) == LI.getOperand(0))
        return ReplaceInstUsesWith(LI, LIB);
  }

  // A load of a gep whose base is null (in address space 0) is unreachable.
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) &&
        cast<PointerType>(GEPI0->getType())->getAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  if (Constant *C = dyn_cast<Constant>(Op)) {
    // load null/undef -> undef
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<UndefValue>(C) || (C->isNullValue() &&
        cast<PointerType>(Op->getType())->getAddressSpace() == 0)) {
      // Insert a new store to null instruction before the load to indicate that
      // this code is not reachable.  We do this instead of inserting an
      // unreachable instruction directly because we cannot modify the CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }

    // Instcombine load (constant global) into the value loaded.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
      if (GV->isConstant() && !GV->isDeclaration())
        return ReplaceInstUsesWith(LI, GV->getInitializer());

    // Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
          if (GV->isConstant() && !GV->isDeclaration())
            if (Constant *V =
               ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
              return ReplaceInstUsesWith(LI, V);
        // gep constantexpr with a null base: also unreachable code.
        if (CE->getOperand(0)->isNullValue()) {
          // Insert a new store to null instruction before the load to indicate
          // that this code is not reachable.  We do this instead of inserting
          // an unreachable instruction directly because we cannot modify the
          // CFG.
          new StoreInst(UndefValue::get(LI.getType()),
                        Constant::getNullValue(Op->getType()), &LI);
          return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
        }

      } else if (CE->isCast()) {
        // load (cast-constantexpr X) --> cast (load X), same as above.
        if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
          return Res;
      }
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Op))) {
    if (GV->isConstant() && GV->hasInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return ReplaceInstUsesWith(LI, Constant::getNullValue(LI.getType()));
      else if (isa<UndefValue>(GV->getInitializer()))
        return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many others simplifications, and
    // exposes redundancy in the code.
    // NOTE(review): only the select case is handled here; PHIs are presumably
    // handled elsewhere -- confirm.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
        Value *V1 = InsertNewInstBefore(new LoadInst(SI->getOperand(1),
                                     SI->getOperand(1)->getName()+".val"), LI);
        Value *V2 = InsertNewInstBefore(new LoadInst(SI->getOperand(2),
                                     SI->getOperand(2)->getName()+".val"), LI);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
10301
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  // The caller guarantees the pointer operand is a cast (either a CastInst
  // or a cast constant expression), so this cast<User> cannot fail.
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isInteger() || isa<PointerType>(DestPTy)) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.  Note this rewrites CastOp/SrcTy/SrcPTy in place so the
      // size-equality check below sees the element type, not the array.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value* Idxs[2];
            Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty);
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy)) &&
          IC.getTargetData().getTypeSizeInBits(SrcPTy) ==
               IC.getTargetData().getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before
        // the store, cast the value to be stored.
        Value *NewCast;
        Value *SIOp0 = SI.getOperand(0);
        // Pick the cast opcode from the source/destination type kinds:
        // int->ptr uses inttoptr, ptr->int uses ptrtoint, and same-kind
        // conversions use a plain bitcast.
        Instruction::CastOps opcode = Instruction::BitCast;
        const Type* CastSrcTy = SIOp0->getType();
        const Type* CastDstTy = SrcPTy;
        if (isa<PointerType>(CastDstTy)) {
          if (CastSrcTy->isInteger())
            opcode = Instruction::IntToPtr;
        } else if (isa<IntegerType>(CastDstTy)) {
          if (isa<PointerType>(SIOp0->getType()))
            opcode = Instruction::PtrToInt;
        }
        // Constants are folded directly; otherwise emit a cast instruction
        // just before the store.
        if (Constant *C = dyn_cast<Constant>(SIOp0))
          NewCast = ConstantExpr::getCast(opcode, C, CastDstTy);
        else
          NewCast = IC.InsertNewInstBefore(
            CastInst::create(opcode, SIOp0, CastDstTy, SIOp0->getName()+".c"),
            SI);
        return new StoreInst(NewCast, CastOp);
      }
    }
  }
  // No simplification applies.
  return 0;
}
10357
// visitStoreInst - Simplify store instructions: remove stores to dead or
// unreachable locations, improve alignment, do a tiny local dead-store
// elimination, fold stores through casts, and try to sink the store into a
// successor block.
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  if (isa<UndefValue>(Ptr)) {     // store X, undef -> noop (even if volatile)
    EraseInstFromFunction(SI);
    ++NumCombined;                // bookkeeping: count this combine
    return 0;
  }

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.  (Only legal for non-volatile stores.)
  if (Ptr->hasOneUse() && !SI.isVolatile()) {
    if (isa<AllocaInst>(Ptr)) {
      EraseInstFromFunction(SI);
      ++NumCombined;
      return 0;
    }

    // Same for a store through a single-use gep of a single-use alloca.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
      if (isa<AllocaInst>(GEP->getOperand(0)) &&
          GEP->getOperand(0)->hasOneUse()) {
        EraseInstFromFunction(SI);
        ++NumCombined;
        return 0;
      }
  }

  // Attempt to improve the alignment.  An alignment of 0 on the store means
  // "ABI alignment of the stored type", so only overwrite when the proven
  // alignment is strictly better.
  unsigned KnownAlign = GetOrEnforceKnownAlignment(Ptr);
  if (KnownAlign >
      (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
                                SI.getAlignment()))
    SI.setAlignment(KnownAlign);

  // Do really simple DSE, to catch cases where there are several consequtive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.  The scan is bounded to the
  // 6 preceding instructions.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && PrevSI->getOperand(1) == SI.getOperand(1)) {
        ++NumDeadStore;
        ++BBI;                    // step back over the instruction we erase
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && LI->getOperand(0) == Ptr && !SI.isVolatile()) {
        EraseInstFromFunction(SI);
        ++NumCombined;
        return 0;
      }
      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory())
      break;
  }


  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr)) {
    if (!isa<UndefValue>(Val)) {
      // Replace the stored value with undef so SimplifyCFG can recognize
      // the pattern; keep the store itself in place.
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        AddToWorkList(U);        // Dropped a use.
      ++NumCombined;
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val)) {
    EraseInstFromFunction(SI);
    ++NumCombined;
    return 0;
  }

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;


  // If this store is the last instruction in the basic block, and if the block
  // ends with an unconditional branch, try to move it to the successor block.
  BBI = &SI; ++BBI;
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
10473
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
/// Returns true if SI (and the matching store found in the other
/// predecessor) were merged into a single store in the successor block.
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  // Note: the caller (visitStoreInst) only invokes us when StoreBB ends in
  // an unconditional branch, so successor 0 is the only successor.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *OtherBB = 0;
  if (*PI != StoreBB)
    OtherBB = *PI;
  ++PI;
  if (PI == pred_end(DestBB))
    return false;                 // Only one predecessor: nothing to merge.

  if (*PI != StoreBB) {
    if (OtherBB)
      return false;               // Two predecessors, but neither is StoreBB.
    OtherBB = *PI;
  }
  if (++PI != pred_end(DestBB))
    return false;                 // More than two predecessors.


  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    // If this isn't a store, or isn't a store to the same location, bail out.
    --BBI;
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1))
          return false;
        break;
      }
      // If we find something that may be using the stored value, or if we run
      // out of instructions, we can't do the xform.
      if (isa<LoadInst>(BBI) || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (isa<LoadInst>(I) || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.  If both stores write the same
  // value, no PHI is necessary.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
    PN->reserveOperandSpace(2);
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it: after any PHI nodes in the destination block.
  BBI = DestBB->begin();
  while (isa<PHINode>(BBI)) ++BBI;
  InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
                                    OtherStore->isVolatile()), *BBI);

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  ++NumCombined;
  return true;
}
10580
10581
10582Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
10583 // Change br (not X), label True, label False to: br X, label False, True
10584 Value *X = 0;
10585 BasicBlock *TrueDest;
10586 BasicBlock *FalseDest;
10587 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
10588 !isa<Constant>(X)) {
10589 // Swap Destinations and condition...
10590 BI.setCondition(X);
10591 BI.setSuccessor(0, FalseDest);
10592 BI.setSuccessor(1, TrueDest);
10593 return &BI;
10594 }
10595
10596 // Cannonicalize fcmp_one -> fcmp_oeq
10597 FCmpInst::Predicate FPred; Value *Y;
10598 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
10599 TrueDest, FalseDest)))
10600 if ((FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
10601 FPred == FCmpInst::FCMP_OGE) && BI.getCondition()->hasOneUse()) {
10602 FCmpInst *I = cast<FCmpInst>(BI.getCondition());
10603 FCmpInst::Predicate NewPred = FCmpInst::getInversePredicate(FPred);
10604 Instruction *NewSCC = new FCmpInst(NewPred, X, Y, "", I);
10605 NewSCC->takeName(I);
10606 // Swap Destinations and condition...
10607 BI.setCondition(NewSCC);
10608 BI.setSuccessor(0, FalseDest);
10609 BI.setSuccessor(1, TrueDest);
10610 RemoveFromWorkList(I);
10611 I->eraseFromParent();
10612 AddToWorkList(NewSCC);
10613 return &BI;
10614 }
10615
10616 // Cannonicalize icmp_ne -> icmp_eq
10617 ICmpInst::Predicate IPred;
10618 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
10619 TrueDest, FalseDest)))
10620 if ((IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
10621 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
10622 IPred == ICmpInst::ICMP_SGE) && BI.getCondition()->hasOneUse()) {
10623 ICmpInst *I = cast<ICmpInst>(BI.getCondition());
10624 ICmpInst::Predicate NewPred = ICmpInst::getInversePredicate(IPred);
10625 Instruction *NewSCC = new ICmpInst(NewPred, X, Y, "", I);
10626 NewSCC->takeName(I);
10627 // Swap Destinations and condition...
10628 BI.setCondition(NewSCC);
10629 BI.setSuccessor(0, FalseDest);
10630 BI.setSuccessor(1, TrueDest);
10631 RemoveFromWorkList(I);
10632 I->eraseFromParent();;
10633 AddToWorkList(NewSCC);
10634 return &BI;
10635 }
10636
10637 return 0;
10638}
10639
10640Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
10641 Value *Cond = SI.getCondition();
10642 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
10643 if (I->getOpcode() == Instruction::Add)
10644 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
10645 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
10646 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
10647 SI.setOperand(i,ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
10648 AddRHS));
10649 SI.setOperand(0, I->getOperand(0));
10650 AddToWorkList(I);
10651 return &SI;
10652 }
10653 }
10654 return 0;
10655}
10656
10657/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
10658/// is to leave as a vector operation.
10659static bool CheapToScalarize(Value *V, bool isConstant) {
10660 if (isa<ConstantAggregateZero>(V))
10661 return true;
10662 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
10663 if (isConstant) return true;
10664 // If all elts are the same, we can extract.
10665 Constant *Op0 = C->getOperand(0);
10666 for (unsigned i = 1; i < C->getNumOperands(); ++i)
10667 if (C->getOperand(i) != Op0)
10668 return false;
10669 return true;
10670 }
10671 Instruction *I = dyn_cast<Instruction>(V);
10672 if (!I) return false;
10673
10674 // Insert element gets simplified to the inserted element or is deleted if
10675 // this is constant idx extract element and its a constant idx insertelt.
10676 if (I->getOpcode() == Instruction::InsertElement && isConstant &&
10677 isa<ConstantInt>(I->getOperand(2)))
10678 return true;
10679 if (I->getOpcode() == Instruction::Load && I->hasOneUse())
10680 return true;
10681 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
10682 if (BO->hasOneUse() &&
10683 (CheapToScalarize(BO->getOperand(0), isConstant) ||
10684 CheapToScalarize(BO->getOperand(1), isConstant)))
10685 return true;
10686 if (CmpInst *CI = dyn_cast<CmpInst>(I))
10687 if (CI->hasOneUse() &&
10688 (CheapToScalarize(CI->getOperand(0), isConstant) ||
10689 CheapToScalarize(CI->getOperand(1), isConstant)))
10690 return true;
10691
10692 return false;
10693}
10694
10695/// Read and decode a shufflevector mask.
10696///
10697/// It turns undef elements into values that are larger than the number of
10698/// elements in the input.
10699static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
10700 unsigned NElts = SVI->getType()->getNumElements();
10701 if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
10702 return std::vector<unsigned>(NElts, 0);
10703 if (isa<UndefValue>(SVI->getOperand(2)))
10704 return std::vector<unsigned>(NElts, 2*NElts);
10705
10706 std::vector<unsigned> Result;
10707 const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
10708 for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
10709 if (isa<UndefValue>(CP->getOperand(i)))
10710 Result.push_back(NElts*2); // undef -> 8
10711 else
10712 Result.push_back(cast<ConstantInt>(CP->getOperand(i))->getZExtValue());
10713 return Result;
10714}
10715
10716/// FindScalarElement - Given a vector and an element number, see if the scalar
10717/// value is already around as a register, for example if it were inserted then
10718/// extracted from the vector.
10719static Value *FindScalarElement(Value *V, unsigned EltNo) {
10720 assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
10721 const VectorType *PTy = cast<VectorType>(V->getType());
10722 unsigned Width = PTy->getNumElements();
10723 if (EltNo >= Width) // Out of range access.
10724 return UndefValue::get(PTy->getElementType());
10725
10726 if (isa<UndefValue>(V))
10727 return UndefValue::get(PTy->getElementType());
10728 else if (isa<ConstantAggregateZero>(V))
10729 return Constant::getNullValue(PTy->getElementType());
10730 else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
10731 return CP->getOperand(EltNo);
10732 else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
10733 // If this is an insert to a variable element, we don't know what it is.
10734 if (!isa<ConstantInt>(III->getOperand(2)))
10735 return 0;
10736 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
10737
10738 // If this is an insert to the element we are looking for, return the
10739 // inserted value.
10740 if (EltNo == IIElt)
10741 return III->getOperand(1);
10742
10743 // Otherwise, the insertelement doesn't modify the value, recurse on its
10744 // vector input.
10745 return FindScalarElement(III->getOperand(0), EltNo);
10746 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
10747 unsigned InEl = getShuffleMask(SVI)[EltNo];
10748 if (InEl < Width)
10749 return FindScalarElement(SVI->getOperand(0), InEl);
10750 else if (InEl < Width*2)
10751 return FindScalarElement(SVI->getOperand(1), InEl - Width);
10752 else
10753 return UndefValue::get(PTy->getElementType());
10754 }
10755
10756 // Otherwise, we don't know.
10757 return 0;
10758}
10759
/// visitExtractElementInst - Simplify extractelement: fold extracts of
/// undef/zero/splat constants, demand only the extracted lane, trace the
/// scalar back through inserts/shuffles/bitcasts, and push the extract into
/// cheap-to-scalarize producers (binops, loads).
Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {

  // If vector val is undef, replace extract with scalar undef.
  if (isa<UndefValue>(EI.getOperand(0)))
    return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));

  // If vector val is constant 0, replace extract with scalar 0.
  if (isa<ConstantAggregateZero>(EI.getOperand(0)))
    return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));

  if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
    // If vector val is constant with uniform operands (a splat), replace EI
    // with that operand regardless of which index is extracted.
    Constant *op0 = C->getOperand(0);
    for (unsigned i = 1; i < C->getNumOperands(); ++i)
      if (C->getOperand(i) != op0) {
        op0 = 0;          // Not a splat.
        break;
      }
    if (op0)
      return ReplaceInstUsesWith(EI, op0);
  }

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
    unsigned IndexVal = IdxC->getZExtValue();
    unsigned VectorWidth =
      cast<VectorType>(EI.getOperand(0)->getType())->getNumElements();

    // If this is extracting an invalid index, turn this into undef, to avoid
    // crashing the code below.
    if (IndexVal >= VectorWidth)
      return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));

    // This instruction only demands the single element from the input vector.
    // If the input vector has a single use, simplify it based on this use
    // property.
    // NOTE(review): '1 << IndexVal' is a 64-bit-capped demanded-elements
    // bitmask; presumably VectorWidth <= 64 here — verify against
    // SimplifyDemandedVectorElts.
    if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
      uint64_t UndefElts;
      if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
                                                1 << IndexVal,
                                                UndefElts)) {
        EI.setOperand(0, V);
        return &EI;
      }
    }

    // See if the scalar is already available as a register.
    if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal))
      return ReplaceInstUsesWith(EI, Elt);

    // If this extractelement is directly using a bitcast from a vector of
    // the same number of elements, see if we can find the source element from
    // it.  In this case, we will end up needing to bitcast the scalars.
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
      if (const VectorType *VT =
              dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
        if (VT->getNumElements() == VectorWidth)
          if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
            return new BitCastInst(Elt, EI.getType());
    }
  }

  if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
    if (I->hasOneUse()) {
      // Push extractelement into predecessor operation if legal and
      // profitable to do so.
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
        // extract(binop(a,b), i) -> binop(extract(a,i), extract(b,i)).
        bool isConstantElt = isa<ConstantInt>(EI.getOperand(1));
        if (CheapToScalarize(BO, isConstantElt)) {
          ExtractElementInst *newEI0 =
            new ExtractElementInst(BO->getOperand(0), EI.getOperand(1),
                                   EI.getName()+".lhs");
          ExtractElementInst *newEI1 =
            new ExtractElementInst(BO->getOperand(1), EI.getOperand(1),
                                   EI.getName()+".rhs");
          InsertNewInstBefore(newEI0, EI);
          InsertNewInstBefore(newEI1, EI);
          return BinaryOperator::create(BO->getOpcode(), newEI0, newEI1);
        }
      } else if (isa<LoadInst>(I)) {
        // extract(load P, i) -> load (gep (bitcast P), i): load just the
        // one scalar element instead of the whole vector.
        unsigned AS =
          cast<PointerType>(I->getOperand(0)->getType())->getAddressSpace();
        Value *Ptr = InsertBitCastBefore(I->getOperand(0),
                                         PointerType::get(EI.getType(), AS),EI);
        GetElementPtrInst *GEP =
          GetElementPtrInst::Create(Ptr, EI.getOperand(1), I->getName() + ".gep");
        InsertNewInstBefore(GEP, EI);
        return new LoadInst(GEP);
      }
    }
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
      // Extracting the inserted element?
      if (IE->getOperand(2) == EI.getOperand(1))
        return ReplaceInstUsesWith(EI, IE->getOperand(1));
      // If the inserted and extracted elements are constants, they must not
      // be the same value, extract from the pre-inserted value instead.
      if (isa<Constant>(IE->getOperand(2)) &&
          isa<Constant>(EI.getOperand(1))) {
        AddUsesToWorkList(EI);
        EI.setOperand(0, IE->getOperand(0));
        return &EI;
      }
    } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      // If this is extracting an element from a shufflevector, figure out where
      // it came from and extract from the appropriate input element instead.
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
        unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
        Value *Src;
        if (SrcIdx < SVI->getType()->getNumElements())
          Src = SVI->getOperand(0);            // Lane comes from the LHS.
        else if (SrcIdx < SVI->getType()->getNumElements()*2) {
          SrcIdx -= SVI->getType()->getNumElements();
          Src = SVI->getOperand(1);            // Lane comes from the RHS.
        } else {
          // Undef lane in the mask: the extract is undef.
          return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
        }
        return new ExtractElementInst(Src, SrcIdx);
      }
    }
  }
  return 0;
}
10883
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
/// elements from either LHS or RHS, return the shuffle mask and true.
/// Otherwise, return false.
///
/// Mask encoding: lane i selects LHS element m when Mask[i] == m (m < NumElts),
/// RHS element m when Mask[i] == m+NumElts, or undef.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         std::vector<Constant*> &Mask) {
  assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();

  if (isa<UndefValue>(V)) {
    // Every lane is undef.
    Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
    return true;
  } else if (V == LHS) {
    // Identity shuffle of the LHS: lanes 0..NumElts-1.
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::Int32Ty, i));
    return true;
  } else if (V == RHS) {
    // Identity shuffle of the RHS: lanes NumElts..2*NumElts-1.
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::Int32Ty, i+NumElts));
    return true;
  } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    if (!isa<ConstantInt>(IdxOp))
      return false;                      // Variable insert index: give up.
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<UndefValue>(ScalarOp)) {  // inserting undef into vector.
      // Okay, we can handle this if the vector we are insertinting into is
      // transitively ok.
      if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
        // If so, update the mask to reflect the inserted undef.
        Mask[InsertedIdx] = UndefValue::get(Type::Int32Ty);
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
      if (isa<ConstantInt>(EI->getOperand(1)) &&
          EI->getOperand(0)->getType() == V->getType()) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // Okay, we can handle this if the vector we are insertinting into is
          // transitively ok.
          if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
            // If so, update the mask to reflect the inserted value.
            // NOTE(review): 'InsertedIdx & (NumElts-1)' only wraps correctly
            // when NumElts is a power of two — verify that assumption.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx & (NumElts-1)] =
                ConstantInt::get(Type::Int32Ty, ExtractedIdx);
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx & (NumElts-1)] =
                ConstantInt::get(Type::Int32Ty, ExtractedIdx+NumElts);

            }
            return true;
          }
        }
      }
    }
  }
  // TODO: Handle shufflevector here!

  return false;
}
10953
/// CollectShuffleElements - We are building a shuffle of V, using RHS as the
/// RHS of the shuffle instruction, if it is not null.  Return a shuffle mask
/// that computes V and the LHS value of the shuffle.
///
/// On success Mask is filled with one i32 constant per lane (values >= NumElts
/// select from RHS, smaller values from the returned LHS) and RHS may be
/// updated in place to the vector chosen as the shuffle's second input.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
                                     Value *&RHS) {
  assert(isa<VectorType>(V->getType()) &&
         (RHS == 0 || V->getType() == RHS->getType()) &&
         "Invalid shuffle!");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();

  if (isa<UndefValue>(V)) {
    // All lanes undef.
    Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
    return V;
  } else if (isa<ConstantAggregateZero>(V)) {
    // Zero vector: every lane reads element 0 of the zero vector itself.
    Mask.assign(NumElts, ConstantInt::get(Type::Int32Ty, 0));
    return V;
  } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
          EI->getOperand(0)->getType() == V->getType()) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

        // Either the extracted from or inserted into vector must be RHSVec,
        // otherwise we'd end up with a shuffle of three inputs.
        if (EI->getOperand(0) == RHS || RHS == 0) {
          // Adopt (or confirm) the extract's source as the shuffle RHS, then
          // recurse on the vector being inserted into.
          RHS = EI->getOperand(0);
          Value *V = CollectShuffleElements(VecOp, Mask, RHS);
          // NOTE(review): '& (NumElts-1)' assumes power-of-two NumElts —
          // verify.
          Mask[InsertedIdx & (NumElts-1)] =
            ConstantInt::get(Type::Int32Ty, NumElts+ExtractedIdx);
          return V;
        }

        if (VecOp == RHS) {
          Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS);
          // Everything but the extracted element is replaced with the RHS.
          for (unsigned i = 0; i != NumElts; ++i) {
            if (i != InsertedIdx)
              Mask[i] = ConstantInt::get(Type::Int32Ty, NumElts+i);
          }
          return V;
        }

        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask))
          return EI->getOperand(0);

      }
    }
  }
  // TODO: Handle shufflevector here!

  // Otherwise, can't do anything fancy.  Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(ConstantInt::get(Type::Int32Ty, i));
  return V;
}
11018
11019Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
11020 Value *VecOp = IE.getOperand(0);
11021 Value *ScalarOp = IE.getOperand(1);
11022 Value *IdxOp = IE.getOperand(2);
11023
11024 // Inserting an undef or into an undefined place, remove this.
11025 if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
11026 ReplaceInstUsesWith(IE, VecOp);
11027
11028 // If the inserted element was extracted from some other vector, and if the
11029 // indexes are constant, try to turn this into a shufflevector operation.
11030 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
11031 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
11032 EI->getOperand(0)->getType() == IE.getType()) {
11033 unsigned NumVectorElts = IE.getType()->getNumElements();
11034 unsigned ExtractedIdx =
11035 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
11036 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
11037
11038 if (ExtractedIdx >= NumVectorElts) // Out of range extract.
11039 return ReplaceInstUsesWith(IE, VecOp);
11040
11041 if (InsertedIdx >= NumVectorElts) // Out of range insert.
11042 return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
11043
11044 // If we are extracting a value from a vector, then inserting it right
11045 // back into the same place, just use the input vector.
11046 if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
11047 return ReplaceInstUsesWith(IE, VecOp);
11048
11049 // We could theoretically do this for ANY input. However, doing so could
11050 // turn chains of insertelement instructions into a chain of shufflevector
11051 // instructions, and right now we do not merge shufflevectors. As such,
11052 // only do this in a situation where it is clear that there is benefit.
11053 if (isa<UndefValue>(VecOp) || isa<ConstantAggregateZero>(VecOp)) {
11054 // Turn this into shuffle(EIOp0, VecOp, Mask). The result has all of
11055 // the values of VecOp, except then one read from EIOp0.
11056 // Build a new shuffle mask.
11057 std::vector<Constant*> Mask;
11058 if (isa<UndefValue>(VecOp))
11059 Mask.assign(NumVectorElts, UndefValue::get(Type::Int32Ty));
11060 else {
11061 assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
11062 Mask.assign(NumVectorElts, ConstantInt::get(Type::Int32Ty,
11063 NumVectorElts));
11064 }
11065 Mask[InsertedIdx] = ConstantInt::get(Type::Int32Ty, ExtractedIdx);
11066 return new ShuffleVectorInst(EI->getOperand(0), VecOp,
11067 ConstantVector::get(Mask));
11068 }
11069
11070 // If this insertelement isn't used by some other insertelement, turn it
11071 // (and any insertelements it points to), into one big shuffle.
11072 if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
11073 std::vector<Constant*> Mask;
11074 Value *RHS = 0;
11075 Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
11076 if (RHS == 0) RHS = UndefValue::get(LHS->getType());
11077 // We now have a shuffle of LHS, RHS, Mask.
11078 return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(Mask));
11079 }
11080 }
11081 }
11082
11083 return 0;
11084}
11085
11086
/// visitShuffleVectorInst - Simplify shufflevector: fold undef masks/inputs,
/// canonicalize toward shuffle(X, undef, mask), drop identity shuffles, and
/// conservatively merge a shuffle-of-shuffle when the combined mask equals
/// one of the existing masks.
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  std::vector<unsigned> Mask = getShuffleMask(&SVI);

  bool MadeChange = false;

  // Undefined shuffle mask -> undefined value.
  if (isa<UndefValue>(SVI.getOperand(2)))
    return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));

  // If we have shuffle(x, undef, mask) and any elements of mask refer to
  // the undef, change them to undefs.
  // (In the decoded mask, values >= e select RHS lanes and 2*e means undef.)
  if (isa<UndefValue>(SVI.getOperand(1))) {
    // Scan to see if there are any references to the RHS.  If so, replace them
    // with undef element refs and set MadeChange to true.
    for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
      if (Mask[i] >= e && Mask[i] != 2*e) {
        Mask[i] = 2*e;
        MadeChange = true;
      }
    }

    if (MadeChange) {
      // Rebuild the mask operand with the undef'd lanes.
      std::vector<Constant*> Elts;
      for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
        if (Mask[i] == 2*e)
          Elts.push_back(UndefValue::get(Type::Int32Ty));
        else
          Elts.push_back(ConstantInt::get(Type::Int32Ty, Mask[i]));
      }
      SVI.setOperand(2, ConstantVector::get(Elts));
    }
  }

  // Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
  // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
  if (LHS == RHS || isa<UndefValue>(LHS)) {
    if (isa<UndefValue>(LHS) && LHS == RHS) {
      // shuffle(undef,undef,mask) -> undef.
      return ReplaceInstUsesWith(SVI, LHS);
    }

    // Remap any references to RHS to use LHS.
    std::vector<Constant*> Elts;
    for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
      if (Mask[i] >= 2*e)
        Elts.push_back(UndefValue::get(Type::Int32Ty));
      else {
        if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
            (Mask[i] < e && isa<UndefValue>(LHS)))
          Mask[i] = 2*e;     // Turn into undef.
        else
          // NOTE(review): '&= (e-1)' folds an RHS index onto the LHS and
          // assumes e is a power of two — verify.
          Mask[i] &= (e-1);  // Force to LHS.
        Elts.push_back(ConstantInt::get(Type::Int32Ty, Mask[i]));
      }
    }
    SVI.setOperand(0, SVI.getOperand(1));
    SVI.setOperand(1, UndefValue::get(RHS->getType()));
    SVI.setOperand(2, ConstantVector::get(Elts));
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }

  // Analyze the shuffle: is the LHS or the RHS an identity shuffle?
  bool isLHSID = true, isRHSID = true;

  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] >= e*2) continue;  // Ignore undef values.
    // Is this an identity shuffle of the LHS value?
    isLHSID &= (Mask[i] == i);

    // Is this an identity shuffle of the RHS value?
    isRHSID &= (Mask[i]-e == i);
  }

  // Eliminate identity shuffles.
  if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
  if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.  Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code gen may not be smart enough to turn a merged
  // shuffle into two specific shuffles: it may produce worse code.  As such,
  // we only merge two shuffles if the result is one of the two input shuffle
  // masks.  In this case, merging the shuffles just removes one instruction,
  // which we know is safe.  This is good for things like turning:
  // (splat(splat)) -> splat.
  if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
    if (isa<UndefValue>(RHS)) {
      std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);

      // Compose the two masks: our lane i reads LHSSVI's lane Mask[i].
      std::vector<unsigned> NewMask;
      for (unsigned i = 0, e = Mask.size(); i != e; ++i)
        if (Mask[i] >= 2*e)
          NewMask.push_back(2*e);
        else
          NewMask.push_back(LHSMask[Mask[i]]);

      // If the result mask is equal to the src shuffle or this shuffle mask, do
      // the replacement.
      if (NewMask == LHSMask || NewMask == Mask) {
        std::vector<Constant*> Elts;
        for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
          if (NewMask[i] >= e*2) {
            Elts.push_back(UndefValue::get(Type::Int32Ty));
          } else {
            Elts.push_back(ConstantInt::get(Type::Int32Ty, NewMask[i]));
          }
        }
        return new ShuffleVectorInst(LHSSVI->getOperand(0),
                                     LHSSVI->getOperand(1),
                                     ConstantVector::get(Elts));
      }
    }
  }

  return MadeChange ? &SVI : 0;
}
11209
11210
11211
11212
11213/// TryToSinkInstruction - Try to move the specified instruction from its
11214/// current block into the beginning of DestBlock, which can only happen if it's
11215/// safe to move the instruction past all of the instructions between it and the
11216/// end of its block.
11217static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
11218 assert(I->hasOneUse() && "Invariants didn't hold!");
11219
11220 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
11221 if (isa<PHINode>(I) || I->mayWriteToMemory()) return false;
11222
11223 // Do not sink alloca instructions out of the entry block.
11224 if (isa<AllocaInst>(I) && I->getParent() ==
11225 &DestBlock->getParent()->getEntryBlock())
11226 return false;
11227
11228 // We can only sink load instructions if there is nothing between the load and
11229 // the end of block that could change the value.
11230 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
11231 for (BasicBlock::iterator Scan = LI, E = LI->getParent()->end();
11232 Scan != E; ++Scan)
11233 if (Scan->mayWriteToMemory())
11234 return false;
11235 }
11236
11237 BasicBlock::iterator InsertPos = DestBlock->begin();
11238 while (isa<PHINode>(InsertPos)) ++InsertPos;
11239
11240 I->moveBefore(InsertPos);
11241 ++NumSunkInst;
11242 return true;
11243}
11244
11245
11246/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
11247/// all reachable code to the worklist.
11248///
11249/// This has a couple of tricks to make the code faster and more powerful. In
11250/// particular, we constant fold and DCE instructions as we go, to avoid adding
11251/// them to the worklist (this significantly speeds up instcombine on code where
11252/// many instructions are dead or constant). Additionally, if we find a branch
11253/// whose condition is a known constant, we only visit the reachable successors.
11254///
11255static void AddReachableCodeToWorklist(BasicBlock *BB,
11256 SmallPtrSet<BasicBlock*, 64> &Visited,
11257 InstCombiner &IC,
11258 const TargetData *TD) {
11259 std::vector<BasicBlock*> Worklist;
11260 Worklist.push_back(BB);
11261
11262 while (!Worklist.empty()) {
11263 BB = Worklist.back();
11264 Worklist.pop_back();
11265
11266 // We have now visited this block! If we've already been here, ignore it.
11267 if (!Visited.insert(BB)) continue;
11268
11269 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
11270 Instruction *Inst = BBI++;
11271
11272 // DCE instruction if trivially dead.
11273 if (isInstructionTriviallyDead(Inst)) {
11274 ++NumDeadInst;
11275 DOUT << "IC: DCE: " << *Inst;
11276 Inst->eraseFromParent();
11277 continue;
11278 }
11279
11280 // ConstantProp instruction if trivially constant.
11281 if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
11282 DOUT << "IC: ConstFold to: " << *C << " from: " << *Inst;
11283 Inst->replaceAllUsesWith(C);
11284 ++NumConstProp;
11285 Inst->eraseFromParent();
11286 continue;
11287 }
Chris Lattnere0f462d2007-07-20 22:06:41 +000011288
Dan Gohmanf17a25c2007-07-18 16:29:46 +000011289 IC.AddToWorkList(Inst);
11290 }
11291
11292 // Recursively visit successors. If this is a branch or switch on a
11293 // constant, only visit the reachable successor.
11294 TerminatorInst *TI = BB->getTerminator();
11295 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
11296 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
11297 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
Nick Lewyckyd551cf12008-03-09 08:50:23 +000011298 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
Nick Lewyckyd8aa33a2008-04-25 16:53:59 +000011299 Worklist.push_back(ReachableBB);
Dan Gohmanf17a25c2007-07-18 16:29:46 +000011300 continue;
11301 }
11302 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
11303 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
11304 // See if this is an explicit destination.
11305 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
11306 if (SI->getCaseValue(i) == Cond) {
Nick Lewyckyd551cf12008-03-09 08:50:23 +000011307 BasicBlock *ReachableBB = SI->getSuccessor(i);
Nick Lewyckyd8aa33a2008-04-25 16:53:59 +000011308 Worklist.push_back(ReachableBB);
Dan Gohmanf17a25c2007-07-18 16:29:46 +000011309 continue;
11310 }
11311
11312 // Otherwise it is the default destination.
11313 Worklist.push_back(SI->getSuccessor(0));
11314 continue;
11315 }
11316 }
11317
11318 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
11319 Worklist.push_back(TI->getSuccessor(i));
11320 }
11321}
11322
/// DoOneIteration - Run one complete pass of instruction combining over the
/// function: seed the worklist with all reachable instructions, delete code in
/// unreachable blocks, then repeatedly pop instructions and try to DCE,
/// constant-fold, sink, or combine them until the worklist is empty.
/// Returns true if anything in the function was changed.
bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  bool Changed = false;
  TD = &getAnalysis<TargetData>();

  DEBUG(DOUT << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
             << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populate the worklist with
    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function.  If we find any blocks that are
    // unreachable, remove any instructions inside of them.  This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!Visited.count(BB)) {
        Instruction *Term = BB->getTerminator();
        while (Term != BB->begin()) {   // Remove instrs bottom-up
          BasicBlock::iterator I = Term; --I;

          DOUT << "IC: DCE: " << *I;
          ++NumDeadInst;

          // Replace remaining uses with undef so later erases don't trip over
          // dangling references; the terminator itself is left in place.
          if (!I->use_empty())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          I->eraseFromParent();
        }
      }
  }

  while (!Worklist.empty()) {
    Instruction *I = RemoveOneFromWorkList();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      // Add operands to the worklist.
      // NOTE(review): the < 4 bound looks like a compile-time heuristic to
      // skip re-queueing the operands of wide instructions (e.g. big PHIs) —
      // confirm; the constant-fold path below re-queues unconditionally.
      if (I->getNumOperands() < 4)
        AddUsesToWorkList(*I);
      ++NumDeadInst;

      DOUT << "IC: DCE: " << *I;

      I->eraseFromParent();
      RemoveFromWorkList(I);
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (Constant *C = ConstantFoldInstruction(I, TD)) {
      DOUT << "IC: ConstFold to: " << *C << " from: " << *I;

      // Add operands to the worklist: their use counts drop when I goes away.
      AddUsesToWorkList(*I);
      ReplaceInstUsesWith(*I, C);

      ++NumConstProp;
      I->eraseFromParent();
      RemoveFromWorkList(I);
      continue;
    }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      BasicBlock *UserParent = cast<Instruction>(I->use_back())->getParent();
      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessors (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && !isa<PHINode>(I->use_back()) &&
            next(pred_begin(UserParent)) == pred_end(UserParent))
          // Okay, the CFG is simple enough, try to sink this instruction.
          Changed |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it...
#ifndef NDEBUG
    std::string OrigI;
#endif
    // Capture the textual form of I before visiting, for the debug trace below.
    DEBUG(std::ostringstream SS; I->print(SS); OrigI = SS.str(););
    // visit() dispatches to the per-opcode combiner.  A non-null result means
    // something changed: either a replacement instruction, or I itself
    // modified in place.
    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DOUT << "IC: Old = " << *I
             << "    New = " << *Result;

        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        AddToWorkList(Result);
        AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        // Make sure that we reprocess all operands now that we reduced their
        // use counts.
        AddUsesToWorkList(*I);

        // Instructions can end up on the worklist more than once.  Make sure
        // we do not process an instruction that has been deleted.
        RemoveFromWorkList(I);

        // Erase the old instruction.
        InstParent->getInstList().erase(I);
      } else {
#ifndef NDEBUG
        DOUT << "IC: Mod = " << OrigI
             << "    New = " << *I;
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // if so, remove it.
        if (isInstructionTriviallyDead(I)) {
          // Make sure we process all operands now that we are reducing their
          // use counts.
          AddUsesToWorkList(*I);

          // Instructions may end up in the worklist more than once.  Erase all
          // occurrences of this instruction.
          RemoveFromWorkList(I);
          I->eraseFromParent();
        } else {
          // Modified in place and still live: revisit it and its users.
          AddToWorkList(I);
          AddUsersToWorkList(*I);
        }
      }
      Changed = true;
    }
  }

  assert(WorklistMap.empty() && "Worklist empty, but map not?");

  // Do an explicit clear, this shrinks the map if needed.
  WorklistMap.clear();
  return Changed;
}
11485
11486
11487bool InstCombiner::runOnFunction(Function &F) {
11488 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
11489
11490 bool EverMadeChange = false;
11491
11492 // Iterate while there is work to do.
11493 unsigned Iteration = 0;
11494 while (DoOneIteration(F, Iteration++))
11495 EverMadeChange = true;
11496 return EverMadeChange;
11497}
11498
11499FunctionPass *llvm::createInstructionCombiningPass() {
11500 return new InstCombiner();
11501}
11502