//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
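///
/// For illustration (invented IR), this accepts a ConstantExpr GEP into a
/// constant global such as:
///   getelementptr inbounds ([8 x i32]* @ctable, i64 0, i64 3)
/// but rejects the equivalent GetElementPtrInst computed at runtime.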
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
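///
/// For illustration (invented IR), the pattern this looks for is:
///   %buf = alloca [16 x i8]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* bitcast (...@G...),
///                                        i64 16, i32 1, i1 false)
///   ; ...only reads of %buf from here on...
/// where @G is a constant global; TheCopy is then set to that memcpy.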
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 4> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile, non-atomic loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // A GEP with all zero indices doesn't offset the pointer; one with
        // any non-zero index does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just
        // a load (but one that potentially returns the value itself), so we
        // can ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove instruction if
/// the specified alloca is only modified by a copy from a constant global; if
/// we can prove this, we can replace any uses of the alloca with uses of the
/// global directly.  Otherwise return null.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (DL) {
    Type *IntPtrTy = DL->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
          ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the
      // block, insert our getelementptr instruction...
      //
      Type *IdxTy = DL ? DL->getIntPtrType(AI.getType())
                       : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
          GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);
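      // GEP of the new [C x Ty]* with indices (0, 0) yields a pointer to the
      // first element, which has the same type as the original alloca, so all
      // existing users can simply be pointed at it.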

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero-sized alloca there is no point in doing an array
      // allocation.  This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation.  If this is the case, we can change all users to use the
    // constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, AC, &AI, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}


/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type.  It handles
/// metadata, etc., and returns the new instruction.  The \c NewTy should be
/// the loaded *value* type.  This will convert it to a pointer, cast the
/// operand to that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
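///
/// For illustration (invented IR), with \c NewTy == float this rewrites:
///   %v = load i32* %p
/// into:
///   %p.cast = bitcast i32* %p to float*
///   %v = load float* %p.cast
/// and copies the original load's metadata onto the new load.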
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*.  The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // FIXME: We should translate this into range metadata for integer types
      // and vice versa.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*.  The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of value their uses expect after
/// looking through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation.  For example,
/// when loading an integer and converting that immediately to a pointer, we
/// should instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change.  This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load.  This is debatable, and might be reasonable to
/// change later.  However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
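///
/// For illustration (invented IR), the single-use bitcast case below turns:
///   %v = load i32* %p
///   %f = bitcast i32 %v to float
/// into:
///   %p.cast = bitcast i32* %p to float*
///   %f = load float* %p.cast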
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and atomic loads here with
  // some care, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type.  We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  const DataLayout *DL = IC.getDataLayout();
  if (!Ty->isIntegerTy() && Ty->isSized() && DL &&
      DL->isLegalInteger(DL->getTypeStoreSizeInBits(Ty)) &&
      DL->getTypeStoreSizeInBits(Ty) == DL->getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL->getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.EraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

// Return true if we can determine that all possible objects pointed to by the
// provided pointer value are not only dereferenceable but also definitively
// less than or equal to the provided maximum size.  Otherwise, return false.
// Constant global values and allocas are the kinds of objects whose size we
// can determine here.
//
// FIXME: This should probably live in ValueTracking (or similar).
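//
// For illustration: "%a = alloca i32, i32 %n" with a non-constant %n has an
// unknown size, so this returns false; a definitive constant global
// "@g = constant [4 x i32] ..." is 16 bytes, so this returns true whenever
// MaxSize >= 16 (assuming 4-byte i32).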
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout *DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching.  Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL->getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL->getTypeAllocSize(GV->getType()->getElementType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero.  Currently, we only handle the first such index.  We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  const DataLayout *DL = IC.getDataLayout();
  if (GEPI->getNumOperands() < 2 || !DL)
    return false;

  // Find the first non-zero index of a GEP.  If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type.  See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getOperand(0)->getType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  uint64_t TyAllocSize = DL->getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero,
  // make sure they're all non-negative.  If any of them are negative, the
  // overall address being computed might be before the base address
  // determined by the first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient).  We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer
  // is also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero.  If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign = getOrEnforceKnownAlignment(
        Op, DL->getPrefTypeAlignment(LI.getType()), DL, AC, &LI, DT);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      DL->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal =
          FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) &&
        GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change.  This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store.  This is debatable, and might be reasonable to change
/// later.  However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away.  This indicates
/// the caller must erase the store instruction.  We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
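///
/// For illustration (invented IR), this turns:
///   %f = bitcast i32 %v to float
///   store float %f, float* %p
/// into:
///   %p.cast = bitcast float* %p to i32*
///   store i32 %v, i32* %p.cast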
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle both volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign = getOrEnforceKnownAlignment(
        Ptr, DL->getPrefTypeAlignment(Val->getType()), DL, AC, &SI, DT);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is
    // from the pointer we're loading and is producing the pointer we're
    // storing, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2; }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
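/// For example, the if/then/else form corresponds to IR shaped roughly like
/// the following sketch (exact syntax depends on the LLVM version):
///   then:
///     store i32 1, i32* %p
///     br label %join
///   else:
///     store i32 2, i32* %p
///     br label %join
/// which becomes, in the join block:
///   %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
///   store i32 %storemerge, i32* %p
///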
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether DestBB has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

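  // At this point DestBB is known to have exactly two predecessors: StoreBB
  // and OtherBB.
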
  // Bail out if the relevant blocks aren't all distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  We already know there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debug info intrinsics and no-op pointer bitcasts.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ends with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr goes to both DestBB and StoreBB, so this is
    // an if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
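    //
    // The CFG shape being matched is roughly (a sketch, not verbatim IR):
    //   OtherBB:
    //     store i32 %v1, i32* %p                       ; OtherStore
    //     br i1 %cond, label %StoreBB, label %DestBB
    //   StoreBB:
    //     store i32 %v2, i32* %p                       ; SI
    //     br label %DestBB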
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

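  // At this point OtherStore is known to be a store of the same kind to the
  // same pointer, and on every path into DestBB the last store to that
  // pointer is either SI or OtherStore, so the two can be merged into a
  // single store in DestBB.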
  // Insert a PHI node now if we need it (i.e. if the two stored values
  // differ).
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
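  // Copying SI's volatility, alignment, ordering and synch scope is also
  // correct for OtherStore: isSameOperationAs already checked that the two
  // stores agree on all of these.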
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }
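  // (Merging intersects the metadata: the new store executes on both paths,
  // so it may only carry aliasing facts that hold for both original stores.)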

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}