//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
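
// For example (illustrative IR; @G is a hypothetical global), this accepts a
// constant GEP expression into a constant global:
//   @G = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
//   ... getelementptr inbounds ([4 x i32], [4 x i32]* @G, i64 0, i64 2) ...
// but it rejects the same address when computed by a GEP *instruction*.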

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffsetted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just
        // a load (but one that potentially returns the value itself), so we
        // can ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
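
// As a rough sketch (illustrative IR; all names are hypothetical), the
// pattern recognized above is:
//   @G = private unnamed_addr constant [16 x i8] c"..."
//   %buf = alloca [16 x i8]
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf.i8, i8* %G.i8, i64 16,
//                                        i32 1, i1 false)
//   ... only reads of %buf afterwards ...
// in which case every use of %buf can be rewritten to use @G directly.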

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
/// alloca is only modified by such a copy from a constant global.  If we can
/// prove this, we can replace any uses of the alloca with uses of the global
/// directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
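  // For example (illustrative IR):
  //   %a = alloca i32, i32 4
  // becomes:
  //   %a1 = alloca [4 x i32]
  //   %a1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %a1, i64 0, i64 0
  // and all uses of %a are replaced with %a1.sub.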
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible... also skip interleaved debug info.
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero-byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero-sized alloca there is no point in doing an array
      // allocation.  This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast = ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            TheSrc, AI.getType());
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type.  It handles
/// metadata, etc., and returns the new instruction.  The \c NewTy should be
/// the loaded *value* type.  This will convert it to a pointer, cast the
/// operand to that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here!
    // This routine is supposed to clone a load instruction changing *only
    // its type*.  The only metadata it makes sense to drop is metadata which
    // is invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
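      // E.g. (illustrative): !nonnull on a loaded pointer becomes
      // !range !{i64 1, i64 0} on the integer load, i.e. the wrapping range
      // that excludes only zero.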
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.

      // If it's a pointer now and the range does not contain 0, make it
      // !nonnull.
      if (NewTy->isPointerTy()) {
        unsigned BitWidth = IC.getDataLayout().getTypeSizeInBits(NewTy);
        if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
          MDNode *NN = MDNode::get(LI.getContext(), None);
          NewLoad->setMetadata(LLVMContext::MD_nonnull, NN);
        }
      }
      break;
    }
  }
  return NewLoad;
}
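
// As an illustrative sketch, this helper turns e.g.:
//   %x = load i32, i32* %p
// into:
//   %c = bitcast i32* %p to float*
//   %x.suffix = load float, float* %c
// carrying over alignment, atomicity/ordering, and applicable metadata.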

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here!
    // This routine is supposed to clone a store instruction changing *only
    // its type*.  The only metadata it makes sense to drop is metadata which
    // is invalidated when the pointer type changes.  This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct.  If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
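
// Callers use this helper to rewrite patterns such as (illustrative IR):
//   %v = bitcast float %f to i32
//   store i32 %v, i32* %p
// into a store of float %f through a float* bitcast of %p.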

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation.  For example,
/// when loading an integer and converting that immediately to a pointer, we
/// should instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change.  This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load.  This is debatable, and might be reasonable to
/// change later.  However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic loads here, but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type.  We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer
  // types, as long as those are noops (i.e., the source or dest type have the
  // same bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}
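
// For instance (illustrative IR, assuming 64-bit pointers so the cast is a
// noop), a load whose sole use is a cast:
//   %i = load i64, i64* %p
//   %q = inttoptr i64 %i to i8*
// is rewritten to load i8* directly through a bitcast of %p, and %q is
// replaced by the new load.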

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large.  Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > 1024)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
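
// For instance (illustrative IR), a padding-free two-field struct load:
//   %sv = load { i32, i32 }, { i32, i32 }* %p
// is split into a GEP plus a scalar load per field, stitched back together
// with insertvalue instructions, so later passes see the scalar values
// directly.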

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true.  Otherwise, return
// false.  Constant global values and allocas are the kinds of objects whose
// size we can determine here.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize,
    // continue searching.  Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32, i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero.  Currently, we only handle the first such index.  We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP.  If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type.  See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero,
  // make sure they're all non-negative.  If any of them are negative, the
  // overall address being computed might be before the base address
  // determined by the first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient).  We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer
  // is also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero.  If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 864 | |
Philip Reames | 3ac0718 | 2016-04-21 17:45:05 +0000 | [diff] [blame] | 865 | // None of the following transforms are legal for volatile/ordered atomic |
| 866 | // loads. Most of them do apply for unordered atomics. |
| 867 | if (!LI.isUnordered()) return nullptr; |
Philip Reames | ac55090 | 2016-04-21 17:03:33 +0000 | [diff] [blame] | 868 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 869 | // load(gep null, ...) -> unreachable |
| 870 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) { |
| 871 | const Value *GEPI0 = GEPI->getOperand(0); |
| 872 | // TODO: Consider a target hook for valid address spaces for this xform. |
| 873 | if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){ |
| 874 | // Insert a new store-to-null instruction before the load to indicate
| 875 | // that this code is not reachable. We do this instead of inserting
| 876 | // an unreachable instruction directly because we cannot modify the
| 877 | // CFG.
| 878 | new StoreInst(UndefValue::get(LI.getType()), |
| 879 | Constant::getNullValue(Op->getType()), &LI); |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 880 | return replaceInstUsesWith(LI, UndefValue::get(LI.getType())); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 881 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 882 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 883 | |
| 884 | // load null/undef -> unreachable |
| 885 | // TODO: Consider a target hook for valid address spaces for this xform. |
| 886 | if (isa<UndefValue>(Op) || |
| 887 | (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) { |
| 888 | // Insert a new store-to-null instruction before the load to indicate that
| 889 | // this code is not reachable. We do this instead of inserting an
| 890 | // unreachable instruction directly because we cannot modify the CFG.
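| | // For example (a sketch): "%v = load i32, i32* null" becomes
| | //   store i32 undef, i32* null
| | // with %v's uses replaced by undef; SimplifyCFG later turns the store
| | // to null into 'unreachable'.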
| 891 | new StoreInst(UndefValue::get(LI.getType()), |
| 892 | Constant::getNullValue(Op->getType()), &LI); |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 893 | return replaceInstUsesWith(LI, UndefValue::get(LI.getType())); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 894 | } |
| 895 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 896 | if (Op->hasOneUse()) { |
| 897 | // Change select and PHI nodes to select values instead of addresses: this |
| 898 | // helps alias analysis out a lot, allows many other simplifications, and
| 899 | // exposes redundancy in the code. |
| 900 | // |
| 901 | // Note that we cannot do the transformation unless we know that the |
| 902 | // introduced loads cannot trap! Something like this is valid as long as |
| 903 | // the condition is always false: load (select bool %C, int* null, int* %G), |
| 904 | // but it would not be valid if we transformed it to load from null |
| 905 | // unconditionally. |
| 906 | // |
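| | // A sketch of the select case, assuming both loads are safe to speculate:
| | //   %p = select i1 %c, i32* %a, i32* %b
| | //   %v = load i32, i32* %p
| | // becomes
| | //   %a.val = load i32, i32* %a
| | //   %b.val = load i32, i32* %b
| | //   %v = select i1 %c, i32 %a.val, i32 %b.val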
| 907 | if (SelectInst *SI = dyn_cast<SelectInst>(Op)) { |
| 908 | // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2). |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 909 | unsigned Align = LI.getAlignment(); |
Artur Pilipenko | 9bb6bea | 2016-04-27 11:00:48 +0000 | [diff] [blame] | 910 | if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) && |
| 911 | isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) { |
Bob Wilson | 4b71b6c | 2010-01-30 00:41:10 +0000 | [diff] [blame] | 912 | LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1), |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 913 | SI->getOperand(1)->getName()+".val"); |
Bob Wilson | 4b71b6c | 2010-01-30 00:41:10 +0000 | [diff] [blame] | 914 | LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2), |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 915 | SI->getOperand(2)->getName()+".val"); |
Philip Reames | a98c7ea | 2016-04-21 17:59:40 +0000 | [diff] [blame] | 916 | assert(LI.isUnordered() && "implied by above"); |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 917 | V1->setAlignment(Align); |
Philip Reames | a98c7ea | 2016-04-21 17:59:40 +0000 | [diff] [blame] | 918 | V1->setAtomic(LI.getOrdering(), LI.getSynchScope()); |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 919 | V2->setAlignment(Align); |
Philip Reames | a98c7ea | 2016-04-21 17:59:40 +0000 | [diff] [blame] | 920 | V2->setAtomic(LI.getOrdering(), LI.getSynchScope()); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 921 | return SelectInst::Create(SI->getCondition(), V1, V2); |
| 922 | } |
| 923 | |
| 924 | // load (select (cond, null, P)) -> load P |
Larisse Voufo | 532bf71 | 2015-09-18 19:14:35 +0000 | [diff] [blame] | 925 | if (isa<ConstantPointerNull>(SI->getOperand(1)) && |
Philip Reames | 5ad26c3 | 2014-12-29 22:46:21 +0000 | [diff] [blame] | 926 | LI.getPointerAddressSpace() == 0) { |
| 927 | LI.setOperand(0, SI->getOperand(2)); |
| 928 | return &LI; |
| 929 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 930 | |
| 931 | // load (select (cond, P, null)) -> load P |
Philip Reames | 5ad26c3 | 2014-12-29 22:46:21 +0000 | [diff] [blame] | 932 | if (isa<ConstantPointerNull>(SI->getOperand(2)) && |
| 933 | LI.getPointerAddressSpace() == 0) { |
| 934 | LI.setOperand(0, SI->getOperand(1)); |
| 935 | return &LI; |
| 936 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 937 | } |
| 938 | } |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 939 | return nullptr; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 940 | } |
| 941 | |
Arch D. Robison | be0490a | 2016-04-25 22:22:39 +0000 | [diff] [blame] | 942 | /// \brief Look for an extractelement/insertvalue sequence that acts like a bitcast.
| 943 | /// |
| 944 | /// \returns underlying value that was "cast", or nullptr otherwise. |
| 945 | /// |
| 946 | /// For example, if we have: |
| 947 | /// |
| 948 | /// %E0 = extractelement <2 x double> %U, i32 0 |
| 949 | /// %V0 = insertvalue [2 x double] undef, double %E0, 0 |
| 950 | /// %E1 = extractelement <2 x double> %U, i32 1 |
| 951 | /// %V1 = insertvalue [2 x double] %V0, double %E1, 1 |
| 952 | /// |
| 953 | /// and the layout of a <2 x double> is isomorphic to a [2 x double], |
| 954 | /// then %V1 can be safely approximated by a conceptual "bitcast" of %U. |
| 955 | /// Note that %U may contain non-undef values where %V1 has undef. |
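| | /// For instance (a sketch), if only element 0 is ever inserted:
| | ///
| | ///   %E0 = extractelement <2 x double> %U, i32 0
| | ///   %V0 = insertvalue [2 x double] undef, double %E0, 0
| | ///
| | /// then %V0 still acts like a "bitcast" of %U, even though index 1 of
| | /// %V0 is undef where %U may hold a defined value.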
| 956 | static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) { |
| 957 | Value *U = nullptr; |
| 958 | while (auto *IV = dyn_cast<InsertValueInst>(V)) { |
| 959 | auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand()); |
| 960 | if (!E) |
| 961 | return nullptr; |
| 962 | auto *W = E->getVectorOperand(); |
| 963 | if (!U) |
| 964 | U = W; |
| 965 | else if (U != W) |
| 966 | return nullptr; |
| 967 | auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand()); |
| 968 | if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin()) |
| 969 | return nullptr; |
| 970 | V = IV->getAggregateOperand(); |
| 971 | } |
| 972 | if (!isa<UndefValue>(V) || !U)
| 973 | return nullptr; |
| 974 | |
| 975 | auto *UT = cast<VectorType>(U->getType()); |
| 976 | auto *VT = V->getType(); |
| 977 | // Check that types UT and VT are bitwise isomorphic. |
| 978 | const auto &DL = IC.getDataLayout(); |
| 979 | if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT))
| 980 | return nullptr;
| 982 | if (auto *AT = dyn_cast<ArrayType>(VT)) { |
| 983 | if (AT->getNumElements() != UT->getNumElements()) |
| 984 | return nullptr; |
| 985 | } else { |
| 986 | auto *ST = cast<StructType>(VT); |
| 987 | if (ST->getNumElements() != UT->getNumElements()) |
| 988 | return nullptr; |
| 989 | for (const auto *EltT : ST->elements()) { |
| 990 | if (EltT != UT->getElementType()) |
| 991 | return nullptr; |
| 992 | } |
| 993 | } |
| 994 | return U; |
| 995 | } |
| 996 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 997 | /// \brief Combine stores to match the type of value being stored. |
| 998 | /// |
| 999 | /// The core idea here is that memory does not have any intrinsic type, and
| 1000 | /// where we can, we should match the type of a store to the type of the
| 1001 | /// value being stored.
| 1002 | /// |
| 1003 | /// However, this routine must never change the width of a store or the
| 1004 | /// number of stores, as that would introduce a semantic change. This combine
| 1005 | /// is expected to be a semantic no-op that just allows stores to more
| 1006 | /// closely model the types of their incoming values.
| 1007 | /// |
| 1008 | /// Currently, we also refuse to change the precise type used for an atomic or |
| 1009 | /// volatile store. This is debatable, and might be reasonable to change later. |
| 1010 | /// However, it is risky in case some backend or other part of LLVM is relying |
| 1011 | /// on the exact type stored to select appropriate atomic operations. |
| 1012 | /// |
| 1013 | /// \returns true if the store was successfully combined away. This indicates |
| 1014 | /// the caller must erase the store instruction. We have to let the caller erase |
Bruce Mitchener | e9ffb45 | 2015-09-12 01:17:08 +0000 | [diff] [blame] | 1015 | /// the store instruction, as otherwise there is no way to signal whether it
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1016 | /// was combined or not: IC.eraseInstFromFunction returns a null pointer.
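| | ///
| | /// For illustration (a sketch, not a test case):
| | ///
| | ///   %i = bitcast float %f to i32
| | ///   store i32 %i, i32* %p
| | ///
| | /// is rewritten to store %f directly through a float* cast of %p,
| | /// eliminating the scalar bitcast.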
| 1017 | static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) { |
Philip Reames | 6f4d008 | 2016-05-06 22:17:01 +0000 | [diff] [blame] | 1018 | // FIXME: With some care we could probably handle both volatile and ordered
| 1019 | // atomic stores here, but it isn't clear that this is important.
| 1020 | if (!SI.isUnordered()) |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1021 | return false; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1022 | |
Arnold Schwaighofer | 5d33555 | 2016-09-10 18:14:57 +0000 | [diff] [blame] | 1023 | // swifterror values can't be bitcasted. |
| 1024 | if (SI.getPointerOperand()->isSwiftError()) |
| 1025 | return false; |
| 1026 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1027 | Value *V = SI.getValueOperand(); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1028 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1029 | // Fold away bit casts of the stored value by storing the original type. |
| 1030 | if (auto *BC = dyn_cast<BitCastInst>(V)) { |
Chandler Carruth | a7f247e | 2014-12-09 19:21:16 +0000 | [diff] [blame] | 1031 | V = BC->getOperand(0); |
Philip Reames | 89e92d2 | 2016-12-01 20:17:06 +0000 | [diff] [blame] | 1032 | if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) { |
| 1033 | combineStoreToNewValue(IC, SI, V); |
| 1034 | return true; |
| 1035 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1036 | } |
| 1037 | |
Philip Reames | 89e92d2 | 2016-12-01 20:17:06 +0000 | [diff] [blame] | 1038 | if (Value *U = likeBitCastFromVector(IC, V)) |
| 1039 | if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) { |
| 1040 | combineStoreToNewValue(IC, SI, U); |
| 1041 | return true; |
| 1042 | } |
Arch D. Robison | be0490a | 2016-04-25 22:22:39 +0000 | [diff] [blame] | 1043 | |
JF Bastien | c22d299 | 2016-04-21 19:53:39 +0000 | [diff] [blame] | 1044 | // FIXME: We should also canonicalize stores of vectors when their elements |
| 1045 | // are cast to other types. |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1046 | return false; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1047 | } |
| 1048 | |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1049 | static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) { |
| 1050 | // FIXME: With some care we could probably handle both volatile and atomic
| 1051 | // stores here, but it isn't clear that this is important.
| 1052 | if (!SI.isSimple()) |
| 1053 | return false; |
| 1054 | |
| 1055 | Value *V = SI.getValueOperand(); |
| 1056 | Type *T = V->getType(); |
| 1057 | |
| 1058 | if (!T->isAggregateType()) |
| 1059 | return false; |
| 1060 | |
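| | // A sketch of the multi-element struct case handled below:
| | //   store { i32, i32 } %agg, { i32, i32 }* %p
| | // is repacked (modulo value names) into
| | //   %e0 = extractvalue { i32, i32 } %agg, 0
| | //   %p0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
| | //   store i32 %e0, i32* %p0
| | //   %e1 = extractvalue { i32, i32 } %agg, 1
| | //   %p1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
| | //   store i32 %e1, i32* %p1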
Mehdi Amini | 2668a48 | 2015-05-07 05:52:40 +0000 | [diff] [blame] | 1061 | if (auto *ST = dyn_cast<StructType>(T)) { |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1062 | // If the struct only has one element, we unpack.
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1063 | unsigned Count = ST->getNumElements(); |
| 1064 | if (Count == 1) { |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1065 | V = IC.Builder->CreateExtractValue(V, 0); |
| 1066 | combineStoreToNewValue(IC, SI, V); |
| 1067 | return true; |
| 1068 | } |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1069 | |
| 1070 | // We don't want to break stores with padding here, as we'd lose the
| 1071 | // knowledge that padding exists for the rest of the pipeline.
| 1072 | const DataLayout &DL = IC.getDataLayout(); |
| 1073 | auto *SL = DL.getStructLayout(ST); |
| 1074 | if (SL->hasPadding()) |
| 1075 | return false; |
| 1076 | |
Amaury Sechet | 61a7d62 | 2016-02-17 19:21:28 +0000 | [diff] [blame] | 1077 | auto Align = SI.getAlignment(); |
| 1078 | if (!Align) |
| 1079 | Align = DL.getABITypeAlignment(ST); |
| 1080 | |
NAKAMURA Takumi | ec6b1fc | 2015-12-15 09:37:31 +0000 | [diff] [blame] | 1081 | SmallString<16> EltName = V->getName(); |
| 1082 | EltName += ".elt"; |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1083 | auto *Addr = SI.getPointerOperand(); |
NAKAMURA Takumi | ec6b1fc | 2015-12-15 09:37:31 +0000 | [diff] [blame] | 1084 | SmallString<16> AddrName = Addr->getName(); |
| 1085 | AddrName += ".repack"; |
Amaury Sechet | 61a7d62 | 2016-02-17 19:21:28 +0000 | [diff] [blame] | 1086 | |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1087 | auto *IdxType = Type::getInt32Ty(ST->getContext()); |
| 1088 | auto *Zero = ConstantInt::get(IdxType, 0); |
| 1089 | for (unsigned i = 0; i < Count; i++) { |
| 1090 | Value *Indices[2] = { |
| 1091 | Zero, |
| 1092 | ConstantInt::get(IdxType, i), |
| 1093 | }; |
Amaury Sechet | da71cb7 | 2016-02-17 21:21:29 +0000 | [diff] [blame] | 1094 | auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), |
| 1095 | AddrName); |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1096 | auto *Val = IC.Builder->CreateExtractValue(V, i, EltName); |
Amaury Sechet | 61a7d62 | 2016-02-17 19:21:28 +0000 | [diff] [blame] | 1097 | auto EltAlign = MinAlign(Align, SL->getElementOffset(i)); |
| 1098 | IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign); |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1099 | } |
| 1100 | |
| 1101 | return true; |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1102 | } |
| 1103 | |
David Majnemer | 7536460 | 2015-05-11 05:04:27 +0000 | [diff] [blame] | 1104 | if (auto *AT = dyn_cast<ArrayType>(T)) { |
| 1105 | // If the array only has one element, we unpack.
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1106 | auto NumElements = AT->getNumElements(); |
| 1107 | if (NumElements == 1) { |
David Majnemer | 7536460 | 2015-05-11 05:04:27 +0000 | [diff] [blame] | 1108 | V = IC.Builder->CreateExtractValue(V, 0); |
| 1109 | combineStoreToNewValue(IC, SI, V); |
| 1110 | return true; |
| 1111 | } |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1112 | |
Davide Italiano | f6988d2 | 2016-10-07 21:53:09 +0000 | [diff] [blame] | 1113 | // Bail out if the array is too large. Ideally we would like to optimize
| 1114 | // arrays of arbitrary size, but this has a terrible impact on compile time.
| 1115 | // The threshold here is chosen arbitrarily and may need tuning.
| 1117 | if (NumElements > 1024) |
| 1118 | return false; |
| 1119 | |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1120 | const DataLayout &DL = IC.getDataLayout(); |
| 1121 | auto EltSize = DL.getTypeAllocSize(AT->getElementType()); |
| 1122 | auto Align = SI.getAlignment(); |
| 1123 | if (!Align) |
| 1124 | Align = DL.getABITypeAlignment(T); |
| 1125 | |
| 1126 | SmallString<16> EltName = V->getName(); |
| 1127 | EltName += ".elt"; |
| 1128 | auto *Addr = SI.getPointerOperand(); |
| 1129 | SmallString<16> AddrName = Addr->getName(); |
| 1130 | AddrName += ".repack"; |
| 1131 | |
| 1132 | auto *IdxType = Type::getInt64Ty(T->getContext()); |
| 1133 | auto *Zero = ConstantInt::get(IdxType, 0); |
| 1134 | |
| 1135 | uint64_t Offset = 0; |
| 1136 | for (uint64_t i = 0; i < NumElements; i++) { |
| 1137 | Value *Indices[2] = { |
| 1138 | Zero, |
| 1139 | ConstantInt::get(IdxType, i), |
| 1140 | }; |
| 1141 | auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices), |
| 1142 | AddrName); |
| 1143 | auto *Val = IC.Builder->CreateExtractValue(V, i, EltName); |
| 1144 | auto EltAlign = MinAlign(Align, Offset); |
| 1145 | IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign); |
| 1146 | Offset += EltSize; |
| 1147 | } |
| 1148 | |
| 1149 | return true; |
David Majnemer | 7536460 | 2015-05-11 05:04:27 +0000 | [diff] [blame] | 1150 | } |
| 1151 | |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1152 | return false; |
| 1153 | } |
| 1154 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1155 | /// equivalentAddressValues - Test if A and B will obviously have the same |
| 1156 | /// value. This includes recognizing that %t0 and %t1 will have the same |
| 1157 | /// value in code like this: |
| 1158 | /// %t0 = getelementptr \@a, 0, 3 |
| 1159 | /// store i32 0, i32* %t0 |
| 1160 | /// %t1 = getelementptr \@a, 0, 3 |
| 1161 | /// %t2 = load i32* %t1 |
| 1162 | /// |
| 1163 | static bool equivalentAddressValues(Value *A, Value *B) { |
| 1164 | // Test if the values are trivially equivalent. |
| 1165 | if (A == B) return true; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1166 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1167 | // Test if the values come from identical arithmetic instructions.
| 1168 | // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
| 1169 | // it's only used to compare two uses within the same basic block, which
| 1170 | // means that they'll always either have the same value or one of them |
| 1171 | // will have an undefined value. |
| 1172 | if (isa<BinaryOperator>(A) || |
| 1173 | isa<CastInst>(A) || |
| 1174 | isa<PHINode>(A) || |
| 1175 | isa<GetElementPtrInst>(A)) |
| 1176 | if (Instruction *BI = dyn_cast<Instruction>(B)) |
| 1177 | if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI)) |
| 1178 | return true; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1179 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1180 | // Otherwise they may not be equivalent. |
| 1181 | return false; |
| 1182 | } |
| 1183 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1184 | Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { |
| 1185 | Value *Val = SI.getOperand(0); |
| 1186 | Value *Ptr = SI.getOperand(1); |
| 1187 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1188 | // Try to canonicalize the stored type. |
| 1189 | if (combineStoreToValueType(*this, SI)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1190 | return eraseInstFromFunction(SI); |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1191 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1192 | // Attempt to improve the alignment. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1193 | unsigned KnownAlign = getOrEnforceKnownAlignment( |
Daniel Jasper | aec2fa3 | 2016-12-19 08:22:17 +0000 | [diff] [blame] | 1194 | Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT); |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1195 | unsigned StoreAlign = SI.getAlignment(); |
| 1196 | unsigned EffectiveStoreAlign = |
| 1197 | StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType()); |
Dan Gohman | 3619660 | 2010-08-03 18:20:32 +0000 | [diff] [blame] | 1198 | |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1199 | if (KnownAlign > EffectiveStoreAlign) |
| 1200 | SI.setAlignment(KnownAlign); |
| 1201 | else if (StoreAlign == 0) |
| 1202 | SI.setAlignment(EffectiveStoreAlign); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1203 | |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1204 | // Try to canonicalize the stored type. |
| 1205 | if (unpackStoreToAggregate(*this, SI)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1206 | return eraseInstFromFunction(SI); |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1207 | |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 1208 | // Replace GEP indices if possible. |
| 1209 | if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) { |
| 1210 | Worklist.Add(NewGEPI); |
| 1211 | return &SI; |
| 1212 | } |
| 1213 | |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1214 | // Don't hack volatile/ordered stores. |
| 1215 | // FIXME: Some bits are legal for ordered atomic stores; needs refactoring. |
| 1216 | if (!SI.isUnordered()) return nullptr; |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1217 | |
| 1218 | // If the pointer operand is an alloca with a single use, zap the store,
| 1219 | // making the alloca dead.
| 1220 | if (Ptr->hasOneUse()) { |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1221 | if (isa<AllocaInst>(Ptr)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1222 | return eraseInstFromFunction(SI); |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1223 | if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { |
| 1224 | if (isa<AllocaInst>(GEP->getOperand(0))) { |
| 1225 | if (GEP->getOperand(0)->hasOneUse()) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1226 | return eraseInstFromFunction(SI); |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1227 | } |
| 1228 | } |
| 1229 | } |
| 1230 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1231 | // Do really simple DSE, to catch cases where there are several consecutive |
| 1232 | // stores to the same location, separated by a few arithmetic operations. This |
| 1233 | // situation often occurs with bitfield accesses. |
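| | // For example (a sketch):
| | //   store i32 %x, i32* %p
| | //   %y = or i32 %x, 128
| | //   store i32 %y, i32* %p
| | // The first store is dead and is removed by the scan below.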
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1234 | BasicBlock::iterator BBI(SI); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1235 | for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts; |
| 1236 | --ScanInsts) { |
| 1237 | --BBI; |
Victor Hernandez | 5f8c8c0 | 2010-01-22 19:05:05 +0000 | [diff] [blame] | 1238 | // Don't count debug info directives, lest they affect codegen, and skip
| 1239 | // pointer-to-pointer bitcasts, which are NOPs.
| 1240 | if (isa<DbgInfoIntrinsic>(BBI) || |
Duncan Sands | 19d0b47 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 1241 | (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1242 | ScanInsts++; |
| 1243 | continue; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1244 | } |
| 1245 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1246 | if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) { |
| 1247 | // If the previous store is unordered and to the same location, it is dead.
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1248 | if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1), |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1249 | SI.getOperand(1))) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1250 | ++NumDeadStore; |
| 1251 | ++BBI; |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1252 | eraseInstFromFunction(*PrevSI); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1253 | continue; |
| 1254 | } |
| 1255 | break; |
| 1256 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1257 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1258 | // If this is a load, we have to stop. However, if the loaded value is from |
| 1259 | // the pointer we're loading and is producing the pointer we're storing, |
| 1260 | // then *this* store is dead (X = load P; store X -> P). |
| 1261 | if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) { |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1262 | if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) { |
| 1263 | assert(SI.isUnordered() && "can't eliminate ordering operation"); |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1264 | return eraseInstFromFunction(SI); |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1265 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1266 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1267 | // Otherwise, this is a load from some other location. Stores before it |
| 1268 | // may not be dead. |
| 1269 | break; |
| 1270 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1271 | |
Sanjoy Das | 679bc32 | 2017-01-17 05:45:09 +0000 | [diff] [blame] | 1272 | // Don't skip over loads, throws or things that can modify memory. |
| 1273 | if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow()) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1274 | break; |
| 1275 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1276 | |
| 1277 | // store X, null -> turns into 'unreachable' in SimplifyCFG |
| 1278 | if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) { |
| 1279 | if (!isa<UndefValue>(Val)) { |
| 1280 | SI.setOperand(0, UndefValue::get(Val->getType())); |
| 1281 | if (Instruction *U = dyn_cast<Instruction>(Val)) |
| 1282 | Worklist.Add(U); // Dropped a use. |
| 1283 | } |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1284 | return nullptr; // Do not modify these! |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1285 | } |
| 1286 | |
| 1287 | // store undef, Ptr -> noop |
| 1288 | if (isa<UndefValue>(Val)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1289 | return eraseInstFromFunction(SI); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1290 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1291 | // If this store is the last instruction in the basic block (possibly |
Victor Hernandez | 5f5abd5 | 2010-01-21 23:07:15 +0000 | [diff] [blame] | 1292 | // excepting debug info instructions), and if the block ends with an |
| 1293 | // unconditional branch, try to move the store to the successor block.
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1294 | BBI = SI.getIterator(); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1295 | do { |
| 1296 | ++BBI; |
Victor Hernandez | 5f8c8c0 | 2010-01-22 19:05:05 +0000 | [diff] [blame] | 1297 | } while (isa<DbgInfoIntrinsic>(BBI) || |
Duncan Sands | 19d0b47 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 1298 | (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1299 | if (BranchInst *BI = dyn_cast<BranchInst>(BBI)) |
| 1300 | if (BI->isUnconditional()) |
| 1301 | if (SimplifyStoreAtEndOfBlock(SI)) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1302 | return nullptr; // xform done! |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1303 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1304 | return nullptr; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1305 | } |
| 1306 | |
| 1307 | /// SimplifyStoreAtEndOfBlock - Turn things like: |
| 1308 | /// if () { *P = v1; } else { *P = v2 } |
| 1309 | /// into a phi node with a store in the successor. |
| 1310 | /// |
| 1311 | /// Simplify things like: |
| 1312 | /// *P = v1; if () { *P = v2; } |
| 1313 | /// into a phi node with a store in the successor. |
| 1314 | /// |
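| | /// Schematically (a sketch of the if/then/else result):
| | ///
| | ///   DestBB:
| | ///     %storemerge = phi i32 [ %v1, %StoreBB ], [ %v2, %OtherBB ]
| | ///     store i32 %storemerge, i32* %P
| | ///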
| 1315 | bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { |
Philip Reames | 5f0e369 | 2016-04-22 20:53:32 +0000 | [diff] [blame] | 1316 | assert(SI.isUnordered() && |
| 1317 | "this code has not been auditted for volatile or ordered store case"); |
Justin Bogner | c7e4fbe | 2016-08-05 01:09:48 +0000 | [diff] [blame] | 1318 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1319 | BasicBlock *StoreBB = SI.getParent(); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1320 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1321 | // Check to see if the successor block has exactly two incoming edges. If |
| 1322 | // so, see if the other predecessor contains a store to the same location. |
| 1323 | // If it does, insert a PHI node (if needed) and move the stores down.
| 1324 | BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1325 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1326 | // Determine whether Dest has exactly two predecessors and, if so, compute |
| 1327 | // the other predecessor. |
| 1328 | pred_iterator PI = pred_begin(DestBB); |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1329 | BasicBlock *P = *PI; |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1330 | BasicBlock *OtherBB = nullptr; |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1331 | |
| 1332 | if (P != StoreBB) |
| 1333 | OtherBB = P; |
| 1334 | |
| 1335 | if (++PI == pred_end(DestBB)) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1336 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1337 | |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1338 | P = *PI; |
| 1339 | if (P != StoreBB) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1340 | if (OtherBB) |
| 1341 | return false; |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1342 | OtherBB = P; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1343 | } |
| 1344 | if (++PI != pred_end(DestBB)) |
| 1345 | return false; |
| 1346 | |
| 1347 | // Bail out if all the relevant blocks aren't distinct (this can happen, |
| 1348 | // for example, if SI is in an infinite loop) |
| 1349 | if (StoreBB == DestBB || OtherBB == DestBB) |
| 1350 | return false; |
| 1351 | |
| 1352 | // Verify that the other block ends in a branch and is not otherwise empty. |
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1353 | BasicBlock::iterator BBI(OtherBB->getTerminator()); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1354 | BranchInst *OtherBr = dyn_cast<BranchInst>(BBI); |
| 1355 | if (!OtherBr || BBI == OtherBB->begin()) |
| 1356 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1357 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1358 | // If the other block ends in an unconditional branch, check for the 'if then |
| 1359 | // else' case: there is an instruction before the branch.
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1360 | StoreInst *OtherStore = nullptr; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1361 | if (OtherBr->isUnconditional()) { |
| 1362 | --BBI; |
| 1363 | // Skip over debugging info. |
Victor Hernandez | 5f8c8c0 | 2010-01-22 19:05:05 +0000 | [diff] [blame] | 1364 | while (isa<DbgInfoIntrinsic>(BBI) || |
Duncan Sands | 19d0b47 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 1365 | (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1366 | if (BBI==OtherBB->begin()) |
| 1367 | return false; |
| 1368 | --BBI; |
| 1369 | } |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1370 | // If this isn't a store, isn't a store to the same location, or isn't the
| 1371 | // right kind of store, bail out.
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1372 | OtherStore = dyn_cast<StoreInst>(BBI); |
| 1373 | if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) || |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1374 | !SI.isSameOperationAs(OtherStore)) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1375 | return false; |
| 1376 | } else { |
| 1377 | // Otherwise, the other block ended with a conditional branch. If one of the |
| 1378 | // destinations is StoreBB, then we have the if/then case. |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1379 | if (OtherBr->getSuccessor(0) != StoreBB && |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1380 | OtherBr->getSuccessor(1) != StoreBB) |
| 1381 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1382 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1383 | // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an |
| 1384 | // if/then triangle. See if there is a store to the same ptr as SI that |
| 1385 | // lives in OtherBB. |
| 1386 | for (;; --BBI) { |
| 1387 | // Check to see if we find the matching store. |
| 1388 | if ((OtherStore = dyn_cast<StoreInst>(BBI))) { |
| 1389 | if (OtherStore->getOperand(1) != SI.getOperand(1) || |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1390 | !SI.isSameOperationAs(OtherStore)) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1391 | return false; |
| 1392 | break; |
| 1393 | } |
| 1394 | // If we find something that may be using or overwriting the stored |
| 1395 | // value, or if we run out of instructions, we can't do the xform. |
Sanjoy Das | 679bc32 | 2017-01-17 05:45:09 +0000 | [diff] [blame] | 1396 | if (BBI->mayReadFromMemory() || BBI->mayThrow() || |
| 1397 | BBI->mayWriteToMemory() || BBI == OtherBB->begin()) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1398 | return false; |
| 1399 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1400 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1401 | // In order to eliminate the store in OtherBr, we have to |
| 1402 | // make sure nothing reads or overwrites the stored value in |
| 1403 | // StoreBB. |
| 1404 | for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) { |
| 1405 | // FIXME: This should really be AA driven. |
Sanjoy Das | 679bc32 | 2017-01-17 05:45:09 +0000 | [diff] [blame] | 1406 | if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory()) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1407 | return false; |
| 1408 | } |
| 1409 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1410 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1411 | // Insert a PHI node now if we need it. |
| 1412 | Value *MergedVal = OtherStore->getOperand(0); |
| 1413 | if (MergedVal != SI.getOperand(0)) { |
Jay Foad | 5213134 | 2011-03-30 11:28:46 +0000 | [diff] [blame] | 1414 | PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge"); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1415 | PN->addIncoming(SI.getOperand(0), SI.getParent()); |
| 1416 | PN->addIncoming(OtherStore->getOperand(0), OtherBB); |
| 1417 | MergedVal = InsertNewInstBefore(PN, DestBB->front()); |
| 1418 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1419 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1420 | // Advance to a place where it is safe to insert the new store and |
| 1421 | // insert it. |
Bill Wendling | 8ddfc09 | 2011-08-16 20:45:24 +0000 | [diff] [blame] | 1422 | BBI = DestBB->getFirstInsertionPt(); |
Eli Friedman | 35211c6 | 2011-05-27 00:19:40 +0000 | [diff] [blame] | 1423 | StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1424 | SI.isVolatile(), |
| 1425 | SI.getAlignment(), |
| 1426 | SI.getOrdering(), |
| 1427 | SI.getSynchScope()); |
Eli Friedman | 35211c6 | 2011-05-27 00:19:40 +0000 | [diff] [blame] | 1428 | InsertNewInstBefore(NewSI, *BBI); |
Paul Robinson | 383c5c2 | 2017-02-06 22:19:04 +0000 | [diff] [blame^] | 1429 | // The debug locations of the original instructions might differ; merge them. |
| 1430 | NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(), |
| 1431 | OtherStore->getDebugLoc())); |
Eli Friedman | 35211c6 | 2011-05-27 00:19:40 +0000 | [diff] [blame] | 1432 | |
Hal Finkel | cc39b67 | 2014-07-24 12:16:19 +0000 | [diff] [blame] | 1433 | // If the two stores had AA tags, merge them. |
| 1434 | AAMDNodes AATags; |
| 1435 | SI.getAAMetadata(AATags); |
| 1436 | if (AATags) { |
| 1437 | OtherStore->getAAMetadata(AATags, /* Merge = */ true); |
| 1438 | NewSI->setAAMetadata(AATags); |
| 1439 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1440 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1441 | // Nuke the old stores. |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1442 | eraseInstFromFunction(SI); |
| 1443 | eraseInstFromFunction(*OtherStore); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1444 | return true; |
| 1445 | } |