//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
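///
/// For example (illustrative; @Str stands for some constant global), the
/// constant expression
///   getelementptr inbounds ([4 x i8], [4 x i8]* @Str, i64 0, i64 0)
/// points into @Str, so this returns true for it.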
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an un-offset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
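///
/// For example (illustrative IR; @G is assumed to be a constant global):
///   %A = alloca i32
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A.i8,
///                                        i8* bitcast (i32* @G to i8*),
///                                        i64 4, i1 false)
/// where %A.i8 is a bitcast of %A and every other use of %A is a read.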
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy if the specified alloca
/// is only modified by a copy from a constant global.  If we can prove this,
/// we can replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

Vitaly Buka | df19ad4 | 2017-06-24 01:35:19 +0000 | [diff] [blame] | 173 | /// Returns true if V is dereferenceable for size of alloca. |
| 174 | static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI, |
| 175 | const DataLayout &DL) { |
| 176 | if (AI->isArrayAllocation()) |
| 177 | return false; |
| 178 | uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType()); |
| 179 | if (!AllocaSize) |
| 180 | return false; |
| 181 | return isDereferenceableAndAlignedPointer(V, AI->getAlignment(), |
| 182 | APInt(64, AllocaSize), DL); |
| 183 | } |
| 184 | |
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
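  // For example (illustrative):
  //   %A = alloca i32, i32 4
  // becomes
  //   %A = alloca [4 x i32]
  //   %A.sub = getelementptr inbounds [4 x i32], [4 x i32]* %A, i64 0, i64 0
  // with all uses of the original alloca rewritten to use %A.sub.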
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible; also skip interleaved debug info.
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation instruction in the
    // block, insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types.  A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer.  If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use them in the load
// instruction.
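//
// For example (illustrative; assume @G is the new pointer, in addrspace(2)):
//   %gep = getelementptr [4 x i32], [4 x i32]* %old, i64 0, i64 1
//   %v   = load i32, i32* %gep
// becomes
//   %gep = getelementptr [4 x i32], [4 x i32] addrspace(2)* @G, i64 0, i64 1
//   %v   = load i32, i32 addrspace(2)* %gep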
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
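    //
    // For example (illustrative), two zero-sized allocas such as
    //   %a = alloca {}
    //   %b = alloca [0 x i32]
    // can share one address, so %b is replaced by the entry-block %a (via a
    // bitcast when the types differ).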
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
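///
/// For example (illustrative), with NewTy == float:
///   %x = load i32, i32* %p
/// becomes
///   %pf = bitcast i32* %p to float*
///   %x  = load float, float* %pf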
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
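///
/// For example (illustrative IR, with V1 == %p1 and V2 == %p2):
///   %l1 = load i32, i32* %p1
///   %l2 = load i32, i32* %p2
///   %c  = icmp slt i32 %l1, %l2
///   %m  = select i1 %c, i32* %p1, i32* %p2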
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
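///
/// For example (illustrative), on a target with 64-bit pointers:
///   %x = load i64, i64* %p
///   %q = inttoptr i64 %x to i8*
/// is better modeled as
///   %pp = bitcast i64* %p to i8**
///   %q  = load i8*, i8** %pp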
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably, with some care, handle both volatile and ordered
  // atomic loads here, but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if a minmax pattern is found (to avoid an
  // infinite loop).
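  //
  // For example (illustrative), a float that is only ever loaded and stored:
  //   %f = load float, float* %p
  //   store float %f, float* %q
  // is canonicalized to operate on i32:
  //   %i = load i32, i32* %p.cast
  //   store i32 %i, i32* %q.cast
  // where %p.cast and %q.cast are bitcasts of %p and %q to i32*.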
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type has the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably, with some care, handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
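    // For example (illustrative):
    //   %sv = load { i32 }, { i32 }* %p
    // becomes
    //   %sv.unpack = load i32, i32* %p.cast
    //   %sv = insertvalue { i32 } undef, i32 %sv.unpack, 0
    // where %p.cast is a bitcast of %p to i32*.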
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here was chosen arbitrarily and may need tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable but also definitively less than
// or equal to the provided maximum size, then return true.  Otherwise, return
// false.  (Constant global values and allocas fall into the former category.)
//
// FIXME: This should probably live in ValueTracking (or similar).
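//
// For example (illustrative): an 'alloca i32' has a known 4-byte object, so it
// satisfies MaxSize >= 4, whereas a pointer loaded from memory points to an
// object of unknown size and never satisfies this predicate.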
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

| 859 | // If we're indexing into an object of a known size, and the outer index is |
| 860 | // not a constant, but having any value but zero would lead to undefined |
| 861 | // behavior, replace it with zero. |
| 862 | // |
| 863 | // For example, if we have: |
| 864 | // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4 |
| 865 | // ... |
| 866 | // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x |
| 867 | // ... = load i32* %arrayidx, align 4 |
| 868 | // Then we know that we can replace %x in the GEP with i64 0. |
| 869 | // |
| 870 | // FIXME: We could fold any GEP index to zero that would cause UB if it were |
| 871 | // not zero. Currently, we only handle the first such index. Also, we could |
| 872 | // also search through non-zero constant indices if we kept track of the |
| 873 | // offsets those indices implied. |
| 874 | static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI, |
| 875 | Instruction *MemI, unsigned &Idx) { |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 876 | if (GEPI->getNumOperands() < 2) |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 877 | return false; |
| 878 | |
| 879 | // Find the first non-zero index of a GEP. If all indices are zero, return |
| 880 | // one past the last index. |
| 881 | auto FirstNZIdx = [](const GetElementPtrInst *GEPI) { |
| 882 | unsigned I = 1; |
| 883 | for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) { |
| 884 | Value *V = GEPI->getOperand(I); |
| 885 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) |
| 886 | if (CI->isZero()) |
| 887 | continue; |
| 888 | |
| 889 | break; |
| 890 | } |
| 891 | |
| 892 | return I; |
| 893 | }; |
| 894 | |
| 895 | // Skip through initial 'zero' indices, and find the corresponding pointer |
| 896 | // type. See if the next index is not a constant. |
| 897 | Idx = FirstNZIdx(GEPI); |
| 898 | if (Idx == GEPI->getNumOperands()) |
| 899 | return false; |
| 900 | if (isa<Constant>(GEPI->getOperand(Idx))) |
| 901 | return false; |
| 902 | |
| 903 | SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx); |
Eduard Burtescu | 19eb031 | 2016-01-19 17:28:00 +0000 | [diff] [blame] | 904 | Type *AllocTy = |
| 905 | GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops); |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 906 | if (!AllocTy || !AllocTy->isSized()) |
| 907 | return false; |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 908 | const DataLayout &DL = IC.getDataLayout(); |
| 909 | uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy); |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 910 | |
| 911 | // If there are more indices after the one we might replace with a zero, make |
| 912 | // sure they're all non-negative. If any of them are negative, the overall |
| 913 | // address being computed might be before the base address determined by the |
| 914 | // first non-zero index. |
| 915 | auto IsAllNonNegative = [&]() { |
| 916 | for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) { |
Craig Topper | 1a36b7d | 2017-05-15 06:39:41 +0000 | [diff] [blame] | 917 | KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI); |
| 918 | if (Known.isNonNegative()) |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 919 | continue; |
| 920 | return false; |
| 921 | } |
| 922 | |
| 923 | return true; |
| 924 | }; |
| 925 | |
| 926 | // FIXME: If the GEP is not inbounds, and there are extra indices after the |
| 927 | // one we'll replace, those could cause the address computation to wrap |
| 928 | // (rendering the IsAllNonNegative() check below insufficient). We can do |
Bruce Mitchener | e9ffb45 | 2015-09-12 01:17:08 +0000 | [diff] [blame] | 929 | // better, ignoring zero indices (and other indices we can prove small |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 930 | // enough not to wrap). |
| 931 | if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds()) |
| 932 | return false; |
| 933 | |
| 934 | // Note that isObjectSizeLessThanOrEq will return true only if the pointer is |
| 935 | // also known to be dereferenceable. |
| 936 | return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) && |
| 937 | IsAllNonNegative(); |
| 938 | } |
| 939 | |
| 940 | // If we're indexing into an object with a variable index for the memory |
| 941 | // access, but the object has only one element, we can assume that the index |
| 942 | // will always be zero. If we replace the GEP, return it. |
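| | // |
| | // For example (illustrative IR, not from the original source): |
| | //   %p = getelementptr inbounds [1 x i32], [1 x i32]* %a, i64 0, i64 %i |
| | //   %v = load i32, i32* %p |
| | // Any value of %i other than 0 would be out of bounds here, so the GEP is |
| | // cloned with the variable index replaced by the constant 0. |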
| 943 | template <typename T> |
| 944 | static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr, |
| 945 | T &MemI) { |
| 946 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) { |
| 947 | unsigned Idx; |
| 948 | if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) { |
| 949 | Instruction *NewGEPI = GEPI->clone(); |
| 950 | NewGEPI->setOperand(Idx, |
| 951 | ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0)); |
| 952 | NewGEPI->insertBefore(GEPI); |
| 953 | MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI); |
| 954 | return NewGEPI; |
| 955 | } |
| 956 | } |
| 957 | |
| 958 | return nullptr; |
| 959 | } |
| 960 | |
Anna Thomas | 2dd9835 | 2017-12-12 14:12:33 +0000 | [diff] [blame] | 961 | static bool canSimplifyNullStoreOrGEP(StoreInst &SI) { |
| 962 | if (SI.getPointerAddressSpace() != 0) |
| 963 | return false; |
| 964 | |
| 965 | auto *Ptr = SI.getPointerOperand(); |
| 966 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) |
| 967 | Ptr = GEPI->getOperand(0); |
| 968 | return isa<ConstantPointerNull>(Ptr); |
| 969 | } |
| 970 | |
Davide Italiano | ffcb4df | 2017-04-19 17:26:57 +0000 | [diff] [blame] | 971 | static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) { |
| 972 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) { |
| 973 | const Value *GEPI0 = GEPI->getOperand(0); |
| 974 | if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) |
| 975 | return true; |
| 976 | } |
| 977 | if (isa<UndefValue>(Op) || |
| 978 | (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) |
| 979 | return true; |
| 980 | return false; |
| 981 | } |
| 982 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 983 | Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { |
| 984 | Value *Op = LI.getOperand(0); |
| 985 | |
Chandler Carruth | 2f75fcf | 2014-10-18 06:36:22 +0000 | [diff] [blame] | 986 | // Try to canonicalize the loaded type. |
| 987 | if (Instruction *Res = combineLoadToOperationType(*this, LI)) |
| 988 | return Res; |
| 989 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 990 | // Attempt to improve the alignment. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 991 | unsigned KnownAlign = getOrEnforceKnownAlignment( |
Daniel Jasper | aec2fa3 | 2016-12-19 08:22:17 +0000 | [diff] [blame] | 992 | Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT); |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 993 | unsigned LoadAlign = LI.getAlignment(); |
| 994 | unsigned EffectiveLoadAlign = |
| 995 | LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType()); |
Dan Gohman | 3619660 | 2010-08-03 18:20:32 +0000 | [diff] [blame] | 996 | |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 997 | if (KnownAlign > EffectiveLoadAlign) |
| 998 | LI.setAlignment(KnownAlign); |
| 999 | else if (LoadAlign == 0) |
| 1000 | LI.setAlignment(EffectiveLoadAlign); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1001 | |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 1002 | // Replace GEP indices if possible. |
| 1003 | if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) { |
| 1004 | Worklist.Add(NewGEPI); |
| 1005 | return &LI; |
| 1006 | } |
| 1007 | |
Mehdi Amini | 2668a48 | 2015-05-07 05:52:40 +0000 | [diff] [blame] | 1008 | if (Instruction *Res = unpackLoadToAggregate(*this, LI)) |
| 1009 | return Res; |
| 1010 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1011 | // Do really simple store-to-load forwarding and load CSE, to catch cases |
Duncan Sands | 75b5d27 | 2011-02-15 09:23:02 +0000 | [diff] [blame] | 1012 | // where there are several consecutive memory accesses to the same location, |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1013 | // separated by a few arithmetic operations. |
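| |   // For example (an illustrative sketch), in: |
| |   //   store i32 %v, i32* %p |
| |   //   %y = add i32 %x, 1       ; intervening arithmetic, no memory access |
| |   //   %r = load i32, i32* %p |
| |   // the load can simply be replaced by %v. |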
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1014 | BasicBlock::iterator BBI(LI); |
Eli Friedman | bd254a6 | 2016-06-16 02:33:42 +0000 | [diff] [blame] | 1015 | bool IsLoadCSE = false; |
Sanjay Patel | b38ad88e | 2017-01-02 23:25:28 +0000 | [diff] [blame] | 1016 | if (Value *AvailableVal = FindAvailableLoadedValue( |
| 1017 | &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) { |
| 1018 | if (IsLoadCSE) |
| 1019 | combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI); |
Bjorn Steinbrink | a91fd09 | 2015-07-10 06:55:44 +0000 | [diff] [blame] | 1020 | |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1021 | return replaceInstUsesWith( |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1022 | LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(), |
| 1023 | LI.getName() + ".cast")); |
Bjorn Steinbrink | a91fd09 | 2015-07-10 06:55:44 +0000 | [diff] [blame] | 1024 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1025 | |
Philip Reames | 3ac0718 | 2016-04-21 17:45:05 +0000 | [diff] [blame] | 1026 | // None of the following transforms are legal for volatile/ordered atomic |
| 1027 | // loads. Most of them do apply for unordered atomics. |
| 1028 | if (!LI.isUnordered()) return nullptr; |
Philip Reames | ac55090 | 2016-04-21 17:03:33 +0000 | [diff] [blame] | 1029 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1030 | // load(gep null, ...) -> unreachable |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1031 | // load null/undef -> unreachable |
Davide Italiano | ffcb4df | 2017-04-19 17:26:57 +0000 | [diff] [blame] | 1032 |   // TODO: Consider a target hook for valid address spaces for these transforms. |
| 1033 | if (canSimplifyNullLoadOrGEP(LI, Op)) { |
| 1034 |     // Insert a new store-to-null instruction before the load to indicate |
| 1035 | // that this code is not reachable. We do this instead of inserting |
| 1036 | // an unreachable instruction directly because we cannot modify the |
| 1037 | // CFG. |
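| |     // For example (illustrative IR), '%v = load i32, i32* null' in address |
| |     // space 0 gets a 'store undef, i32* null' marker inserted before it, |
| |     // and SimplifyCFG later turns that marker into 'unreachable'. |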
Weiming Zhao | 984f1dc | 2017-07-19 01:27:24 +0000 | [diff] [blame] | 1038 | StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()), |
| 1039 | Constant::getNullValue(Op->getType()), &LI); |
| 1040 | SI->setDebugLoc(LI.getDebugLoc()); |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1041 | return replaceInstUsesWith(LI, UndefValue::get(LI.getType())); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1042 | } |
| 1043 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1044 | if (Op->hasOneUse()) { |
| 1045 | // Change select and PHI nodes to select values instead of addresses: this |
| 1046 |     // helps alias analysis out a lot, allows many other simplifications, and |
| 1047 | // exposes redundancy in the code. |
| 1048 | // |
| 1049 | // Note that we cannot do the transformation unless we know that the |
| 1050 | // introduced loads cannot trap! Something like this is valid as long as |
| 1051 | // the condition is always false: load (select bool %C, int* null, int* %G), |
| 1052 | // but it would not be valid if we transformed it to load from null |
| 1053 | // unconditionally. |
| 1054 | // |
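| |     // As an illustrative sketch, when both arms are safe to load from: |
| |     //   %a = select i1 %c, i32* %p, i32* %q |
| |     //   %v = load i32, i32* %a |
| |     // becomes: |
| |     //   %v1 = load i32, i32* %p |
| |     //   %v2 = load i32, i32* %q |
| |     //   %v  = select i1 %c, i32 %v1, i32 %v2 |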
| 1055 | if (SelectInst *SI = dyn_cast<SelectInst>(Op)) { |
| 1056 | // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2). |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 1057 | unsigned Align = LI.getAlignment(); |
Artur Pilipenko | 9bb6bea | 2016-04-27 11:00:48 +0000 | [diff] [blame] | 1058 | if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) && |
| 1059 | isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) { |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1060 | LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1), |
| 1061 | SI->getOperand(1)->getName()+".val"); |
| 1062 | LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2), |
| 1063 | SI->getOperand(2)->getName()+".val"); |
Philip Reames | a98c7ea | 2016-04-21 17:59:40 +0000 | [diff] [blame] | 1064 | assert(LI.isUnordered() && "implied by above"); |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 1065 | V1->setAlignment(Align); |
Konstantin Zhuravlyov | bb80d3e | 2017-07-11 22:23:00 +0000 | [diff] [blame] | 1066 | V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID()); |
Bob Wilson | 56600a1 | 2010-01-30 04:42:39 +0000 | [diff] [blame] | 1067 | V2->setAlignment(Align); |
Konstantin Zhuravlyov | bb80d3e | 2017-07-11 22:23:00 +0000 | [diff] [blame] | 1068 | V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID()); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1069 | return SelectInst::Create(SI->getCondition(), V1, V2); |
| 1070 | } |
| 1071 | |
| 1072 | // load (select (cond, null, P)) -> load P |
Larisse Voufo | 532bf71 | 2015-09-18 19:14:35 +0000 | [diff] [blame] | 1073 | if (isa<ConstantPointerNull>(SI->getOperand(1)) && |
Philip Reames | 5ad26c3 | 2014-12-29 22:46:21 +0000 | [diff] [blame] | 1074 | LI.getPointerAddressSpace() == 0) { |
| 1075 | LI.setOperand(0, SI->getOperand(2)); |
| 1076 | return &LI; |
| 1077 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1078 | |
| 1079 | // load (select (cond, P, null)) -> load P |
Philip Reames | 5ad26c3 | 2014-12-29 22:46:21 +0000 | [diff] [blame] | 1080 | if (isa<ConstantPointerNull>(SI->getOperand(2)) && |
| 1081 | LI.getPointerAddressSpace() == 0) { |
| 1082 | LI.setOperand(0, SI->getOperand(1)); |
| 1083 | return &LI; |
| 1084 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1085 | } |
| 1086 | } |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1087 | return nullptr; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1088 | } |
| 1089 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 1090 | /// Look for an extractelement/insertvalue sequence that acts like a bitcast. |
Arch D. Robison | be0490a | 2016-04-25 22:22:39 +0000 | [diff] [blame] | 1091 | /// |
| 1092 | /// \returns underlying value that was "cast", or nullptr otherwise. |
| 1093 | /// |
| 1094 | /// For example, if we have: |
| 1095 | /// |
| 1096 | /// %E0 = extractelement <2 x double> %U, i32 0 |
| 1097 | /// %V0 = insertvalue [2 x double] undef, double %E0, 0 |
| 1098 | /// %E1 = extractelement <2 x double> %U, i32 1 |
| 1099 | /// %V1 = insertvalue [2 x double] %V0, double %E1, 1 |
| 1100 | /// |
| 1101 | /// and the layout of a <2 x double> is isomorphic to a [2 x double], |
| 1102 | /// then %V1 can be safely approximated by a conceptual "bitcast" of %U. |
| 1103 | /// Note that %U may contain non-undef values where %V1 has undef. |
| 1104 | static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) { |
| 1105 | Value *U = nullptr; |
| 1106 | while (auto *IV = dyn_cast<InsertValueInst>(V)) { |
| 1107 | auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand()); |
| 1108 | if (!E) |
| 1109 | return nullptr; |
| 1110 | auto *W = E->getVectorOperand(); |
| 1111 | if (!U) |
| 1112 | U = W; |
| 1113 | else if (U != W) |
| 1114 | return nullptr; |
| 1115 | auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand()); |
| 1116 |     if (!CI || IV->getNumIndices() != 1 || |
| |         CI->getZExtValue() != *IV->idx_begin()) |
| 1117 | return nullptr; |
| 1118 | V = IV->getAggregateOperand(); |
| 1119 | } |
| 1120 |   if (!isa<UndefValue>(V) || !U) |
| 1121 | return nullptr; |
| 1122 | |
| 1123 | auto *UT = cast<VectorType>(U->getType()); |
| 1124 | auto *VT = V->getType(); |
| 1125 | // Check that types UT and VT are bitwise isomorphic. |
| 1126 | const auto &DL = IC.getDataLayout(); |
| 1127 | if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) { |
| 1128 | return nullptr; |
| 1129 | } |
| 1130 | if (auto *AT = dyn_cast<ArrayType>(VT)) { |
| 1131 | if (AT->getNumElements() != UT->getNumElements()) |
| 1132 | return nullptr; |
| 1133 | } else { |
| 1134 | auto *ST = cast<StructType>(VT); |
| 1135 | if (ST->getNumElements() != UT->getNumElements()) |
| 1136 | return nullptr; |
| 1137 | for (const auto *EltT : ST->elements()) { |
| 1138 | if (EltT != UT->getElementType()) |
| 1139 | return nullptr; |
| 1140 | } |
| 1141 | } |
| 1142 | return U; |
| 1143 | } |
| 1144 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 1145 | /// Combine stores to match the type of value being stored. |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1146 | /// |
| 1147 | /// The core idea here is that the memory does not have any intrinsic type |
| 1148 | /// and, where we can, we should match the type of a store to the type of the |
| 1149 | /// value being stored. |
| 1150 | /// |
| 1151 | /// However, this routine must never change the width of a store or the |
| 1152 | /// number of stores, as that would introduce a semantic change. This combine |
| 1153 | /// is expected to be a semantic no-op which just allows stores to more |
| 1154 | /// closely model the types of their incoming values. |
| 1155 | /// |
| 1156 | /// Currently, we also refuse to change the precise type used for an atomic or |
| 1157 | /// volatile store. This is debatable, and might be reasonable to change later. |
| 1158 | /// However, it is risky in case some backend or other part of LLVM is relying |
| 1159 | /// on the exact type stored to select appropriate atomic operations. |
| 1160 | /// |
| 1161 | /// \returns true if the store was successfully combined away. This indicates |
| 1162 | /// the caller must erase the store instruction. We have to let the caller erase |
Bruce Mitchener | e9ffb45 | 2015-09-12 01:17:08 +0000 | [diff] [blame] | 1163 | /// the store instruction as otherwise there is no way to signal whether it was |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1164 | /// combined or not: IC.eraseInstFromFunction returns a null pointer. |
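| | /// |
| | /// For example (an illustrative sketch, not from the original source): |
| | ///   %i = bitcast float %f to i32 |
| | ///   store i32 %i, i32* %p |
| | /// is rewritten to store the float directly through a cast pointer: |
| | ///   %fp = bitcast i32* %p to float* |
| | ///   store float %f, float* %fp |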
| 1165 | static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) { |
Philip Reames | 6f4d008 | 2016-05-06 22:17:01 +0000 | [diff] [blame] | 1166 |   // FIXME: We could probably, with some care, handle both volatile and |
| 1167 |   // ordered atomic stores here, but it isn't clear that this is important. |
| 1168 | if (!SI.isUnordered()) |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1169 | return false; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1170 | |
Arnold Schwaighofer | 5d33555 | 2016-09-10 18:14:57 +0000 | [diff] [blame] | 1171 | // swifterror values can't be bitcasted. |
| 1172 | if (SI.getPointerOperand()->isSwiftError()) |
| 1173 | return false; |
| 1174 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1175 | Value *V = SI.getValueOperand(); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1176 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1177 | // Fold away bit casts of the stored value by storing the original type. |
| 1178 | if (auto *BC = dyn_cast<BitCastInst>(V)) { |
Chandler Carruth | a7f247e | 2014-12-09 19:21:16 +0000 | [diff] [blame] | 1179 | V = BC->getOperand(0); |
Philip Reames | 89e92d2 | 2016-12-01 20:17:06 +0000 | [diff] [blame] | 1180 | if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) { |
| 1181 | combineStoreToNewValue(IC, SI, V); |
| 1182 | return true; |
| 1183 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1184 | } |
| 1185 | |
Philip Reames | 89e92d2 | 2016-12-01 20:17:06 +0000 | [diff] [blame] | 1186 | if (Value *U = likeBitCastFromVector(IC, V)) |
| 1187 | if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) { |
| 1188 | combineStoreToNewValue(IC, SI, U); |
| 1189 | return true; |
| 1190 | } |
Arch D. Robison | be0490a | 2016-04-25 22:22:39 +0000 | [diff] [blame] | 1191 | |
JF Bastien | c22d299 | 2016-04-21 19:53:39 +0000 | [diff] [blame] | 1192 | // FIXME: We should also canonicalize stores of vectors when their elements |
| 1193 | // are cast to other types. |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1194 | return false; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1195 | } |
| 1196 | |
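| | /// Split up a store of an aggregate value into stores of its elements. |
| | /// |
| | /// As an illustrative sketch (IR not from the original source): |
| | ///   store { i32, i32 } %agg, { i32, i32 }* %p |
| | /// becomes one store per element through inbounds GEPs: |
| | ///   %e0 = extractvalue { i32, i32 } %agg, 0 |
| | ///   %p0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0 |
| | ///   store i32 %e0, i32* %p0 |
| | /// and likewise for element 1. |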
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1197 | static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) { |
| 1198 |   // FIXME: We could probably, with some care, handle both volatile and |
| 1199 |   // atomic stores here, but it isn't clear that this is important. |
| 1200 | if (!SI.isSimple()) |
| 1201 | return false; |
| 1202 | |
| 1203 | Value *V = SI.getValueOperand(); |
| 1204 | Type *T = V->getType(); |
| 1205 | |
| 1206 | if (!T->isAggregateType()) |
| 1207 | return false; |
| 1208 | |
Mehdi Amini | 2668a48 | 2015-05-07 05:52:40 +0000 | [diff] [blame] | 1209 | if (auto *ST = dyn_cast<StructType>(T)) { |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1210 |     // If the struct has only one element, we unpack it. |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1211 | unsigned Count = ST->getNumElements(); |
| 1212 | if (Count == 1) { |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1213 | V = IC.Builder.CreateExtractValue(V, 0); |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1214 | combineStoreToNewValue(IC, SI, V); |
| 1215 | return true; |
| 1216 | } |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1217 | |
| 1218 |     // We don't want to break up stores with padding here, as we'd lose the |
| 1219 |     // knowledge that padding exists for the rest of the pipeline. |
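| |     // (For example, { i8, i32 } typically has three bytes of padding after |
| |     // the i8; an element-wise unpack would no longer store those bytes.) |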
| 1220 | const DataLayout &DL = IC.getDataLayout(); |
| 1221 | auto *SL = DL.getStructLayout(ST); |
| 1222 | if (SL->hasPadding()) |
| 1223 | return false; |
| 1224 | |
Amaury Sechet | 61a7d62 | 2016-02-17 19:21:28 +0000 | [diff] [blame] | 1225 | auto Align = SI.getAlignment(); |
| 1226 | if (!Align) |
| 1227 | Align = DL.getABITypeAlignment(ST); |
| 1228 | |
NAKAMURA Takumi | ec6b1fc | 2015-12-15 09:37:31 +0000 | [diff] [blame] | 1229 | SmallString<16> EltName = V->getName(); |
| 1230 | EltName += ".elt"; |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1231 | auto *Addr = SI.getPointerOperand(); |
NAKAMURA Takumi | ec6b1fc | 2015-12-15 09:37:31 +0000 | [diff] [blame] | 1232 | SmallString<16> AddrName = Addr->getName(); |
| 1233 | AddrName += ".repack"; |
Amaury Sechet | 61a7d62 | 2016-02-17 19:21:28 +0000 | [diff] [blame] | 1234 | |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1235 | auto *IdxType = Type::getInt32Ty(ST->getContext()); |
| 1236 | auto *Zero = ConstantInt::get(IdxType, 0); |
| 1237 | for (unsigned i = 0; i < Count; i++) { |
| 1238 | Value *Indices[2] = { |
| 1239 | Zero, |
| 1240 | ConstantInt::get(IdxType, i), |
| 1241 | }; |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1242 | auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), |
| 1243 | AddrName); |
| 1244 | auto *Val = IC.Builder.CreateExtractValue(V, i, EltName); |
Amaury Sechet | 61a7d62 | 2016-02-17 19:21:28 +0000 | [diff] [blame] | 1245 | auto EltAlign = MinAlign(Align, SL->getElementOffset(i)); |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1246 | llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign); |
Keno Fischer | a236dae | 2017-06-28 23:36:40 +0000 | [diff] [blame] | 1247 | AAMDNodes AAMD; |
| 1248 | SI.getAAMetadata(AAMD); |
| 1249 | NS->setAAMetadata(AAMD); |
Mehdi Amini | 1c131b3 | 2015-12-15 01:44:07 +0000 | [diff] [blame] | 1250 | } |
| 1251 | |
| 1252 | return true; |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1253 | } |
| 1254 | |
David Majnemer | 7536460 | 2015-05-11 05:04:27 +0000 | [diff] [blame] | 1255 | if (auto *AT = dyn_cast<ArrayType>(T)) { |
| 1256 |     // If the array has only one element, we unpack it. |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1257 | auto NumElements = AT->getNumElements(); |
| 1258 | if (NumElements == 1) { |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1259 | V = IC.Builder.CreateExtractValue(V, 0); |
David Majnemer | 7536460 | 2015-05-11 05:04:27 +0000 | [diff] [blame] | 1260 | combineStoreToNewValue(IC, SI, V); |
| 1261 | return true; |
| 1262 | } |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1263 | |
Davide Italiano | f6988d2 | 2016-10-07 21:53:09 +0000 | [diff] [blame] | 1264 | // Bail out if the array is too large. Ideally we would like to optimize |
| 1265 | // arrays of arbitrary size but this has a terrible impact on compile time. |
| 1266 |     // The threshold here is chosen arbitrarily and may need tuning. |
Davide Italiano | 2133bf5 | 2017-02-07 17:56:50 +0000 | [diff] [blame] | 1268 | if (NumElements > IC.MaxArraySizeForCombine) |
Davide Italiano | f6988d2 | 2016-10-07 21:53:09 +0000 | [diff] [blame] | 1269 | return false; |
| 1270 | |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1271 | const DataLayout &DL = IC.getDataLayout(); |
| 1272 | auto EltSize = DL.getTypeAllocSize(AT->getElementType()); |
| 1273 | auto Align = SI.getAlignment(); |
| 1274 | if (!Align) |
| 1275 | Align = DL.getABITypeAlignment(T); |
| 1276 | |
| 1277 | SmallString<16> EltName = V->getName(); |
| 1278 | EltName += ".elt"; |
| 1279 | auto *Addr = SI.getPointerOperand(); |
| 1280 | SmallString<16> AddrName = Addr->getName(); |
| 1281 | AddrName += ".repack"; |
| 1282 | |
| 1283 | auto *IdxType = Type::getInt64Ty(T->getContext()); |
| 1284 | auto *Zero = ConstantInt::get(IdxType, 0); |
| 1285 | |
| 1286 | uint64_t Offset = 0; |
| 1287 | for (uint64_t i = 0; i < NumElements; i++) { |
| 1288 | Value *Indices[2] = { |
| 1289 | Zero, |
| 1290 | ConstantInt::get(IdxType, i), |
| 1291 | }; |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1292 | auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices), |
| 1293 | AddrName); |
| 1294 | auto *Val = IC.Builder.CreateExtractValue(V, i, EltName); |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1295 | auto EltAlign = MinAlign(Align, Offset); |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1296 | Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign); |
Keno Fischer | a236dae | 2017-06-28 23:36:40 +0000 | [diff] [blame] | 1297 | AAMDNodes AAMD; |
| 1298 | SI.getAAMetadata(AAMD); |
| 1299 | NS->setAAMetadata(AAMD); |
Amaury Sechet | 3b8b2ea | 2016-03-02 22:36:45 +0000 | [diff] [blame] | 1300 | Offset += EltSize; |
| 1301 | } |
| 1302 | |
| 1303 | return true; |
David Majnemer | 7536460 | 2015-05-11 05:04:27 +0000 | [diff] [blame] | 1304 | } |
| 1305 | |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1306 | return false; |
| 1307 | } |
| 1308 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1309 | /// equivalentAddressValues - Test if A and B will obviously have the same |
| 1310 | /// value. This includes recognizing that %t0 and %t1 will have the same |
| 1311 | /// value in code like this: |
| 1312 | /// %t0 = getelementptr \@a, 0, 3 |
| 1313 | /// store i32 0, i32* %t0 |
| 1314 | /// %t1 = getelementptr \@a, 0, 3 |
| 1315 | /// %t2 = load i32* %t1 |
| 1316 | /// |
| 1317 | static bool equivalentAddressValues(Value *A, Value *B) { |
| 1318 | // Test if the values are trivially equivalent. |
| 1319 | if (A == B) return true; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1320 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1321 |   // Test if the values come from identical arithmetic instructions. |
| 1322 |   // This uses isIdenticalToWhenDefined instead of isIdenticalTo because |
| 1323 |   // it's only used to compare two uses within the same basic block, which |
| 1324 | // means that they'll always either have the same value or one of them |
| 1325 | // will have an undefined value. |
| 1326 | if (isa<BinaryOperator>(A) || |
| 1327 | isa<CastInst>(A) || |
| 1328 | isa<PHINode>(A) || |
| 1329 | isa<GetElementPtrInst>(A)) |
| 1330 | if (Instruction *BI = dyn_cast<Instruction>(B)) |
| 1331 | if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI)) |
| 1332 | return true; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1333 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1334 | // Otherwise they may not be equivalent. |
| 1335 | return false; |
| 1336 | } |
| 1337 | |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1338 | /// Converts store (bitcast (load (bitcast (select ...)))) to |
| 1339 | /// store (load (select ...)), where select is minmax: |
| 1340 | /// select ((cmp load V1, load V2), V1, V2). |
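| | /// |
| | /// An illustrative shape of the pattern (simplified; exactly which |
| | /// compare/select pairs qualify is decided by isMinMaxWithLoads): |
| | ///   %lv1 = load float, float* %v1 |
| | ///   %lv2 = load float, float* %v2 |
| | ///   %cmp = fcmp olt float %lv1, %lv2 |
| | ///   %sel = select i1 %cmp, float* %v1, float* %v2 |
| | ///   %bc  = bitcast float* %sel to i32* |
| | ///   %int = load i32, i32* %bc |
| | ///   %dbc = bitcast float* %dst to i32* |
| | ///   store i32 %int, i32* %dbc |
| | /// This combine rewrites it so the load and store use the float type |
| | /// directly. |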
Alexey Bataev | 83c15b1 | 2017-12-12 20:28:46 +0000 | [diff] [blame] | 1341 | static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC, |
| 1342 | StoreInst &SI) { |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1343 |   // The store's pointer operand must be a bitcast. |
Alexey Bataev | 83c15b1 | 2017-12-12 20:28:46 +0000 | [diff] [blame] | 1344 | if (!match(SI.getPointerOperand(), m_BitCast(m_Value()))) |
Alexey Bataev | fa0a76d | 2017-12-12 19:12:34 +0000 | [diff] [blame] | 1345 | return false; |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1346 | // load? integer? |
| 1347 | Value *LoadAddr; |
| 1348 | if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr))))) |
Alexey Bataev | fa0a76d | 2017-12-12 19:12:34 +0000 | [diff] [blame] | 1349 | return false; |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1350 | auto *LI = cast<LoadInst>(SI.getValueOperand()); |
| 1351 | if (!LI->getType()->isIntegerTy()) |
Alexey Bataev | fa0a76d | 2017-12-12 19:12:34 +0000 | [diff] [blame] | 1352 | return false; |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1353 | if (!isMinMaxWithLoads(LoadAddr)) |
Alexey Bataev | fa0a76d | 2017-12-12 19:12:34 +0000 | [diff] [blame] | 1354 | return false; |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1355 | |
Alexey Bataev | 83c15b1 | 2017-12-12 20:28:46 +0000 | [diff] [blame] | 1356 | if (!all_of(LI->users(), [LI, LoadAddr](User *U) { |
| 1357 | auto *SI = dyn_cast<StoreInst>(U); |
| 1358 | return SI && SI->getPointerOperand() != LI && |
| 1359 | peekThroughBitcast(SI->getPointerOperand()) != LoadAddr && |
| 1360 | !SI->getPointerOperand()->isSwiftError(); |
| 1361 | })) |
| 1362 | return false; |
| 1363 | |
| 1364 | IC.Builder.SetInsertPoint(LI); |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1365 | LoadInst *NewLI = combineLoadToNewType( |
| 1366 | IC, *LI, LoadAddr->getType()->getPointerElementType()); |
Alexey Bataev | 83c15b1 | 2017-12-12 20:28:46 +0000 | [diff] [blame] | 1367 | // Replace all the stores with stores of the newly loaded value. |
| 1368 | for (auto *UI : LI->users()) { |
| 1369 | auto *USI = cast<StoreInst>(UI); |
| 1370 | IC.Builder.SetInsertPoint(USI); |
| 1371 | combineStoreToNewValue(IC, *USI, NewLI); |
| 1372 | } |
| 1373 | IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType())); |
| 1374 | IC.eraseInstFromFunction(*LI); |
Alexey Bataev | fa0a76d | 2017-12-12 19:12:34 +0000 | [diff] [blame] | 1375 | return true; |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1376 | } |
| 1377 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1378 | Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { |
| 1379 | Value *Val = SI.getOperand(0); |
| 1380 | Value *Ptr = SI.getOperand(1); |
| 1381 | |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1382 | // Try to canonicalize the stored type. |
| 1383 | if (combineStoreToValueType(*this, SI)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1384 | return eraseInstFromFunction(SI); |
Chandler Carruth | 816d26f | 2014-11-25 10:09:51 +0000 | [diff] [blame] | 1385 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1386 | // Attempt to improve the alignment. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1387 | unsigned KnownAlign = getOrEnforceKnownAlignment( |
Daniel Jasper | aec2fa3 | 2016-12-19 08:22:17 +0000 | [diff] [blame] | 1388 | Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT); |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1389 | unsigned StoreAlign = SI.getAlignment(); |
| 1390 | unsigned EffectiveStoreAlign = |
| 1391 | StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType()); |
Dan Gohman | 3619660 | 2010-08-03 18:20:32 +0000 | [diff] [blame] | 1392 | |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1393 | if (KnownAlign > EffectiveStoreAlign) |
| 1394 | SI.setAlignment(KnownAlign); |
| 1395 | else if (StoreAlign == 0) |
| 1396 | SI.setAlignment(EffectiveStoreAlign); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1397 | |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1398 | // Try to canonicalize the stored type. |
| 1399 | if (unpackStoreToAggregate(*this, SI)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1400 | return eraseInstFromFunction(SI); |
Mehdi Amini | b344ac9 | 2015-03-14 22:19:33 +0000 | [diff] [blame] | 1401 | |
Alexey Bataev | fa0a76d | 2017-12-12 19:12:34 +0000 | [diff] [blame] | 1402 | if (removeBitcastsFromLoadStoreOnMinMax(*this, SI)) |
| 1403 | return eraseInstFromFunction(SI); |
Alexey Bataev | ec95c6c | 2017-12-08 15:32:10 +0000 | [diff] [blame] | 1404 | |
Hal Finkel | 847e05f | 2015-02-20 03:05:53 +0000 | [diff] [blame] | 1405 | // Replace GEP indices if possible. |
| 1406 | if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) { |
| 1407 | Worklist.Add(NewGEPI); |
| 1408 | return &SI; |
| 1409 | } |
| 1410 | |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1411 | // Don't hack volatile/ordered stores. |
| 1412 | // FIXME: Some bits are legal for ordered atomic stores; needs refactoring. |
| 1413 | if (!SI.isUnordered()) return nullptr; |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1414 | |
| 1415 | // If the RHS is an alloca with a single use, zapify the store, making the |
| 1416 | // alloca dead. |
| 1417 | if (Ptr->hasOneUse()) { |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1418 | if (isa<AllocaInst>(Ptr)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1419 | return eraseInstFromFunction(SI); |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1420 | if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { |
| 1421 | if (isa<AllocaInst>(GEP->getOperand(0))) { |
| 1422 | if (GEP->getOperand(0)->hasOneUse()) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1423 | return eraseInstFromFunction(SI); |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1424 | } |
| 1425 | } |
| 1426 | } |
| 1427 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1428 | // Do really simple DSE, to catch cases where there are several consecutive |
| 1429 | // stores to the same location, separated by a few arithmetic operations. This |
| 1430 | // situation often occurs with bitfield accesses. |
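| |   // For example (an illustrative sketch), in: |
| |   //   store i32 %old, i32* %p |
| |   //   %new = or i32 %old, 255      ; e.g. a bitfield update |
| |   //   store i32 %new, i32* %p |
| |   // the first store is dead and is removed by the backwards scan below. |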
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1431 | BasicBlock::iterator BBI(SI); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1432 | for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts; |
| 1433 | --ScanInsts) { |
| 1434 | --BBI; |
Victor Hernandez | 5f8c8c0 | 2010-01-22 19:05:05 +0000 | [diff] [blame] | 1435 |     // Don't count debug info directives, lest they affect codegen, and |
| 1436 |     // skip over pointer-to-pointer bitcasts, which are NOPs. |
| 1437 | if (isa<DbgInfoIntrinsic>(BBI) || |
Duncan Sands | 19d0b47 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 1438 | (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1439 | ScanInsts++; |
| 1440 | continue; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1441 | } |
| 1442 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1443 | if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) { |
| 1444 |       // Prev store is unordered, and stores to the same location? |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1445 | if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1), |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1446 | SI.getOperand(1))) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1447 | ++NumDeadStore; |
| 1448 | ++BBI; |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1449 | eraseInstFromFunction(*PrevSI); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1450 | continue; |
| 1451 | } |
| 1452 | break; |
| 1453 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1454 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1455 | // If this is a load, we have to stop. However, if the loaded value is from |
| 1456 | // the pointer we're loading and is producing the pointer we're storing, |
| 1457 | // then *this* store is dead (X = load P; store X -> P). |
| 1458 | if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) { |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1459 | if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) { |
| 1460 | assert(SI.isUnordered() && "can't eliminate ordering operation"); |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1461 | return eraseInstFromFunction(SI); |
Philip Reames | d7a6cc8 | 2015-12-17 22:19:27 +0000 | [diff] [blame] | 1462 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1463 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1464 | // Otherwise, this is a load from some other location. Stores before it |
| 1465 | // may not be dead. |
| 1466 | break; |
| 1467 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1468 | |
Sanjoy Das | 679bc32 | 2017-01-17 05:45:09 +0000 | [diff] [blame] | 1469 | // Don't skip over loads, throws or things that can modify memory. |
| 1470 | if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow()) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1471 | break; |
| 1472 | } |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1473 | |
| 1474 | // store X, null -> turns into 'unreachable' in SimplifyCFG |
Anna Thomas | 2dd9835 | 2017-12-12 14:12:33 +0000 | [diff] [blame] | 1475 | // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG |
| 1476 | if (canSimplifyNullStoreOrGEP(SI)) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1477 | if (!isa<UndefValue>(Val)) { |
| 1478 | SI.setOperand(0, UndefValue::get(Val->getType())); |
| 1479 | if (Instruction *U = dyn_cast<Instruction>(Val)) |
| 1480 | Worklist.Add(U); // Dropped a use. |
| 1481 | } |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1482 | return nullptr; // Do not modify these! |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1483 | } |
| 1484 | |
| 1485 | // store undef, Ptr -> noop |
| 1486 | if (isa<UndefValue>(Val)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1487 | return eraseInstFromFunction(SI); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1488 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1489 | // If this store is the last instruction in the basic block (possibly |
Victor Hernandez | 5f5abd5 | 2010-01-21 23:07:15 +0000 | [diff] [blame] | 1490 | // excepting debug info instructions), and if the block ends with an |
| 1491 | // unconditional branch, try to move it to the successor block. |
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1492 | BBI = SI.getIterator(); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1493 | do { |
| 1494 | ++BBI; |
Victor Hernandez | 5f8c8c0 | 2010-01-22 19:05:05 +0000 | [diff] [blame] | 1495 | } while (isa<DbgInfoIntrinsic>(BBI) || |
Duncan Sands | 19d0b47 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 1496 | (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1497 | if (BranchInst *BI = dyn_cast<BranchInst>(BBI)) |
| 1498 | if (BI->isUnconditional()) |
| 1499 | if (SimplifyStoreAtEndOfBlock(SI)) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1500 | return nullptr; // xform done! |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1501 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1502 | return nullptr; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1503 | } |
| 1504 | |
| 1505 | /// SimplifyStoreAtEndOfBlock - Turn things like: |
| 1506 | /// if () { *P = v1; } else { *P = v2 } |
| 1507 | /// into a phi node with a store in the successor. |
| 1508 | /// |
| 1509 | /// Simplify things like: |
| 1510 | /// *P = v1; if () { *P = v2; } |
| 1511 | /// into a phi node with a store in the successor. |
| 1512 | /// |
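| | /// In both cases the result, sketched in illustrative IR, is a block with: |
| | ///   %storemerge = phi i32 [ %v1, %pred1 ], [ %v2, %pred2 ] |
| | ///   store i32 %storemerge, i32* %P |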
| 1513 | bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { |
Philip Reames | 5f0e369 | 2016-04-22 20:53:32 +0000 | [diff] [blame] | 1514 | assert(SI.isUnordered() && |
| 1515 |          "this code has not been audited for volatile or ordered store case"); |
Justin Bogner | c7e4fbe | 2016-08-05 01:09:48 +0000 | [diff] [blame] | 1516 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1517 | BasicBlock *StoreBB = SI.getParent(); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1518 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1519 | // Check to see if the successor block has exactly two incoming edges. If |
| 1520 | // so, see if the other predecessor contains a store to the same location. |
| 1521 |   // If so, insert a PHI node (if needed) and move the stores down. |
| 1522 | BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1523 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1524 | // Determine whether Dest has exactly two predecessors and, if so, compute |
| 1525 | // the other predecessor. |
| 1526 | pred_iterator PI = pred_begin(DestBB); |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1527 | BasicBlock *P = *PI; |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1528 | BasicBlock *OtherBB = nullptr; |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1529 | |
| 1530 | if (P != StoreBB) |
| 1531 | OtherBB = P; |
| 1532 | |
| 1533 | if (++PI == pred_end(DestBB)) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1534 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1535 | |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1536 | P = *PI; |
| 1537 | if (P != StoreBB) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1538 | if (OtherBB) |
| 1539 | return false; |
Gabor Greif | 1b787df | 2010-07-12 15:48:26 +0000 | [diff] [blame] | 1540 | OtherBB = P; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1541 | } |
| 1542 | if (++PI != pred_end(DestBB)) |
| 1543 | return false; |
| 1544 | |
| 1545 | // Bail out if all the relevant blocks aren't distinct (this can happen, |
| 1546 | // for example, if SI is in an infinite loop) |
| 1547 | if (StoreBB == DestBB || OtherBB == DestBB) |
| 1548 | return false; |
| 1549 | |
| 1550 | // Verify that the other block ends in a branch and is not otherwise empty. |
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 1551 | BasicBlock::iterator BBI(OtherBB->getTerminator()); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1552 | BranchInst *OtherBr = dyn_cast<BranchInst>(BBI); |
| 1553 | if (!OtherBr || BBI == OtherBB->begin()) |
| 1554 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1555 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1556 | // If the other block ends in an unconditional branch, check for the 'if then |
| 1557 | // else' case. there is an instruction before the branch. |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1558 | StoreInst *OtherStore = nullptr; |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1559 | if (OtherBr->isUnconditional()) { |
| 1560 | --BBI; |
| 1561 | // Skip over debugging info. |
Victor Hernandez | 5f8c8c0 | 2010-01-22 19:05:05 +0000 | [diff] [blame] | 1562 | while (isa<DbgInfoIntrinsic>(BBI) || |
Duncan Sands | 19d0b47 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 1563 | (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) { |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1564 | if (BBI==OtherBB->begin()) |
| 1565 | return false; |
| 1566 | --BBI; |
| 1567 | } |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1568 | // If this isn't a store, isn't a store to the same location, or is not the |
| 1569 | // right kind of store, bail out. |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1570 | OtherStore = dyn_cast<StoreInst>(BBI); |
| 1571 | if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) || |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1572 | !SI.isSameOperationAs(OtherStore)) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1573 | return false; |
| 1574 | } else { |
| 1575 | // Otherwise, the other block ended with a conditional branch. If one of the |
| 1576 | // destinations is StoreBB, then we have the if/then case. |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1577 | if (OtherBr->getSuccessor(0) != StoreBB && |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1578 | OtherBr->getSuccessor(1) != StoreBB) |
| 1579 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1580 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1581 | // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an |
| 1582 | // if/then triangle. See if there is a store to the same ptr as SI that |
| 1583 | // lives in OtherBB. |
| 1584 | for (;; --BBI) { |
| 1585 | // Check to see if we find the matching store. |
| 1586 | if ((OtherStore = dyn_cast<StoreInst>(BBI))) { |
| 1587 | if (OtherStore->getOperand(1) != SI.getOperand(1) || |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1588 | !SI.isSameOperationAs(OtherStore)) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1589 | return false; |
| 1590 | break; |
| 1591 | } |
| 1592 | // If we find something that may be using or overwriting the stored |
| 1593 | // value, or if we run out of instructions, we can't do the xform. |
Sanjoy Das | 679bc32 | 2017-01-17 05:45:09 +0000 | [diff] [blame] | 1594 | if (BBI->mayReadFromMemory() || BBI->mayThrow() || |
| 1595 | BBI->mayWriteToMemory() || BBI == OtherBB->begin()) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1596 | return false; |
| 1597 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1598 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1599 | // In order to eliminate the store in OtherBr, we have to |
| 1600 | // make sure nothing reads or overwrites the stored value in |
| 1601 | // StoreBB. |
| 1602 | for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) { |
| 1603 | // FIXME: This should really be AA driven. |
Sanjoy Das | 679bc32 | 2017-01-17 05:45:09 +0000 | [diff] [blame] | 1604 | if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory()) |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1605 | return false; |
| 1606 | } |
| 1607 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1608 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1609 | // Insert a PHI node now if we need it. |
| 1610 | Value *MergedVal = OtherStore->getOperand(0); |
| 1611 | if (MergedVal != SI.getOperand(0)) { |
Jay Foad | 5213134 | 2011-03-30 11:28:46 +0000 | [diff] [blame] | 1612 | PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge"); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1613 | PN->addIncoming(SI.getOperand(0), SI.getParent()); |
| 1614 | PN->addIncoming(OtherStore->getOperand(0), OtherBB); |
| 1615 | MergedVal = InsertNewInstBefore(PN, DestBB->front()); |
| 1616 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1617 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1618 | // Advance to a place where it is safe to insert the new store and |
| 1619 | // insert it. |
Bill Wendling | 8ddfc09 | 2011-08-16 20:45:24 +0000 | [diff] [blame] | 1620 | BBI = DestBB->getFirstInsertionPt(); |
Eli Friedman | 35211c6 | 2011-05-27 00:19:40 +0000 | [diff] [blame] | 1621 | StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), |
Eli Friedman | 8bc586e | 2011-08-15 22:09:40 +0000 | [diff] [blame] | 1622 | SI.isVolatile(), |
| 1623 | SI.getAlignment(), |
| 1624 | SI.getOrdering(), |
Konstantin Zhuravlyov | bb80d3e | 2017-07-11 22:23:00 +0000 | [diff] [blame] | 1625 | SI.getSyncScopeID()); |
Eli Friedman | 35211c6 | 2011-05-27 00:19:40 +0000 | [diff] [blame] | 1626 | InsertNewInstBefore(NewSI, *BBI); |
Paul Robinson | 383c5c2 | 2017-02-06 22:19:04 +0000 | [diff] [blame] | 1627 | // The debug locations of the original instructions might differ; merge them. |
Dehao Chen | f464627 | 2017-10-02 18:13:14 +0000 | [diff] [blame] | 1628 | NewSI->applyMergedLocation(SI.getDebugLoc(), OtherStore->getDebugLoc()); |
Eli Friedman | 35211c6 | 2011-05-27 00:19:40 +0000 | [diff] [blame] | 1629 | |
Hal Finkel | cc39b67 | 2014-07-24 12:16:19 +0000 | [diff] [blame] | 1630 | // If the two stores had AA tags, merge them. |
| 1631 | AAMDNodes AATags; |
| 1632 | SI.getAAMetadata(AATags); |
| 1633 | if (AATags) { |
| 1634 | OtherStore->getAAMetadata(AATags, /* Merge = */ true); |
| 1635 | NewSI->setAAMetadata(AATags); |
| 1636 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1637 | |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1638 | // Nuke the old stores. |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1639 | eraseInstFromFunction(SI); |
| 1640 | eraseInstFromFunction(*OtherStore); |
Chris Lattner | a65e2f7 | 2010-01-05 05:57:49 +0000 | [diff] [blame] | 1641 | return true; |
| 1642 | } |