//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
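// For example, in:
//
//   store i32 0, i32* %P
//   store i32 1, i32* %P
//
// the first store is dead: the second store overwrites every byte of it with
// no intervening read.
//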
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
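// Each OverlapIntervalsTy records, for one store, the half-open byte
// intervals overwritten by later stores: the key is an interval's end offset
// and the mapped value is its start offset (see isOverwrite below).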
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}

/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to are unused,
/// may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    }
  }

  // Note: we only get here for calls with analyzable writes, i.e. libcalls.
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      // Do shorten memory intrinsics.
      // FIXME: Add memmove if it's also safe to transform.
      return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  // TODO: factor this to reuse getLocForWrite.
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written to by analyzable instruction?");
  // TODO: most APIs don't expect const Value *.
  return const_cast<Value*>(Loc.Ptr);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
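  // If null is a defined address in this function, nothing can be assumed
  // about the size of the object a null pointer refers to.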
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL,
                                   AliasAnalysis &AA,
                                   const Function *F) {
  // If we don't know the sizes of either access, then we can't do a
  // comparison.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OW_Unknown;

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
                      << ", " << int64_t(EarlierOff + Earlier.Size)
                      << ") Later [" << LaterOff << ", "
                      << int64_t(LaterOff + Later.Size) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                        << EarlierOff << ", "
                        << int64_t(EarlierOff + Earlier.Size)
                        << ") Composite Later [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

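  // Example: an i64 store followed by an i8 store into one of its bytes.
  // The later store writes only memory that the earlier store already covers,
  // so the pair below is a candidate for partial store merging.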
  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + Earlier.Size) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
                      << EarlierOff << ", "
                      << int64_t(EarlierOff + Earlier.Size)
                      << ") by a later store [" << LaterOff << ", "
                      << int64_t(LaterOff + Later.Size) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
    assert(int64_t(LaterOff + Later.Size) <
               int64_t(EarlierOff + Earlier.Size) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<AnyMemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are not partial overlaps).  Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)          // Inst
    //
    // with Inst reading/writing a >= size than DepWrite, we can reason as
    // follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst.  Since
    //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the second instruction's block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the block of FirstI.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before FirstI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SecondI must be dominated by FirstI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
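    // Only a predecessor whose sole successor is BB transfers control to it
    // unconditionally.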
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      LLVM_DEBUG(
          dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
                 << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

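    // The dependency is in some other block; keep walking up through
    // predecessors that unconditionally lead here, since stores to the freed
    // memory in those blocks are dead as well.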
    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI,
                                  const Function *F) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same: stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                          << *Dead << "\n  Objects: ";
                   for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                        E = Pointers.end();
                        I != E; ++I) {
                     dbgs() << **I;
                     if (std::next(I) != E)
                       dbgs() << ", ";
                   } dbgs()
                   << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                        << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI,
                                                                BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of
    // already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway then it's likely
  // a good idea to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to optimize:
  // any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

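  // Only shorten if the new boundary keeps the write acceptably aligned:
  // either it is a power of two at least as large as the original destination
  // alignment, or it remains a multiple of that alignment.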
  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewLength % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
                    << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
                    << ", " << EarlierSize << ")\n");

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

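  // Intervals are keyed by their end offset, so the map's last entry is the
  // overwritten range nearest the end of the earlier store.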
  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

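  // Intervals are keyed by their end offset, so the map's first entry is the
  // overwritten range nearest the start of the earlier store.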
  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
    assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size);
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      LLVM_DEBUG(
          dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                 << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

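  // calloc returns zero-initialized memory, so a store of zero back into a
  // calloc'ed object is redundant as long as the memory has not been modified
  // since the allocation.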
  // Remove null stores into the calloc'ed objects.
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      LLVM_DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
1099 if (Inst->mayThrow()) {
1100 LastThrowingInstIndex = CurInstNumber;
1101 continue;
1102 }
1103
Chad Rosier89c32a92016-07-08 16:48:40 +00001104 // Check to see if Inst writes to memory. If not, continue.
Philip Reames424e7a12018-01-21 01:44:33 +00001105 if (!hasAnalyzableMemoryWrite(Inst, *TLI))
Owen Anderson0aecf0e2007-08-08 04:52:29 +00001106 continue;
Chris Lattnerd4f10902010-11-30 00:01:19 +00001107
    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on. MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with. If we
      // end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out. However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any
      // store to it is dead along the unwind edge. Otherwise, we need to
      // preserve the store.
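      //
      // For example (a hypothetical IR snippet, where @mayThrow may unwind):
      //   %m = call i8* @malloc(i64 4)   ; non-escaping allocation
      //   store i8 1, i8* %m             ; dead even across the unwind edge
      //   call void @mayThrow()
      //   store i8 2, i8* %m
      // Had %m escaped, the first store would be visible on the unwind path
      // and could not be removed.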
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value *Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured is a
          // reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) known not to be
      // read by 'Inst', then we can remove it.
      // Also try to merge two stores if a later one only touches memory
      // written to by the earlier one.
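      //
      // For example (a hypothetical IR snippet):
      //   store i32 1, i32* %p   ; DepWrite: completely overwritten below
      //   store i32 2, i32* %p   ; Inst: the "killer" store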
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                         InstWriteOffset, DepWrite, IOL, *AA,
                                         BB.getParent());
        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
                            << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
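          // Shorten the earlier write instead of removing it. For example
          // (a hypothetical sketch of the OW_End case):
          //   memset(%p, 0, 32)                ; DepWrite
          //   store to bytes [16, 32) of %p    ; Inst
          // can be shortened so the memset writes only bytes [0, 16).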
          assert(!EnablePartialOverwriteTracking &&
                 "Should not shorten stores here when partial-overwrite "
                 "tracking is enabled");
          int64_t EarlierSize = DepLoc.Size;
          int64_t LaterSize = Loc.Size;
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize,
                                     IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the earlier store is:
            //   a) partially overwritten by the later store to 'Loc',
            //   b) fully contains the later store, and
            //   c) both stores have a constant value,
            // then merge the two stores, replacing the earlier store's value
            // with a merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and
            // probably some mem intrinsics (if needed)
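            //
            // For example (a hypothetical little-endian sketch):
            //   Earlier: store i32 0x11223344 at offset 0
            //   Later:   store i8  0xAB       at offset 1
            // gives BitOffsetDiff = 8, LShiftAmount = 8, Mask = 0x0000FF00,
            // and Merged = (0x11223344 & ~0x0000FF00) | (0xAB << 8)
            //            = 0x1122AB44.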

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store.
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                              << "\n  Later: " << *Inst
                              << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove the earlier, wider store.
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that
      // stores to the same location. For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and
      // Q alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
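  //
  // For example (a hypothetical IR snippet):
  //   %a = alloca i32
  //   store i32 1, i32* %a   ; dead: %a does not outlive the block
  //   ret void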
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks. Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}