//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//
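//
// For illustration only (a minimal sketch, not part of the pass itself): given
// IR such as
//
//   %a = alloca { i32, i32 }
//   %f0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 0
//   store i32 1, i32* %f0
//   %v = load i32, i32* %f0
//
// SROA can promote the first element of the aggregate to an SSA value, so %v
// becomes the constant 1 and, absent other escaping uses, the alloca vanishes.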

#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifndef NDEBUG
// We only use this for a debug check.
#include <random>
#endif

using namespace llvm;
using namespace llvm::sroa;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);

/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                        cl::Hidden);
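
// For illustration (assuming the legacy pass-manager spelling contemporary
// with this code): both hidden flags can be toggled from the command line,
// e.g.
//   opt -sroa -sroa-random-shuffle-slices -sroa-strict-inbounds foo.ll -S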

namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
  std::string Prefix;

  const Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};
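
// Illustrative usage sketch (hypothetical, showing how a rewriter might use
// this inserter via the IRBuilderTy alias defined just below): prefix every
// value created for a given alloca so the resulting IR is easy to attribute
// in +Asserts builds.
//   IRBuilderTy IRB(InsertPt);                    // InsertPt is assumed.
//   IRB.SetNamePrefix(Twine(AI.getName()) + ".");
//   Value *V = IRB.CreateLoad(Ptr, "load");       // named "<alloca>.load"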

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;

/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// The beginning offset of the range.
  uint64_t BeginOffset = 0;

  /// The ending offset, not included in the range.
  uint64_t EndOffset = 0;

  /// Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() = default;

  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }
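
  // Worked ordering example (illustrative, not from the original source):
  // writing slices as [begin,end)/S for splittable and /U for unsplittable,
  // operator< sorts
  //   [0,16)/U < [0,8)/U < [0,16)/S < [0,8)/S < [4,12)/U
  // so begin offsets increase, unsplittable slices precede splittable ones at
  // the same begin, and the widest (spanning) slice leads each such cluster.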

  /// Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};

} // end anonymous namespace

/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
  /// Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// Support for iterating over the slices.
  /// @{
  using iterator = SmallVectorImpl<Slice>::iterator;
  using range = iterator_range<iterator>;

  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
  using const_range = iterator_range<const_iterator>;

  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// Erase a range of slices.
  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }

  /// Insert new slices for this alloca.
  ///
  /// This moves the slices into the alloca's slices collection, and re-sorts
  /// everything so that the usual ordering properties of the alloca's slices
  /// hold.
  void insert(ArrayRef<Slice> NewSlices) {
    int OldSize = Slices.size();
    Slices.append(NewSlices.begin(), NewSlices.end());
    auto SliceI = Slices.begin() + OldSize;
    llvm::sort(SliceI, Slices.end());
    std::inplace_merge(Slices.begin(), SliceI, Slices.end());
  }
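
  // Note (explanatory, not from the original source): sorting just the
  // appended tail and merging it into the already-sorted prefix keeps an
  // insert of K new slices into N existing ones at roughly O(K log K + N),
  // rather than the O((N + K) log (N + K)) of a full re-sort.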

  // Forward declare the iterator and range accessor for walking the
  // partitions.
  class partition_iterator;
  iterator_range<partition_iterator> partitions();

  /// Access the dead users for this alloca.
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class llvm::sroa::Partition {
private:
  friend class AllocaSlices;
  friend class AllocaSlices::partition_iterator;

  using iterator = AllocaSlices::iterator;

  /// The beginning and ending offsets of the alloca for this
  /// partition.
  uint64_t BeginOffset, EndOffset;

  /// The start and end iterators of this partition.
  iterator SI, SJ;

  /// A collection of split slice tails overlapping the partition.
  SmallVector<Slice *, 4> SplitTails;

  /// Raw constructor builds an empty partition starting and ending at
  /// the given iterator.
  Partition(iterator SI) : SI(SI), SJ(SI) {}

public:
  /// The start offset of this partition.
  ///
  /// All of the contained slices start at or after this offset.
  uint64_t beginOffset() const { return BeginOffset; }

  /// The end offset of this partition.
  ///
  /// All of the contained slices end at or before this offset.
  uint64_t endOffset() const { return EndOffset; }

  /// The size of the partition.
  ///
  /// Note that this can never be zero.
  uint64_t size() const {
    assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
    return EndOffset - BeginOffset;
  }

  /// Test whether this partition contains no slices, and merely spans
  /// a region occupied by split slices.
  bool empty() const { return SI == SJ; }

  /// \name Iterate slices that start within the partition.
  /// These may be splittable or unsplittable. They have a begin offset >= the
  /// partition begin offset.
  /// @{
  // FIXME: We should probably define a "concat_iterator" helper and use that
  // to stitch together pointee_iterators over the split tails and the
  // contiguous iterators of the partition. That would give a much nicer
  // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable slices based on the usage patterns.
  iterator begin() const { return SI; }
  iterator end() const { return SJ; }
  /// @}

  /// Get the sequence of split slice tails.
  ///
  /// These tails are of slices which start before this partition but are
  /// split and overlap into the partition. We accumulate these while forming
  /// partitions.
  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};

/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
                                  Partition> {
  friend class AllocaSlices;

  /// Most of the state for walking the partitions is held in a class
  /// with a nice interface for examining them.
  Partition P;

  /// We need to keep the end of the slices to know when to stop.
  AllocaSlices::iterator SE;

  /// We also need to keep track of the maximum split end offset seen.
  /// FIXME: Do we really?
  uint64_t MaxSplitSliceEndOffset = 0;

  /// Sets the partition to be empty at the given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");

    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        P.SplitTails.erase(llvm::remove_if(P.SplitTails,
                                           [&](Slice *S) {
                                             return S->endOffset() <=
                                                    P.EndOffset;
                                           }),
                           P.SplitTails.end());
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }

    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable, immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }

    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions are marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}
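
// Worked example (illustrative only, not from the original source): suppose
// an alloca's sorted slices are
//   S1 = [0,4) unsplittable, S0 = [0,16) splittable, S2 = [8,12) unsplittable.
// Walking partitions() then yields:
//   [0,4)   containing S1 and S0 (S0's tail [4,16) is carried forward),
//   [4,8)   an "empty" partition covered only by S0's split tail,
//   [8,12)  containing S2, still overlapped by S0's tail,
//   [12,16) a final empty partition covering the rest of S0's tail.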

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}
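
// For example (illustrative): "select i1 true, %a, %b" folds to %a through the
// constant-condition case (CI->isZero() is 0, so operand 1 is returned), and
// "select i1 %c, %a, %a" folds to %a through the equal-operands case.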

/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
    // If PN merges together the same value, return that value.
    return PN->hasConstantValue();
  }
  return foldSelectInst(cast<SelectInst>(I));
}

/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;

  using Base = PtrUseVisitor<SliceBuilder>;

  const uint64_t AllocSize;
  AllocaSlices &AS;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I).second)
      AS.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
                        << Offset
                        << " which has zero size or starts outside of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
                        << Offset << " to remain within the " << AllocSize
                        << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    if (ASC.use_empty())
      return markAsDead(ASC);

    return Base::visitAddrSpaceCastInst(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      const DataLayout &DL = GEPI.getModule()->getDataLayout();
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }
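
    // Worked example (illustrative): for "getelementptr inbounds [4 x i32],
    // [4 x i32]* %a, i64 0, i64 2" on a 16-byte alloca, the loop accumulates
    // 0 * 16 + 2 * 4 = 8 bytes; since 8 <= 16 the GEP survives, whereas an
    // index of 5 would accumulate 20 > 16 and mark the GEP dead under
    // -sroa-strict-inbounds.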
| 765 | |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 766 | return Base::visitGetElementPtrInst(GEPI); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 767 | } |
| 768 | |
Chandler Carruth | e41e7b7 | 2012-12-10 08:28:39 +0000 | [diff] [blame] | 769 | void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, |
Chandler Carruth | a1c54bb | 2013-03-14 11:32:24 +0000 | [diff] [blame] | 770 | uint64_t Size, bool IsVolatile) { |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 771 | // We allow splitting of non-volatile loads and stores where the type is an |
| 772 | // integer type. These may be used to implement 'memcpy' or other "transfer |
| 773 | // of bits" patterns. |
| 774 | bool IsSplittable = Ty->isIntegerTy() && !IsVolatile; |
Chandler Carruth | 58d0556 | 2012-10-25 04:37:07 +0000 | [diff] [blame] | 775 | |
| 776 | insertUse(I, Offset, Size, IsSplittable); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 777 | } |

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    if (LI.isVolatile() &&
        LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    if (SI.isVolatile() &&
        SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so we simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }
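
  // A sketch of why the bounds test above is split in two, with hypothetical
  // numbers: for AllocSize = 16, Size = 8, and a bogus Offset of 2^64 - 4,
  // the naive "Offset + Size > AllocSize" wraps around to 4 and would accept
  // the store, while "Size > AllocSize || Offset.ugt(AllocSize - Size)"
  // never overflows and correctly rejects it.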

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length memset intrinsics (and ones entirely out of bounds) can
      // be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a store to a different address space.
    // TODO: Use a store with the casted new alloca?
    if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&II);

    insertUse(II, Offset, Length ? Length->getLimitedValue()
                                 : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }
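
  // For example (illustrative IR only), a constant-length memset such as
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 8, i1 false)
  // becomes a splittable 8-byte use, while an unknown length is
  // conservatively clamped to the remainder of the alloca and recorded as
  // unsplittable.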

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first visit marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a load/store to a different address space.
    // TODO: Use a store with the casted new alloca?
    if (II.isVolatile() &&
        (II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
         II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if it has already been added to our slices.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        AS.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where exactly the same value is used for
    // both the source and the destination.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both the source and the destination for a mem transfer,
    // then they both point into the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = AS.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }
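
  // A sketch of the double-visit logic on hypothetical IR: for
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 false)
  // where %d and %s are offset pointers into the same alloca, the first
  // visit inserts a slice and records its index in MemTransferSliceMap; the
  // second visit finds that entry and either kills both sides (non-volatile
  // with matching begin offsets, a no-op copy) or marks both slices
  // unsplittable (an overlapping intra-alloca copy).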

  // Disable SROA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.isLifetimeStartOrEnd()) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }
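
  // For example (illustrative IR only):
  //   call void @llvm.lifetime.start.p0i8(i64 8, i8* %p)
  // is recorded as a splittable use of min(8, AllocSize - Offset) bytes;
  // any other intrinsic is handed back to the base PtrUseVisitor.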

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    const DataLayout &DL = Root->getModule()->getDataLayout();
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)).second)
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }
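
  // A sketch on hypothetical IR of what counts as unsafe here:
  //   %sel = select i1 %c, i32* %a, i32* %b
  //   store i32* %sel, i32** %escape
  // stores the select's pointer itself (the Op == UsedI case above), so the
  // store is returned as the offending instruction; plain loads and stores
  // through the select merely grow Size.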

  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
    if (I.use_empty())
      return markAsDead(I);

    // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
    // dead-operand-tracking mechanism. For instance, suppose neither loading
    // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
    // trap either. However, if we simply replace %U with undef using the
    // current dead-operand-tracking mechanism, "load (select undef, undef,
    // %other)" may trap because the select may return the first operand
    // "undef".
    if (Value *Result = foldPHINodeOrSelectInst(I)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the PHI/select as if we had RAUW'ed it.
        enqueueUsers(I);
      else
        // Otherwise the operand to the PHI/select is dead, and we can replace
        // it with undef.
        AS.DeadOperands.push_back(U);

      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&I);

    // See if we have already computed info on this node.
    uint64_t &Size = PHIOrSelectSizes[&I];
    if (!Size) {
      // This is a new PHI/Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      AS.DeadOperands.push_back(U);
      return;
    }

    insertUse(I, Offset, Size);
  }
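
  // Sketch of the out-of-bounds operand case (hypothetical IR): given a
  // 16-byte alloca and
  //   %oob = getelementptr i8, i8* %ptr, i64 1024
  //   %phi = phi i8* [ %oob, %bb0 ], [ %other, %bb1 ]
  // the %oob incoming operand has Offset >= AllocSize, so only that operand
  // is queued in AS.DeadOperands to be replaced with undef; the PHI itself
  // and its other side are left alone.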

  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }

  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }

  /// Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(
      llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
      Slices.end());

#ifndef NDEBUG
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(
        std::chrono::system_clock::now().time_since_epoch().count()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the slices. This arranges the offsets in ascending order and, for
  // slices at the same offset, the sizes in descending order.
  llvm::sort(Slices);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return TyIsCommon ? Ty : ITy;
}
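
// A sketch with hypothetical slices: if one use covering a partition loads
// its bytes as float and another loads them as i32, no common type exists,
// so TyIsCommon ends up false and the widest byte-width-multiple integer
// seen (i32 here) is returned as the fallback; if every covering use agreed
// on float, float itself would be returned.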

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
  const DataLayout &DL = PN.getModule()->getDataLayout();

  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  unsigned MaxAlign = 0;
  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
  APInt MaxSize(APWidth, 0);
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    uint64_t Size = DL.getTypeStoreSizeInBits(LI->getType());
    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
      continue;

    return false;
  }

  return true;
}
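
// For example (illustrative IR), a PHI of this shape is safe to speculate:
//   bb2:
//     %p = phi i32* [ %alloca, %bb0 ], [ %global, %bb1 ]
//     %v = load i32, i32* %p
// provided each predecessor's edge into %bb2 is non-critical (or the
// incoming pointer is unconditionally loadable), no may-write instruction
// sits between the PHI and the load, and every user of %p is such a load.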

static void speculatePHINodeLoads(PHINode &PN) {
  LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");

  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  Type *LoadTy = SomeLoad->getType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the AA tags and alignment to use from one of the loads. It doesn't
  // matter which one we get and if any differ.
  AAMDNodes AATags;
  SomeLoad->getAAMetadata(AATags);
  unsigned Align = SomeLoad->getAlignment();

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  DenseMap<BasicBlock *, Value *> InjectedLoads;
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    Value *InVal = PN.getIncomingValue(Idx);

    // A PHI node is allowed to have multiple (duplicated) entries for the same
    // basic block, as long as the value is the same. So if we already injected
    // a load in the predecessor, then we should reuse the same load for all
    // duplicated entries.
    if (Value *V = InjectedLoads.lookup(Pred)) {
      NewPN->addIncoming(V, Pred);
      continue;
    }

    Instruction *TI = Pred->getTerminator();
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        LoadTy, InVal,
        (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (AATags)
      Load->setAAMetadata(AATags);
    NewPN->addIncoming(Load, Pred);
    InjectedLoads[Pred] = Load;
  }

  LLVM_DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the results,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  const DataLayout &DL = SI.getModule()->getDataLayout();

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to them.
    if (!isSafeToLoadUnconditionally(TValue, LI->getType(), LI->getAlignment(),
                                     DL, LI))
      return false;
    if (!isSafeToLoadUnconditionally(FValue, LI->getType(), LI->getAlignment(),
                                     DL, LI))
      return false;
  }

  return true;
}

static void speculateSelectInstLoads(SelectInst &SI) {
  LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI.user_back());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
                                  LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
                                  LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and AA info if present.
    TL->setAlignment(LI->getAlignment());
    FL->setAlignment(LI->getAlignment());

    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags) {
      TL->setAAMetadata(Tags);
      FL->setAAMetadata(Tags);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    LLVM_DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}
| 1390 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 1391 | /// Build a GEP out of a base pointer and indices. |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 1392 | /// |
| 1393 | /// This will return the BasePtr if that is valid, or build a new GEP |
| 1394 | /// instruction using the IRBuilder if GEP-ing is needed. |
Chandler Carruth | d177f86 | 2013-03-20 07:30:36 +0000 | [diff] [blame] | 1395 | static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, |
Zachary Turner | 41a9ee9 | 2017-10-11 23:54:34 +0000 | [diff] [blame] | 1396 | SmallVectorImpl<Value *> &Indices, Twine NamePrefix) { |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 1397 | if (Indices.empty()) |
| 1398 | return BasePtr; |
| 1399 | |
| 1400 | // A single zero index is a no-op, so check for this and avoid building a GEP |
| 1401 | // in that case. |
| 1402 | if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) |
| 1403 | return BasePtr; |
| 1404 | |
James Y Knight | 7716075 | 2019-02-01 20:44:47 +0000 | [diff] [blame] | 1405 | return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(), |
| 1406 | BasePtr, Indices, NamePrefix + "sroa_idx"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 1407 | } |

/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices,
                                    Twine NamePrefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, NamePrefix);

  // Offset size to use for the indices.
  unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;

    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
      ElementTy = ArrayTy->getElementType();
      Indices.push_back(IRB.getIntN(OffsetSize, 0));
    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
      ElementTy = VectorTy->getElementType();
      Indices.push_back(IRB.getInt32(0));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
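
// A sketch of the zero-index descent above, with hypothetical types:
// starting from Ty = { [2 x { i32, i32 }] } and TargetTy = i32, the loop
// pushes three zero indices (struct field 0, array element 0, struct field
// 0 again) and reaches an i32 without moving the pointer. Had no layer
// matched TargetTy, the extra indices would be popped again and the GEP
// built exactly as originally indicated by Indices.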

/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       Twine NamePrefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
                                 NamePrefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return nullptr;

  // We try to analyze GEPs over vectors here, but note that these GEPs are
  // extremely poorly defined currently. The long-term goal is to remove GEPing
  // over a vector from the IR completely.
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
    if (ElementSizeInBits % 8 != 0) {
      // GEPs over non-multiple of 8 size vector elements are invalid.
      return nullptr;
    }
    APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(VecTy->getNumElements()))
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
                                    Offset, TargetTy, Indices, NamePrefix);
  }

  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    Type *ElementTy = ArrTy->getElementType();
    APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(ArrTy->getNumElements()))
      return nullptr;

    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, NamePrefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  uint64_t StructOffset = Offset.getZExtValue();
  if (StructOffset >= SL->getSizeInBytes())
    return nullptr;
  unsigned Index = SL->getElementContainingOffset(StructOffset);
  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
  Type *ElementTy = STy->getElementType(Index);
  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
    return nullptr; // The offset points into alignment padding.

  Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}
| 1525 | |
/// Get a natural GEP from a base pointer to a particular offset, resulting
/// in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
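///
/// For example (an illustrative sketch, not taken from an actual run): given
/// a base pointer of type { i32, [2 x float] }* and a request for Offset == 8
/// with TargetTy float, the recursion peels off struct index 1 and then array
/// index 1, producing roughly:
///
///   %gep = getelementptr inbounds { i32, [2 x float] },
///          { i32, [2 x float] }* %ptr, i64 0, i32 1, i64 1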
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices,
                                      Twine NamePrefix) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
  // an i8.
  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
    return nullptr;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return nullptr; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return nullptr; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}

/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has type PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
/// and produces the pointer type desired. Where it cannot, it will try to use
/// the natural GEP to arrive at the offset and bitcast to the type. Where that
/// fails, it will try to use an existing i8* and GEP to the byte offset and
/// bitcast to the type.
///
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
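///
/// For example (an illustrative sketch): when asked for an i32* at offset 4
/// from
///
///   %cast = bitcast { i32, i32 }* %a to i8*
///
/// no natural GEP exists through the i8* itself, so the walk peels the
/// bitcast back to %a and emits
///
///   %gep = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i64 0, i32 1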
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
                             APInt Offset, Type *PointerTy, Twine NamePrefix) {
  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(Ptr);
  SmallVector<Value *, 4> Indices;

  // We may end up computing an offset pointer that has the wrong type. If we
  // never are able to compute one directly that has the correct type, we'll
  // fall back to it, so keep it and the base it was computed from around here.
  Value *OffsetPtr = nullptr;
  Value *OffsetBasePtr;

  // Remember any i8 pointer we come across to re-use if we need to do a raw
  // byte offset.
  Value *Int8Ptr = nullptr;
  APInt Int8PtrOffset(Offset.getBitWidth(), 0);

  PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
  Type *TargetTy = TargetPtrTy->getElementType();

  // Because an `addrspacecast` may be involved, `Ptr` (the storage pointer)
  // may have a different address space from the expected `PointerTy` (the
  // pointer type to be used). Adjust the pointer type based on the original
  // storage pointer.
  auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerTy = TargetTy->getPointerTo(AS);

  do {
    // First fold any existing GEPs into the offset.
    while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      APInt GEPOffset(Offset.getBitWidth(), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;
      Offset += GEPOffset;
      Ptr = GEP->getPointerOperand();
      if (!Visited.insert(Ptr).second)
        break;
    }

    // See if we can perform a natural GEP here.
    Indices.clear();
    if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
                                           Indices, NamePrefix)) {
      // If we have a new natural pointer at the offset, clear out any old
      // offset pointer we computed. Unless it is the base pointer or
      // a non-instruction, we built a GEP we don't need. Zap it.
      if (OffsetPtr && OffsetPtr != OffsetBasePtr)
        if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
          assert(I->use_empty() && "Built a GEP with uses somehow!");
          I->eraseFromParent();
        }
      OffsetPtr = P;
      OffsetBasePtr = Ptr;
      // If we also found a pointer of the right type, we're done.
      if (P->getType() == PointerTy)
        break;
    }

    // Stash this pointer if we've found an i8*.
    if (Ptr->getType()->getPointerElementType()->isIntegerTy(8)) {
      Int8Ptr = Ptr;
      Int8PtrOffset = Offset;
    }

    // Peel off a layer of the pointer and update the offset appropriately.
    if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
    assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(Ptr).second);

  if (!OffsetPtr) {
    if (!Int8Ptr) {
      Int8Ptr = IRB.CreateBitCast(
          Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
          NamePrefix + "sroa_raw_cast");
      Int8PtrOffset = Offset;
    }

    OffsetPtr = Int8PtrOffset == 0
                    ? Int8Ptr
                    : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
                                            IRB.getInt(Int8PtrOffset),
                                            NamePrefix + "sroa_raw_idx");
  }
  Ptr = OffsetPtr;

  // On the off chance we were targeting i8*, guard the bitcast here.
  if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
    Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr,
                                                  TargetPtrTy,
                                                  NamePrefix + "sroa_cast");
  }

  return Ptr;
}

/// Compute the adjusted alignment for a load or store from an offset.
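///
/// For example (illustrative): a load with alignment 8 that is rewritten to
/// start 20 bytes into its original location can only be assumed aligned to
/// MinAlign(8, 20) == 4, the largest power of two dividing both values.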
static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
                                     const DataLayout &DL) {
  unsigned Alignment;
  Type *Ty;
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    Ty = LI->getType();
  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    Ty = SI->getValueOperand()->getType();
  } else {
    llvm_unreachable("Only loads and stores are allowed!");
  }

  if (!Alignment)
    Alignment = DL.getABITypeAlignment(Ty);

  return MinAlign(Alignment, Offset);
}

/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
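///
/// For example (illustrative, assuming 64-bit integral pointers): i64 and
/// double convert, as do i8* and i64, and <2 x i32> and i64, because the bit
/// widths match. i32 and i64 do not convert, and neither do i64 and a pointer
/// in a non-integral address space.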
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
  if (OldTy == NewTy)
    return true;

  // For integer types, we can't handle any bit-width differences. This would
  // break vector conversions requiring extension and would introduce
  // endianness issues when used in conjunction with loads and stores.
  if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
    assert(cast<IntegerType>(OldTy)->getBitWidth() !=
               cast<IntegerType>(NewTy)->getBitWidth() &&
           "We can't have the same bitwidth for different int types");
    return false;
  }

  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
    return false;
  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
    return false;

  // We can convert pointers to integers and vice-versa. Same for vectors
  // of pointers and integers.
  OldTy = OldTy->getScalarType();
  NewTy = NewTy->getScalarType();
  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
    if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
      return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
             cast<PointerType>(OldTy)->getPointerAddressSpace();
    }

    // We can convert integers to integral pointers, but not to non-integral
    // pointers.
    if (OldTy->isIntegerTy())
      return !DL.isNonIntegralPointerType(NewTy);

    // We can convert integral pointers to integers, but non-integral pointers
    // need to remain pointers.
    if (!DL.isNonIntegralPointerType(OldTy))
      return NewTy->isIntegerTy();

    return false;
  }

  return true;
}

/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                           Type *NewTy) {
  Type *OldTy = V->getType();
  assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertible to type");

  if (OldTy == NewTy)
    return V;

  assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
         "Integer types must be the exact same to convert.");

  // See if we need inttoptr for this type pair. A cast involving both scalars
  // and vectors requires an additional bitcast.
  if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
    if (OldTy->isVectorTy() && !NewTy->isVectorTy())
      return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                                NewTy);

    // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
    if (!OldTy->isVectorTy() && NewTy->isVectorTy())
      return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                                NewTy);

    return IRB.CreateIntToPtr(V, NewTy);
  }

  // See if we need ptrtoint for this type pair. A cast involving both scalars
  // and vectors requires an additional bitcast.
  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
    // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
    if (OldTy->isVectorTy() && !NewTy->isVectorTy())
      return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                               NewTy);

    // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
    if (!OldTy->isVectorTy() && NewTy->isVectorTy())
      return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                               NewTy);

    return IRB.CreatePtrToInt(V, NewTy);
  }

  return IRB.CreateBitCast(V, NewTy);
}

/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
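///
/// For example (illustrative): testing a <4 x i32> candidate with 4-byte
/// elements against a slice covering partition bytes [4, 12) yields
/// BeginIndex 1 and EndIndex 3, so a load or store there must be convertible
/// to the <2 x i32> subvector type.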
static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
                                            VectorType *Ty,
                                            uint64_t ElementSize,
                                            const DataLayout &DL) {
  // First validate the slice offsets.
  uint64_t BeginOffset =
      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
  uint64_t BeginIndex = BeginOffset / ElementSize;
  if (BeginIndex * ElementSize != BeginOffset ||
      BeginIndex >= Ty->getNumElements())
    return false;
  uint64_t EndOffset =
      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
  uint64_t EndIndex = EndOffset / ElementSize;
  if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
    return false;

  assert(EndIndex > BeginIndex && "Empty vector!");
  uint64_t NumElements = EndIndex - BeginIndex;
  Type *SliceTy = (NumElements == 1)
                      ? Ty->getElementType()
                      : VectorType::get(Ty->getElementType(), NumElements);

  Type *SplitIntTy =
      Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);

  Use *U = S.getUse();

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile())
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd())
      return false;
  } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
    // Disable vector promotion when there are loads or stores of an FCA.
    return false;
  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    Type *LTy = LI->getType();
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(LTy->isIntegerTy());
      LTy = SplitIntTy;
    }
    if (!canConvertValue(DL, SliceTy, LTy))
      return false;
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    if (SI->isVolatile())
      return false;
    Type *STy = SI->getValueOperand()->getType();
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(STy->isIntegerTy());
      STy = SplitIntTy;
    }
    if (!canConvertValue(DL, STy, SliceTy))
      return false;
  } else {
    return false;
  }

  return true;
}

/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We can only ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
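///
/// For example (illustrative): if the partition is covered both by a
/// <4 x float> load and a <2 x i64> store, both types become candidates; the
/// differing element types then cause the non-integer <4 x float> to be
/// discarded before the per-slice checks run on <2 x i64>.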
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
  // Collect the candidate types for vector-based promotion. Also track whether
  // we have different element types.
  SmallVector<VectorType *, 4> CandidateTys;
  Type *CommonEltTy = nullptr;
  bool HaveCommonEltTy = true;
  auto CheckCandidateType = [&](Type *Ty) {
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      CandidateTys.push_back(VTy);
      if (!CommonEltTy)
        CommonEltTy = VTy->getElementType();
      else if (CommonEltTy != VTy->getElementType())
        HaveCommonEltTy = false;
    }
  };
  // Consider any loads or stores that are the exact size of the slice.
  for (const Slice &S : P)
    if (S.beginOffset() == P.beginOffset() &&
        S.endOffset() == P.endOffset()) {
      if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
        CheckCandidateType(LI->getType());
      else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
        CheckCandidateType(SI->getValueOperand()->getType());
    }

  // If we didn't find a vector type, nothing to do here.
  if (CandidateTys.empty())
    return nullptr;

  // Remove non-integer vector types if we had multiple common element types.
  // FIXME: It'd be nice to replace them with integer vector types, but we can't
  // do that until all the backends are known to produce good code for all
  // integer vector types.
  if (!HaveCommonEltTy) {
    CandidateTys.erase(
        llvm::remove_if(CandidateTys,
                        [](VectorType *VTy) {
                          return !VTy->getElementType()->isIntegerTy();
                        }),
        CandidateTys.end());

    // If there were no integer vector types, give up.
    if (CandidateTys.empty())
      return nullptr;

    // Rank the remaining candidate vector types. This is easy because we know
    // they're all integer vectors. We sort by ascending number of elements.
    auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
      (void)DL;
      assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
             "Cannot have vector types of different sizes!");
      assert(RHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      assert(LHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      return RHSTy->getNumElements() < LHSTy->getNumElements();
    };
    llvm::sort(CandidateTys, RankVectorTypes);
    CandidateTys.erase(
        std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
        CandidateTys.end());
  } else {
    // The only way to have the same element type in every vector type is to
    // have the same vector type. Check that and remove all but one.
#ifndef NDEBUG
    for (VectorType *VTy : CandidateTys) {
      assert(VTy->getElementType() == CommonEltTy &&
             "Unaccounted for element type!");
      assert(VTy == CandidateTys[0] &&
             "Different vector types with the same element type!");
    }
#endif
    CandidateTys.resize(1);
  }

  // Try each vector type, and return the one which works.
  auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
    uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());

    // While the definition of LLVM vectors is bitpacked, we don't support
    // element sizes that aren't byte-sized.
    if (ElementSize % 8)
      return false;
    assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
           "vector size not a multiple of element size?");
    ElementSize /= 8;

    for (const Slice &S : P)
      if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
        return false;

    for (const Slice *S : P.splitSliceTails())
      if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
        return false;

    return true;
  };
  for (VectorType *VTy : CandidateTys)
    if (CheckVectorTypeForPromotion(VTy))
      return VTy;

  return nullptr;
}

/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
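///
/// For example (illustrative): when widening an 8-byte alloca, an i32 load at
/// relative offset 4 is acceptable because it ends exactly at Size == 8,
/// while an i64 load at relative offset 4 would extend past the alloca's type
/// and is rejected.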
static bool isIntegerWideningViableForSlice(const Slice &S,
                                            uint64_t AllocBeginOffset,
                                            Type *AllocaTy,
                                            const DataLayout &DL,
                                            bool &WholeAllocaOp) {
  uint64_t Size = DL.getTypeStoreSize(AllocaTy);

  uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
  uint64_t RelEnd = S.endOffset() - AllocBeginOffset;

  // We can't reasonably handle cases where the load or store extends past
  // the end of the alloca's type and into its padding.
  if (RelEnd > Size)
    return false;

  Use *U = S.getUse();

  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    // We can't handle loads that extend past the allocated memory.
    if (DL.getTypeStoreSize(LI->getType()) > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerLoad.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, AllocaTy, LI->getType())) {
      // Non-integer loads need to be convertible from the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    Type *ValueTy = SI->getValueOperand()->getType();
    if (SI->isVolatile())
      return false;
    // We can't handle stores that extend past the allocated memory.
    if (DL.getTypeStoreSize(ValueTy) > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerStore.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, ValueTy, AllocaTy)) {
      // Non-integer stores need to be convertible to the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd())
      return false;
  } else {
    return false;
  }

  return true;
}

/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
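///
/// For example (illustrative): a double alloca accessed by one covering i64
/// load and an i32 load of its second half can be widened, since double and
/// i64 are mutually convertible and the covering load satisfies the
/// WholeAllocaOp requirement.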
static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
                                    const DataLayout &DL) {
  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
  // Don't create integer types larger than the maximum bitwidth.
  if (SizeInBits > IntegerType::MAX_INT_BITS)
    return false;

  // Don't try to handle allocas with bit-padding.
  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
    return false;

  // We need to ensure that an integer type with the appropriate bitwidth can
  // be converted to the alloca type, whatever that is. We don't want to force
  // the alloca itself to have an integer type if there is a more suitable one.
  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
  if (!canConvertValue(DL, AllocaTy, IntTy) ||
      !canConvertValue(DL, IntTy, AllocaTy))
    return false;

  // While examining uses, we ensure that the alloca has a covering load or
  // store. We don't want to widen the integer operations only to fail to
  // promote due to some other unsplittable entry (which we may make splittable
  // later). However, if there are only splittable uses, go ahead and assume
  // that we cover the alloca.
  // FIXME: We shouldn't consider split slices that happen to start in the
  // partition here...
  bool WholeAllocaOp =
      P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);

  for (const Slice &S : P)
    if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  for (const Slice *S : P.splitSliceTails())
    if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  return WholeAllocaOp;
}

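// A worked example of the endianness adjustment below (illustrative): when
// extracting an i16 at byte Offset 2 from an i64, the little-endian shift is
// 8 * 2 == 16 bits, but the big-endian shift is 8 * (8 - 2 - 2) == 32 bits,
// because the element's bytes sit toward the high end of the wider value.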
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                             IntegerType *Ty, uint64_t Offset,
                             const Twine &Name) {
  LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
  IntegerType *IntTy = cast<IntegerType>(V->getType());
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (Ty != IntTy) {
    V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
    LLVM_DEBUG(dbgs() << "     trunced: " << *V << "\n");
  }
  return V;
}

static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
                            Value *V, uint64_t Offset, const Twine &Name) {
  IntegerType *IntTy = cast<IntegerType>(Old->getType());
  IntegerType *Ty = cast<IntegerType>(V->getType());
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot insert a larger integer!");
  LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
  if (Ty != IntTy) {
    V = IRB.CreateZExt(V, IntTy, Name + ".ext");
    LLVM_DEBUG(dbgs() << "    extended: " << *V << "\n");
  }
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element store outside of alloca store");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateShl(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }

  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
    APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
    Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
    LLVM_DEBUG(dbgs() << "      masked: " << *Old << "\n");
    V = IRB.CreateOr(Old, V, Name + ".insert");
    LLVM_DEBUG(dbgs() << "    inserted: " << *V << "\n");
  }
  return V;
}

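// For example (illustrative): extracting elements [1, 3) from a <4 x i32> %v
// becomes a single shuffle:
//
//   %sub = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 1, i32 2>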
static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
                            unsigned EndIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(V->getType());
  unsigned NumElements = EndIndex - BeginIndex;
  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

  if (NumElements == VecTy->getNumElements())
    return V;

  if (NumElements == 1) {
    V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
                                 Name + ".extract");
    LLVM_DEBUG(dbgs() << "     extract: " << *V << "\n");
    return V;
  }

  SmallVector<Constant *, 8> Mask;
  Mask.reserve(NumElements);
  for (unsigned i = BeginIndex; i != EndIndex; ++i)
    Mask.push_back(IRB.getInt32(i));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask), Name + ".extract");
  LLVM_DEBUG(dbgs() << "     shuffle: " << *V << "\n");
  return V;
}

static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
                           unsigned BeginIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(Old->getType());
  assert(VecTy && "Can only insert a vector into a vector");

  VectorType *Ty = dyn_cast<VectorType>(V->getType());
  if (!Ty) {
    // Single element to insert.
    V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
                                Name + ".insert");
    LLVM_DEBUG(dbgs() << "     insert: " << *V << "\n");
    return V;
  }

  assert(Ty->getNumElements() <= VecTy->getNumElements() &&
         "Too many elements!");
  if (Ty->getNumElements() == VecTy->getNumElements()) {
    assert(V->getType() == VecTy && "Vector type mismatch");
    return V;
  }
  unsigned EndIndex = BeginIndex + Ty->getNumElements();

  // When inserting a smaller vector into the larger one to store, we first
  // use a shuffle vector to widen it with undef elements, and then a select
  // to blend the widened vector with the loaded vector, keeping the loaded
  // elements outside the inserted range.
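  //
  // For example (illustrative): inserting a <2 x i32> %v at BeginIndex 1 into
  // a <4 x i32> %old first widens %v with mask <undef, 0, 1, undef> and then
  // selects with <i1 0, i1 1, i1 1, i1 0>, keeping %old's elements 0 and 3.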
  SmallVector<Constant *, 8> Mask;
  Mask.reserve(VecTy->getNumElements());
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    if (i >= BeginIndex && i < EndIndex)
      Mask.push_back(IRB.getInt32(i - BeginIndex));
    else
      Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask), Name + ".expand");
  LLVM_DEBUG(dbgs() << "    shuffle: " << *V << "\n");

  Mask.clear();
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));

  V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + ".blend");

  LLVM_DEBUG(dbgs() << "       blend: " << *V << "\n");
  return V;
}

/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class llvm::sroa::AllocaSliceRewriter
    : public InstVisitor<AllocaSliceRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AllocaSliceRewriter, bool>;

  using Base = InstVisitor<AllocaSliceRewriter, bool>;

  const DataLayout &DL;
  AllocaSlices &AS;
  SROA &Pass;
  AllocaInst &OldAI, &NewAI;
  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
  Type *NewAllocaTy;

  // This is a convenience and flag variable that will be null unless the new
  // alloca's integer operations should be widened to this integer type due to
  // passing isIntegerWideningViable above. If it is non-null, the desired
  // integer type will be stored here for easy access during rewriting.
  IntegerType *IntTy;

  // If we are rewriting an alloca partition which can be written as pure
  // vector operations, we stash extra information here. When VecTy is
  // non-null, we have some strict guarantees about the rewritten alloca:
  // - The new alloca is exactly the size of the vector type here.
  // - The accesses all either map to the entire vector or to a single
  //   element.
  // - The set of accessing instructions is only one of those handled above
  //   in isVectorPromotionViable. Generally these are the same access kinds
  //   which are promotable via mem2reg.
  VectorType *VecTy;
  Type *ElementTy;
  uint64_t ElementSize;

  // The original offset of the slice currently being rewritten relative to
  // the original alloca.
  uint64_t BeginOffset = 0;
  uint64_t EndOffset = 0;

  // The new offsets of the slice currently being rewritten relative to the
  // original alloca.
  uint64_t NewBeginOffset, NewEndOffset;

  uint64_t SliceSize;
  bool IsSplittable = false;
  bool IsSplit = false;
  Use *OldUse = nullptr;
  Instruction *OldPtr = nullptr;

  // Track post-rewrite users which are PHI nodes and Selects.
  SmallSetVector<PHINode *, 8> &PHIUsers;
  SmallSetVector<SelectInst *, 8> &SelectUsers;

  // Utility IR builder whose name prefix is set up for each visited use, and
  // whose insertion point is set to the user.
  IRBuilderTy IRB;

public:
  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
                      AllocaInst &OldAI, AllocaInst &NewAI,
                      uint64_t NewAllocaBeginOffset,
                      uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
                      VectorType *PromotableVecTy,
                      SmallSetVector<PHINode *, 8> &PHIUsers,
                      SmallSetVector<SelectInst *, 8> &SelectUsers)
      : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
        NewAllocaBeginOffset(NewAllocaBeginOffset),
        NewAllocaEndOffset(NewAllocaEndOffset),
        NewAllocaTy(NewAI.getAllocatedType()),
        IntTy(IsIntegerPromotable
                  ? Type::getIntNTy(
                        NewAI.getContext(),
                        DL.getTypeSizeInBits(NewAI.getAllocatedType()))
                  : nullptr),
        VecTy(PromotableVecTy),
        ElementTy(VecTy ? VecTy->getElementType() : nullptr),
        ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
        PHIUsers(PHIUsers), SelectUsers(SelectUsers),
        IRB(NewAI.getContext(), ConstantFolder()) {
    if (VecTy) {
      assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
             "Only multiple-of-8 sized vector elements are viable");
      ++NumVectorized;
    }
    assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
  }

  bool visit(AllocaSlices::const_iterator I) {
    bool CanSROA = true;
    BeginOffset = I->beginOffset();
    EndOffset = I->endOffset();
    IsSplittable = I->isSplittable();
    IsSplit =
        BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
    LLVM_DEBUG(dbgs() << "  rewriting " << (IsSplit ? "split " : ""));
    LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
    LLVM_DEBUG(dbgs() << "\n");

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);

    SliceSize = NewEndOffset - NewBeginOffset;
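    // Example (illustrative): rewriting a split slice [4, 20) against a new
    // alloca covering [8, 16) intersects to NewBeginOffset = 8 and
    // NewEndOffset = 16, so only SliceSize = 8 bytes are rewritten here.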

    OldUse = I->getUse();
    OldPtr = cast<Instruction>(OldUse->get());

    Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
    IRB.SetInsertPoint(OldUserI);
    IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
    IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");

    CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
    if (VecTy || IntTy)
      assert(CanSROA);
    return CanSROA;
  }

private:
  // Make sure the other visit overloads are visible.
  using Base::visit;

  // Every instruction which can end up as a user must have a rewrite rule.
  bool visitInstruction(Instruction &I) {
    LLVM_DEBUG(dbgs() << "    !!!! Cannot rewrite: " << I << "\n");
    llvm_unreachable("No rewrite rule for this instruction!");
  }

  Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
    // Note that the offset computation can use BeginOffset or NewBeginOffset
    // interchangeably for unsplit slices.
    assert(IsSplit || BeginOffset == NewBeginOffset);
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;

#ifndef NDEBUG
    StringRef OldName = OldPtr->getName();
    // Skip through the last '.sroa.' component of the name.
    size_t LastSROAPrefix = OldName.rfind(".sroa.");
    if (LastSROAPrefix != StringRef::npos) {
      OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
      // Look for an SROA slice index.
      size_t IndexEnd = OldName.find_first_not_of("0123456789");
      if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
        // Strip the index and look for the offset.
        OldName = OldName.substr(IndexEnd + 1);
        size_t OffsetEnd = OldName.find_first_not_of("0123456789");
        if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
          // Strip the offset.
          OldName = OldName.substr(OffsetEnd + 1);
      }
    }
    // Strip any SROA suffixes as well.
    OldName = OldName.substr(0, OldName.find(".sroa_"));
#endif

    return getAdjustedPtr(IRB, DL, &NewAI,
                          APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
                          PointerTy,
#ifndef NDEBUG
                          Twine(OldName) + "."
#else
                          Twine()
#endif
    );
  }

  /// Compute suitable alignment to access this slice of the *new* alloca.
  ///
  /// You can optionally pass a type to this routine and if that type's ABI
  /// alignment is itself suitable, this will return zero.
  unsigned getSliceAlign(Type *Ty = nullptr) {
    unsigned NewAIAlign = NewAI.getAlignment();
    if (!NewAIAlign)
      NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
    unsigned Align =
        MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
    return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
  }
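
  // Example (illustrative): if the new alloca is 8-byte aligned and this
  // slice starts 4 bytes into it, MinAlign(8, 4) yields a conservative
  // 4-byte alignment for the slice's accesses.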

  unsigned getIndex(uint64_t Offset) {
    assert(VecTy && "Can only call getIndex when rewriting a vector");
    uint64_t RelOffset = Offset - NewAllocaBeginOffset;
    assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
    uint32_t Index = RelOffset / ElementSize;
    assert(Index * ElementSize == RelOffset);
    return Index;
  }
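
  // Example (illustrative): with ElementSize == 4 and a new alloca beginning
  // at offset 16, getIndex(24) returns (24 - 16) / 4 == 2; an offset such as
  // 26 that is not an exact element boundary would trip the assertion above.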

  void deleteIfTriviallyDead(Value *V) {
    Instruction *I = cast<Instruction>(V);
    if (isInstructionTriviallyDead(I))
      Pass.DeadInsts.insert(I);
  }

  Value *rewriteVectorizedLoadInst() {
    unsigned BeginIndex = getIndex(NewBeginOffset);
    unsigned EndIndex = getIndex(NewEndOffset);
    assert(EndIndex > BeginIndex && "Empty vector!");

    Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                     NewAI.getAlignment(), "load");
    return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
  }
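
  // Sketch of the result (illustrative): loading 8 bytes at offset 4 of a
  // <4 x i32> alloca becomes a whole-vector load followed by an element
  // shuffle, roughly:
  //
  //   %load = load <4 x i32>, <4 x i32>* %alloca
  //   %vec = shufflevector <4 x i32> %load, <4 x i32> undef,
  //                        <2 x i32> <i32 1, i32 2>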

  Value *rewriteIntegerLoad(LoadInst &LI) {
    assert(IntTy && "We cannot extract an integer from the alloca");
    assert(!LI.isVolatile());
    Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                     NewAI.getAlignment(), "load");
    V = convertValue(DL, IRB, V, IntTy);
    assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
    if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
      IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
      V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
    }
    // It is possible that the extracted type is not the load type. This
    // happens if there is a load past the end of the alloca, and as
    // a consequence the slice is narrower but still a candidate for integer
    // lowering. To handle this case, we just zero extend the extracted
    // integer.
    assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
           "Can only handle an extract for an overly wide load");
    if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
      V = IRB.CreateZExt(V, LI.getType());
    return V;
  }
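
  // Sketch of the result (illustrative): an i16 load at offset 2 of an
  // i64-widened alloca becomes, on a little-endian target, roughly:
  //
  //   %load = load i64, i64* %alloca
  //   %shift = lshr i64 %load, 16
  //   %extract = trunc i64 %shift to i16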

  bool visitLoadInst(LoadInst &LI) {
    LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
    Value *OldOp = LI.getOperand(0);
    assert(OldOp == OldPtr);

    AAMDNodes AATags;
    LI.getAAMetadata(AATags);

    unsigned AS = LI.getPointerAddressSpace();

    Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
                             : LI.getType();
    const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
    bool IsPtrAdjusted = false;
    Value *V;
    if (VecTy) {
      V = rewriteVectorizedLoadInst();
    } else if (IntTy && LI.getType()->isIntegerTy()) {
      V = rewriteIntegerLoad(LI);
    } else if (NewBeginOffset == NewAllocaBeginOffset &&
               NewEndOffset == NewAllocaEndOffset &&
               (canConvertValue(DL, NewAllocaTy, TargetTy) ||
                (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
                 TargetTy->isIntegerTy()))) {
      LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                              NewAI.getAlignment(),
                                              LI.isVolatile(), LI.getName());
      if (AATags)
        NewLI->setAAMetadata(AATags);
      if (LI.isVolatile())
        NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());

      // Any !nonnull metadata or !range metadata on the old load is also valid
      // on the new load. This is true in some cases even when the loads have
      // different types, for example by mapping !nonnull metadata to !range
      // metadata by modeling the null pointer constant converted to the
      // integer type.
      // FIXME: Add support for range metadata here. Currently the utilities
      // for this don't propagate range metadata in trivial cases from one
      // integer load to another, don't handle non-addrspace-0 null pointers
      // correctly, and don't have any support for mapping ranges as the
      // integer type becomes wider or narrower.
      if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
        copyNonnullMetadata(LI, N, *NewLI);

      V = NewLI;

      // If this is an integer load past the end of the slice (which means the
      // bytes outside the slice are undef or this load is dead) just forcibly
      // fix the integer size with correct handling of endianness.
      if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
        if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
          if (AITy->getBitWidth() < TITy->getBitWidth()) {
            V = IRB.CreateZExt(V, TITy, "load.ext");
            if (DL.isBigEndian())
              V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
                                "endian_shift");
          }
    } else {
      Type *LTy = TargetTy->getPointerTo(AS);
      LoadInst *NewLI = IRB.CreateAlignedLoad(
          TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
          LI.isVolatile(), LI.getName());
      if (AATags)
        NewLI->setAAMetadata(AATags);
      if (LI.isVolatile())
        NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());

      V = NewLI;
      IsPtrAdjusted = true;
    }
    V = convertValue(DL, IRB, V, TargetTy);

    if (IsSplit) {
      assert(!LI.isVolatile());
      assert(LI.getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
             "Split load isn't smaller than original load");
      assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
             "Non-byte-multiple bit width");
      // Move the insertion point just past the load so that we can refer to
      // it.
      IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
      // Create a placeholder value with the same type as LI to use as the
      // basis for the new value. This allows us to replace the uses of LI with
      // the computed value, and then replace the placeholder with LI, leaving
      // LI only used for this computation.
      Value *Placeholder = new LoadInst(
          LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
      V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
                        "insert");
      LI.replaceAllUsesWith(V);
      Placeholder->replaceAllUsesWith(&LI);
      Placeholder->deleteValue();
    } else {
      LI.replaceAllUsesWith(V);
    }

    Pass.DeadInsts.insert(&LI);
    deleteIfTriviallyDead(OldOp);
    LLVM_DEBUG(dbgs() << "          to: " << *V << "\n");
    return !LI.isVolatile() && !IsPtrAdjusted;
  }
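
  // Example (illustrative): an i32 load split across two 2-byte slices is
  // rebuilt incrementally. Each slice's rewritten i16 is spliced into a
  // placeholder of the full i32 type at its byte offset, uses of the old load
  // are redirected to the combined value, and the placeholder is swapped back
  // for the old load so the next slice can keep patching bytes into it.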

  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
                                  AAMDNodes AATags) {
    if (V->getType() != VecTy) {
      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
      Type *SliceTy = (NumElements == 1)
                          ? ElementTy
                          : VectorType::get(ElementTy, NumElements);
      if (V->getType() != SliceTy)
        V = convertValue(DL, IRB, V, SliceTy);

      // Mix in the existing elements.
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "load");
      V = insertVector(IRB, Old, V, BeginIndex, "vec");
    }
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    if (AATags)
      Store->setAAMetadata(AATags);
    Pass.DeadInsts.insert(&SI);

    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }
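
  // Sketch of the result (illustrative): storing an i32 into element 1 of a
  // <4 x i32> alloca becomes a read-modify-write of the whole vector:
  //
  //   %old = load <4 x i32>, <4 x i32>* %alloca
  //   %vec = insertelement <4 x i32> %old, i32 %v, i32 1
  //   store <4 x i32> %vec, <4 x i32>* %alloca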

  bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
    assert(IntTy && "We cannot insert an integer into the alloca");
    assert(!SI.isVolatile());
    if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Old = convertValue(DL, IRB, Old, IntTy);
      assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
      uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
      V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
    }
    V = convertValue(DL, IRB, V, NewAllocaTy);
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      Store->setAAMetadata(AATags);
    Pass.DeadInsts.insert(&SI);
    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }
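
  // Sketch of the result (illustrative): an i16 store at offset 2 of an
  // i64-widened alloca merges into the wide value, roughly (little-endian):
  //
  //   %old = load i64, i64* %alloca
  //   %masked = and i64 %old, 0xFFFFFFFF0000FFFF
  //   %ext = zext i16 %v to i64
  //   %shifted = shl i64 %ext, 16
  //   %ins = or i64 %masked, %shifted
  //   store i64 %ins, i64* %alloca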

  bool visitStoreInst(StoreInst &SI) {
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    Value *OldOp = SI.getOperand(1);
    assert(OldOp == OldPtr);

    AAMDNodes AATags;
    SI.getAAMetadata(AATags);

    Value *V = SI.getValueOperand();

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after promoting this alloca.
    if (V->getType()->isPointerTy())
      if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
        Pass.PostPromotionWorklist.insert(AI);

    if (SliceSize < DL.getTypeStoreSize(V->getType())) {
      assert(!SI.isVolatile());
      assert(V->getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(DL.typeSizeEqualsStoreSize(V->getType()) &&
             "Non-byte-multiple bit width");
      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
      V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
                         "extract");
    }

    if (VecTy)
      return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
    if (IntTy && V->getType()->isIntegerTy())
      return rewriteIntegerStore(V, SI, AATags);

    const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize;
    StoreInst *NewSI;
    if (NewBeginOffset == NewAllocaBeginOffset &&
        NewEndOffset == NewAllocaEndOffset &&
        (canConvertValue(DL, V->getType(), NewAllocaTy) ||
         (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
          V->getType()->isIntegerTy()))) {
      // If this is an integer store past the end of the slice (and thus the
      // bytes past that point are irrelevant or this is unreachable), truncate
      // the value prior to storing.
      if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
        if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
          if (VITy->getBitWidth() > AITy->getBitWidth()) {
            if (DL.isBigEndian())
              V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
                                 "endian_shift");
            V = IRB.CreateTrunc(V, AITy, "load.trunc");
          }

      V = convertValue(DL, IRB, V, NewAllocaTy);
      NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                     SI.isVolatile());
    } else {
      unsigned AS = SI.getPointerAddressSpace();
      Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
      NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
                                     SI.isVolatile());
    }
    NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      NewSI->setAAMetadata(AATags);
    if (SI.isVolatile())
      NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
    Pass.DeadInsts.insert(&SI);
    deleteIfTriviallyDead(OldOp);

    LLVM_DEBUG(dbgs() << "          to: " << *NewSI << "\n");
    return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
  }

  /// Compute an integer value from splatting an i8 across the given
  /// number of bytes.
  ///
  /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
  /// call this routine.
  /// FIXME: Heed the advice above.
  ///
  /// \param V The i8 value to splat.
  /// \param Size The number of bytes in the output (assuming i8 is one byte)
  Value *getIntegerSplat(Value *V, unsigned Size) {
    assert(Size > 0 && "Expected a positive number of bytes.");
    IntegerType *VTy = cast<IntegerType>(V->getType());
    assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
    if (Size == 1)
      return V;

    Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
    V = IRB.CreateMul(
        IRB.CreateZExt(V, SplatIntTy, "zext"),
        ConstantExpr::getUDiv(
            Constant::getAllOnesValue(SplatIntTy),
            ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
                                  SplatIntTy)),
        "isplat");
    return V;
  }
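
  // Example (illustrative): splatting the i8 value 0xAB across 4 bytes
  // multiplies its zext by 0xFFFFFFFF / 0xFF == 0x01010101, producing
  // 0xABABABAB; the divide is between constants and folds away, so no
  // runtime division is emitted.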

  /// Compute a vector splat for a given element value.
  Value *getVectorSplat(Value *V, unsigned NumElements) {
    V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
    LLVM_DEBUG(dbgs() << "       splat: " << *V << "\n");
    return V;
  }

  bool visitMemSetInst(MemSetInst &II) {
    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
    assert(II.getRawDest() == OldPtr);

    AAMDNodes AATags;
    II.getAAMetadata(AATags);

    // If the memset has a variable size, it cannot be split, just adjust the
    // pointer to the new alloca.
    if (!isa<Constant>(II.getLength())) {
      assert(!IsSplit);
      assert(NewBeginOffset == BeginOffset);
      II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
      II.setDestAlignment(getSliceAlign());

      deleteIfTriviallyDead(OldPtr);
      return false;
    }

    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    Type *AllocaTy = NewAI.getAllocatedType();
    Type *ScalarTy = AllocaTy->getScalarType();

    const bool CanContinue = [&]() {
      if (VecTy || IntTy)
        return true;
      if (BeginOffset > NewAllocaBeginOffset ||
          EndOffset < NewAllocaEndOffset)
        return false;
      auto *C = cast<ConstantInt>(II.getLength());
      if (C->getBitWidth() > 64)
        return false;
      const auto Len = C->getZExtValue();
      auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
      auto *SrcTy = VectorType::get(Int8Ty, Len);
      return canConvertValue(DL, SrcTy, AllocaTy) &&
             DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy));
    }();

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memset.
    if (!CanContinue) {
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
      CallInst *New = IRB.CreateMemSet(
          getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
          getSliceAlign(), II.isVolatile());
      if (AATags)
        New->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }

    // If we can represent this as a simple value, we have to build the actual
    // value to store, which requires expanding the byte present in memset to
    // a sensible representation for the alloca type. This is essentially
    // splatting the byte to a sufficiently wide integer, splatting it across
    // any desired vector width, and bitcasting to the final type.
    Value *V;

    if (VecTy) {
      // If this is a memset of a vectorized alloca, insert it.
      assert(ElementTy == ScalarTy);

      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

      Value *Splat =
          getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
      Splat = convertValue(DL, IRB, Splat, ElementTy);
      if (NumElements > 1)
        Splat = getVectorSplat(Splat, NumElements);

      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
    } else if (IntTy) {
      // If this is a memset on an alloca where we can widen stores, insert the
      // set integer.
      assert(!II.isVolatile());

      uint64_t Size = NewEndOffset - NewBeginOffset;
      V = getIntegerSplat(II.getValue(), Size);

      if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                    EndOffset != NewAllocaEndOffset)) {
        Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                           NewAI.getAlignment(), "oldload");
        Old = convertValue(DL, IRB, Old, IntTy);
        uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
        V = insertInteger(DL, IRB, Old, V, Offset, "insert");
      } else {
        assert(V->getType() == IntTy &&
               "Wrong type for an alloca wide integer!");
      }
      V = convertValue(DL, IRB, V, AllocaTy);
    } else {
      // Established these invariants above.
      assert(NewBeginOffset == NewAllocaBeginOffset);
      assert(NewEndOffset == NewAllocaEndOffset);

      V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
      if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
        V = getVectorSplat(V, AllocaVecTy->getNumElements());

      V = convertValue(DL, IRB, V, AllocaTy);
    }

    StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                            II.isVolatile());
    if (AATags)
      New->setAAMetadata(AATags);
    LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
    return !II.isVolatile();
  }
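
  // Example (illustrative): memset(%a, 0xAB, 8) over an i64-promotable
  // alloca becomes a single wide store of the 0xABABABABABABABAB splat
  // rather than a call, letting mem2reg promote the alloca afterwards.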

  bool visitMemTransferInst(MemTransferInst &II) {
    // Rewriting of memory transfer instructions can be a bit tricky. We break
    // them into two categories: split intrinsics and unsplit intrinsics.

    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");

    AAMDNodes AATags;
    II.getAAMetadata(AATags);

    bool IsDest = &II.getRawDestUse() == OldUse;
    assert((IsDest && II.getRawDest() == OldPtr) ||
           (!IsDest && II.getRawSource() == OldPtr));

    unsigned SliceAlign = getSliceAlign();

    // For unsplit intrinsics, we simply modify the source and destination
    // pointers in place. This isn't just an optimization, it is a matter of
    // correctness. With unsplit intrinsics we may be dealing with transfers
    // within a single alloca before SROA ran, or with transfers that have
    // a variable length. We may also be dealing with memmove instead of
    // memcpy, and so simply updating the pointers is all that is necessary to
    // update both the source and dest of a single call.
    if (!IsSplittable) {
      Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
      if (IsDest) {
        II.setDest(AdjustedPtr);
        II.setDestAlignment(SliceAlign);
      } else {
        II.setSource(AdjustedPtr);
        II.setSourceAlignment(SliceAlign);
      }

      LLVM_DEBUG(dbgs() << "          to: " << II << "\n");
      deleteIfTriviallyDead(OldPtr);
      return false;
    }
    // For split transfer intrinsics we have an incredibly useful assurance:
    // the source and destination do not reside within the same alloca, and at
    // least one of them does not escape. This means that we can replace
    // memmove with memcpy, and we don't need to worry about all manner of
    // downsides to splitting and transforming the operations.

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memcpy.
    bool EmitMemCpy =
        !VecTy && !IntTy &&
        (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
         SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
         !NewAI.getAllocatedType()->isSingleValueType());

    // If we're just going to emit a memcpy, the alloca hasn't changed, and the
    // size hasn't been shrunk based on analysis of the viable range, this is
    // a no-op.
    if (EmitMemCpy && &OldAI == &NewAI) {
      // Ensure the start lines up.
      assert(NewBeginOffset == BeginOffset);

      // Rewrite the size as needed.
      if (NewEndOffset != EndOffset)
        II.setLength(ConstantInt::get(II.getLength()->getType(),
                                      NewEndOffset - NewBeginOffset));
      return false;
    }
    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after rewriting this instruction.
    Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
    if (AllocaInst *AI =
            dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
      assert(AI != &OldAI && AI != &NewAI &&
             "Splittable transfers cannot reach the same alloca on both ends.");
      Pass.Worklist.insert(AI);
    }

    Type *OtherPtrTy = OtherPtr->getType();
    unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();

    // Compute the relative offset for the other pointer within the transfer.
    unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
    APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
    unsigned OtherAlign =
        IsDest ? II.getSourceAlignment() : II.getDestAlignment();
    OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1,
                          OtherOffset.zextOrTrunc(64).getZExtValue());

    if (EmitMemCpy) {
      // Compute the other pointer, folding as much as possible to produce
      // a single, simple GEP in most cases.
      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                OtherPtr->getName() + ".");

      Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);

      Value *DestPtr, *SrcPtr;
      unsigned DestAlign, SrcAlign;
      // Note: IsDest is true iff we're copying into the new alloca slice.
      if (IsDest) {
        DestPtr = OurPtr;
        DestAlign = SliceAlign;
        SrcPtr = OtherPtr;
        SrcAlign = OtherAlign;
      } else {
        DestPtr = OtherPtr;
        DestAlign = OtherAlign;
        SrcPtr = OurPtr;
        SrcAlign = SliceAlign;
      }
      CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
                                       Size, II.isVolatile());
      if (AATags)
        New->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }
| 2985 | |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 2986 | bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && |
| 2987 | NewEndOffset == NewAllocaEndOffset; |
| 2988 | uint64_t Size = NewEndOffset - NewBeginOffset; |
| 2989 | unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; |
| 2990 | unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 2991 | unsigned NumElements = EndIndex - BeginIndex; |
Chandler Carruth | 113dc64 | 2014-12-20 02:39:18 +0000 | [diff] [blame] | 2992 | IntegerType *SubIntTy = |
| 2993 | IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 2994 | |
Chandler Carruth | 286d87e | 2014-02-26 08:25:02 +0000 | [diff] [blame] | 2995 | // Reset the other pointer type to match the register type we're going to |
| 2996 | // use, but using the address space of the original other pointer. |
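// (An illustrative sketch: for a <4 x float> alloca being rewritten as a
// vector, a slice covering two elements gives OtherTy == <2 x float> and a
// one-element slice gives plain float; an integer-widened slice gives the
// SubIntTy computed above; otherwise the whole new alloca type is reused.)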
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 2997 | Type *OtherTy; |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 2998 | if (VecTy && !IsWholeAlloca) { |
| 2999 | if (NumElements == 1) |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3000 | OtherTy = VecTy->getElementType(); |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3001 | else |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3002 | OtherTy = VectorType::get(VecTy->getElementType(), NumElements); |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3003 | } else if (IntTy && !IsWholeAlloca) { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3004 | OtherTy = SubIntTy; |
Chandler Carruth | 286d87e | 2014-02-26 08:25:02 +0000 | [diff] [blame] | 3005 | } else { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3006 | OtherTy = NewAllocaTy; |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3007 | } |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3008 | OtherPtrTy = OtherTy->getPointerTo(OtherAS); |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3009 | |
Chandler Carruth | 181ed05 | 2014-02-26 05:33:36 +0000 | [diff] [blame] | 3010 | Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, |
Chandler Carruth | cb93cd2 | 2014-02-25 11:19:56 +0000 | [diff] [blame] | 3011 | OtherPtr->getName() + "."); |
Pete Cooper | 67cf9a7 | 2015-11-19 05:56:52 +0000 | [diff] [blame] | 3012 | unsigned SrcAlign = OtherAlign; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3013 | Value *DstPtr = &NewAI; |
Chandler Carruth | aa72b93 | 2014-02-26 07:29:54 +0000 | [diff] [blame] | 3014 | unsigned DstAlign = SliceAlign; |
| 3015 | if (!IsDest) { |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3016 | std::swap(SrcPtr, DstPtr); |
Chandler Carruth | aa72b93 | 2014-02-26 07:29:54 +0000 | [diff] [blame] | 3017 | std::swap(SrcAlign, DstAlign); |
| 3018 | } |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3019 | |
| 3020 | Value *Src; |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3021 | if (VecTy && !IsWholeAlloca && !IsDest) { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3022 | Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, |
| 3023 | NewAI.getAlignment(), "load"); |
Chandler Carruth | 34f0c7f | 2013-03-21 09:52:18 +0000 | [diff] [blame] | 3024 | Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); |
Chandler Carruth | 49c8eea | 2012-10-15 10:24:43 +0000 | [diff] [blame] | 3025 | } else if (IntTy && !IsWholeAlloca && !IsDest) { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3026 | Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, |
| 3027 | NewAI.getAlignment(), "load"); |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3028 | Src = convertValue(DL, IRB, Src, IntTy); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3029 | uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3030 | Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3031 | } else { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3032 | LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, |
| 3033 | II.isVolatile(), "copyload"); |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3034 | if (AATags) |
| 3035 | Load->setAAMetadata(AATags); |
| 3036 | Src = Load; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3037 | } |
| 3038 | |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3039 | if (VecTy && !IsWholeAlloca && IsDest) { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3040 | Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, |
| 3041 | NewAI.getAlignment(), "oldload"); |
Chandler Carruth | 34f0c7f | 2013-03-21 09:52:18 +0000 | [diff] [blame] | 3042 | Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); |
Chandler Carruth | 21eb4e9 | 2012-12-17 14:51:24 +0000 | [diff] [blame] | 3043 | } else if (IntTy && !IsWholeAlloca && IsDest) { |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3044 | Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, |
| 3045 | NewAI.getAlignment(), "oldload"); |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3046 | Old = convertValue(DL, IRB, Old, IntTy); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3047 | uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3048 | Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); |
| 3049 | Src = convertValue(DL, IRB, Src, NewAllocaTy); |
Chandler Carruth | 49c8eea | 2012-10-15 10:24:43 +0000 | [diff] [blame] | 3050 | } |
| 3051 | |
Chandler Carruth | 871ba72 | 2012-09-26 10:27:46 +0000 | [diff] [blame] | 3052 | StoreInst *Store = cast<StoreInst>( |
Chandler Carruth | aa72b93 | 2014-02-26 07:29:54 +0000 | [diff] [blame] | 3053 | IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3054 | if (AATags) |
| 3055 | Store->setAAMetadata(AATags); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3056 | LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3057 | return !II.isVolatile(); |
| 3058 | } |
| 3059 | |
| 3060 | bool visitIntrinsicInst(IntrinsicInst &II) { |
Vedant Kumar | b264d69 | 2018-12-21 21:49:40 +0000 | [diff] [blame] | 3061 | assert(II.isLifetimeStartOrEnd()); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3062 | LLVM_DEBUG(dbgs() << " original: " << II << "\n"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3063 | assert(II.getArgOperand(1) == OldPtr); |
| 3064 | |
| 3065 | // Record this instruction for deletion. |
Chandler Carruth | 18db795 | 2012-11-20 01:12:50 +0000 | [diff] [blame] | 3066 | Pass.DeadInsts.insert(&II); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3067 | |
Eli Friedman | 5096775 | 2016-11-28 21:50:34 +0000 | [diff] [blame] | 3068 | // Lifetime intrinsics are only promotable if they cover the whole alloca. |
| 3069 | // Therefore, we drop lifetime intrinsics which don't cover the whole |
| 3070 | // alloca. |
| 3071 | // (In theory, intrinsics which partially cover an alloca could be |
| 3072 | // promoted, but PromoteMemToReg doesn't handle that case.) |
| 3073 | // FIXME: Check whether the alloca is promotable before dropping the |
| 3074 | // lifetime intrinsics? |
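// For example (illustrative only): a lifetime.start covering the entire new
// alloca is re-emitted below with the slice's size, while one covering only
// bytes [4,8) of a 12-byte alloca is simply dropped, having already been
// queued for deletion above.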
| 3075 | if (NewBeginOffset != NewAllocaBeginOffset || |
| 3076 | NewEndOffset != NewAllocaEndOffset) |
| 3077 | return true; |
| 3078 | |
Chandler Carruth | 113dc64 | 2014-12-20 02:39:18 +0000 | [diff] [blame] | 3079 | ConstantInt *Size = |
| 3080 | ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3081 | NewEndOffset - NewBeginOffset); |
Gabor Buella | 3ec170c | 2019-01-16 12:06:17 +0000 | [diff] [blame] | 3082 | // Lifetime intrinsics always expect an i8* so directly get such a pointer |
| 3083 | // for the new alloca slice. |
| 3084 | Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace()); |
| 3085 | Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3086 | Value *New; |
| 3087 | if (II.getIntrinsicID() == Intrinsic::lifetime_start) |
| 3088 | New = IRB.CreateLifetimeStart(Ptr, Size); |
| 3089 | else |
| 3090 | New = IRB.CreateLifetimeEnd(Ptr, Size); |
| 3091 | |
Edwin Vane | 82f80d4 | 2013-01-29 17:42:24 +0000 | [diff] [blame] | 3092 | (void)New; |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3093 | LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); |
Eli Friedman | 2a65dd1 | 2016-08-08 01:30:53 +0000 | [diff] [blame] | 3094 | |
Eli Friedman | 5096775 | 2016-11-28 21:50:34 +0000 | [diff] [blame] | 3095 | return true; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3096 | } |
| 3097 | |
Eli Friedman | 94d3e4d | 2018-08-30 18:59:24 +0000 | [diff] [blame] | 3098 | void fixLoadStoreAlign(Instruction &Root) { |
| 3099 | // This algorithm implements the same visitor loop as |
| 3100 | // hasUnsafePHIOrSelectUse, and fixes the alignment of each load |
| 3101 | // or store found. |
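// For instance (an illustration, not from a test): a load annotated
// "align 8" reached through a PHI of slice pointers whose slice is only
// 4-byte aligned is clamped to "align 4" here, since the rewritten pointer
// may no longer carry the stronger alignment.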
| 3102 | SmallPtrSet<Instruction *, 4> Visited; |
| 3103 | SmallVector<Instruction *, 4> Uses; |
| 3104 | Visited.insert(&Root); |
| 3105 | Uses.push_back(&Root); |
| 3106 | do { |
| 3107 | Instruction *I = Uses.pop_back_val(); |
| 3108 | |
| 3109 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { |
| 3110 | unsigned LoadAlign = LI->getAlignment(); |
| 3111 | if (!LoadAlign) |
| 3112 | LoadAlign = DL.getABITypeAlignment(LI->getType()); |
| 3113 | LI->setAlignment(std::min(LoadAlign, getSliceAlign())); |
| 3114 | continue; |
| 3115 | } |
| 3116 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) { |
| 3117 | unsigned StoreAlign = SI->getAlignment(); |
| 3118 | if (!StoreAlign) { |
| 3119 | Value *Op = SI->getOperand(0); |
| 3120 | StoreAlign = DL.getABITypeAlignment(Op->getType()); |
| 3121 | } |
| 3122 | SI->setAlignment(std::min(StoreAlign, getSliceAlign())); |
| 3123 | continue; |
| 3124 | } |
| 3125 | |
Matt Arsenault | 282dac7 | 2019-06-14 21:38:31 +0000 | [diff] [blame] | 3126 | assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) || |
| 3127 | isa<PHINode>(I) || isa<SelectInst>(I) || |
| 3128 | isa<GetElementPtrInst>(I)); |
Eli Friedman | 94d3e4d | 2018-08-30 18:59:24 +0000 | [diff] [blame] | 3129 | for (User *U : I->users()) |
| 3130 | if (Visited.insert(cast<Instruction>(U)).second) |
| 3131 | Uses.push_back(cast<Instruction>(U)); |
| 3132 | } while (!Uses.empty()); |
| 3133 | } |
| 3134 | |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3135 | bool visitPHINode(PHINode &PN) { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3136 | LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3137 | assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); |
| 3138 | assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); |
Chandler Carruth | 82a5754 | 2012-10-01 10:54:05 +0000 | [diff] [blame] | 3139 | |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3140 | // We would like to compute a new pointer in only one place, but have it be |
| 3141 | // as local as possible to the PHI. To do that, we re-use the location of |
| 3142 | // the old pointer, which necessarily must be in the right position to |
| 3143 | // dominate the PHI. |
Chandler Carruth | 5117553 | 2014-02-25 11:12:04 +0000 | [diff] [blame] | 3144 | IRBuilderTy PtrBuilder(IRB); |
David Majnemer | d4cffcf | 2014-09-01 21:20:14 +0000 | [diff] [blame] | 3145 | if (isa<PHINode>(OldPtr)) |
Duncan P. N. Exon Smith | be4d8cb | 2015-10-13 19:26:58 +0000 | [diff] [blame] | 3146 | PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); |
David Majnemer | d4cffcf | 2014-09-01 21:20:14 +0000 | [diff] [blame] | 3147 | else |
| 3148 | PtrBuilder.SetInsertPoint(OldPtr); |
Chandler Carruth | 5117553 | 2014-02-25 11:12:04 +0000 | [diff] [blame] | 3149 | PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3150 | |
Chandler Carruth | 47954c8 | 2014-02-26 05:12:43 +0000 | [diff] [blame] | 3151 | Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); |
Chandler Carruth | 82a5754 | 2012-10-01 10:54:05 +0000 | [diff] [blame] | 3152 | // Replace the operands which were using the old pointer. |
Benjamin Kramer | 7ddd705 | 2012-10-20 12:04:57 +0000 | [diff] [blame] | 3153 | std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3154 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3155 | LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); |
Chandler Carruth | 82a5754 | 2012-10-01 10:54:05 +0000 | [diff] [blame] | 3156 | deleteIfTriviallyDead(OldPtr); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3157 | |
Eli Friedman | 94d3e4d | 2018-08-30 18:59:24 +0000 | [diff] [blame] | 3158 | // Fix the alignment of any loads or stores using this PHI node. |
| 3159 | fixLoadStoreAlign(PN); |
| 3160 | |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 3161 | // PHIs can't be promoted on their own, but often can be speculated. We |
| 3162 | // check the speculation outside of the rewriter so that we see the |
| 3163 | // fully-rewritten alloca. |
| 3164 | PHIUsers.insert(&PN); |
| 3165 | return true; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3166 | } |
| 3167 | |
| 3168 | bool visitSelectInst(SelectInst &SI) { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3169 | LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); |
Benjamin Kramer | 0212dc2 | 2013-04-21 17:48:39 +0000 | [diff] [blame] | 3170 | assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && |
| 3171 | "Pointer isn't an operand!"); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3172 | assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); |
| 3173 | assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); |
Chandler Carruth | 82a5754 | 2012-10-01 10:54:05 +0000 | [diff] [blame] | 3174 | |
Chandler Carruth | 47954c8 | 2014-02-26 05:12:43 +0000 | [diff] [blame] | 3175 | Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); |
Benjamin Kramer | 0212dc2 | 2013-04-21 17:48:39 +0000 | [diff] [blame] | 3176 | // Replace the operands which were using the old pointer. |
| 3177 | if (SI.getOperand(1) == OldPtr) |
| 3178 | SI.setOperand(1, NewPtr); |
| 3179 | if (SI.getOperand(2) == OldPtr) |
| 3180 | SI.setOperand(2, NewPtr); |
| 3181 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3182 | LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3183 | deleteIfTriviallyDead(OldPtr); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 3184 | |
Eli Friedman | 94d3e4d | 2018-08-30 18:59:24 +0000 | [diff] [blame] | 3185 | // Fix the alignment of any loads or stores using this select. |
| 3186 | fixLoadStoreAlign(SI); |
| 3187 | |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 3188 | // Selects can't be promoted on their own, but often can be speculated. We |
| 3189 | // check the speculation outside of the rewriter so that we see the |
| 3190 | // fully-rewritten alloca. |
| 3191 | SelectUsers.insert(&SI); |
| 3192 | return true; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3193 | } |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3194 | }; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3195 | |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3196 | namespace { |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3197 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 3198 | /// Visitor to rewrite aggregate loads and stores as scalar. |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3199 | /// |
| 3200 | /// This pass aggressively rewrites all aggregate loads and stores on |
| 3201 | /// a particular pointer (or any pointer derived from it which we can identify) |
| 3202 | /// with scalar loads and stores. |
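/// As a sketch (illustrative IR, not lifted from a test), a load such as
///   %v = load { i32, float }, { i32, float }* %p
/// becomes two scalar loads through inbounds GEPs at indices 0 and 1,
/// recombined with insertvalue instructions to rebuild %v.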
| 3203 | class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { |
| 3204 | // Befriend the base class so it can delegate to private visit methods. |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3205 | friend class InstVisitor<AggLoadStoreRewriter, bool>; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3206 | |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3207 | /// Queue of pointer uses to analyze and potentially rewrite. |
| 3208 | SmallVector<Use *, 8> Queue; |
| 3209 | |
| 3210 | /// Set to prevent us from cycling with phi nodes and loops. |
| 3211 | SmallPtrSet<User *, 8> Visited; |
| 3212 | |
| 3213 | /// The current pointer use being rewritten. This is used to dig up the used |
| 3214 | /// value (as opposed to the user). |
| 3215 | Use *U; |
| 3216 | |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3217 | /// Used to calculate offsets, and hence alignment, of subobjects. |
| 3218 | const DataLayout &DL; |
| 3219 | |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3220 | public: |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3221 | AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {} |
| 3222 | |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3223 | /// Rewrite loads and stores through a pointer and all pointers derived from |
| 3224 | /// it. |
| 3225 | bool rewrite(Instruction &I) { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3226 | LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3227 | enqueueUsers(I); |
| 3228 | bool Changed = false; |
| 3229 | while (!Queue.empty()) { |
| 3230 | U = Queue.pop_back_val(); |
| 3231 | Changed |= visit(cast<Instruction>(U->getUser())); |
| 3232 | } |
| 3233 | return Changed; |
| 3234 | } |
| 3235 | |
| 3236 | private: |
| 3237 | /// Enqueue all the users of the given instruction for further processing. |
| 3238 | /// This uses a set to de-duplicate users. |
| 3239 | void enqueueUsers(Instruction &I) { |
Chandler Carruth | cdf4788 | 2014-03-09 03:16:01 +0000 | [diff] [blame] | 3240 | for (Use &U : I.uses()) |
David Blaikie | 70573dc | 2014-11-19 07:49:26 +0000 | [diff] [blame] | 3241 | if (Visited.insert(U.getUser()).second) |
Chandler Carruth | cdf4788 | 2014-03-09 03:16:01 +0000 | [diff] [blame] | 3242 | Queue.push_back(&U); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3243 | } |
| 3244 | |
| 3245 | // Conservative default is to not rewrite anything. |
| 3246 | bool visitInstruction(Instruction &I) { return false; } |
| 3247 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 3248 | /// Generic recursive split emission class. |
Chandler Carruth | 113dc64 | 2014-12-20 02:39:18 +0000 | [diff] [blame] | 3249 | template <typename Derived> class OpSplitter { |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3250 | protected: |
| 3251 | /// The builder used to form new instructions. |
Chandler Carruth | d177f86 | 2013-03-20 07:30:36 +0000 | [diff] [blame] | 3252 | IRBuilderTy IRB; |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3253 | |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3254 | /// The indices to be used with insert- or extractvalue to select the 
 | 3255 | /// appropriate value within the aggregate. 
| 3256 | SmallVector<unsigned, 4> Indices; |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3257 | |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3258 | /// The indices to a GEP instruction which will move Ptr to the correct slot |
| 3259 | /// within the aggregate. |
| 3260 | SmallVector<Value *, 4> GEPIndices; |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3261 | |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3262 | /// The base pointer of the original op, used as a base for GEPing the |
| 3263 | /// split operations. |
| 3264 | Value *Ptr; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3265 | |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3266 | /// The base pointee type being GEPed into. |
| 3267 | Type *BaseTy; |
| 3268 | |
| 3269 | /// Known alignment of the base pointer. |
| 3270 | unsigned BaseAlign; |
| 3271 | |
| 3272 | /// To calculate offset of each component so we can correctly deduce |
| 3273 | /// alignments. |
| 3274 | const DataLayout &DL; |
| 3275 | |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3276 | /// Initialize the splitter with an insertion point and Ptr, starting with a 
 | 3277 | /// single zero GEP index. 
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3278 | OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, |
| 3279 | unsigned BaseAlign, const DataLayout &DL) |
| 3280 | : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), |
| 3281 | BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {} |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3282 | |
| 3283 | public: |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 3284 | /// Generic recursive split emission routine. |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3285 | /// |
| 3286 | /// This method recursively splits an aggregate op (load or store) into |
| 3287 | /// scalar or vector ops. It splits recursively until it hits a single value |
| 3288 | /// and emits that single value operation via the template argument. |
| 3289 | /// |
| 3290 | /// The logic of this routine relies on GEPs and insertvalue and |
| 3291 | /// extractvalue all operating with the same fundamental index list, merely |
| 3292 | /// formatted differently (GEPs need actual values). |
| 3293 | /// |
| 3294 | /// \param Ty The type being split recursively into smaller ops. |
| 3295 | /// \param Agg The aggregate value being built up or stored, depending on |
| 3296 | /// whether this is splitting a load or a store respectively. |
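/// For instance (an illustrative type): splitting { i32, [2 x float] }
/// visits the value index lists {0}, {1,0} and {1,1}, emitting one scalar
/// op per leaf; the GEP form of each list simply carries an extra leading
/// zero index.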
| 3297 | void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3298 | if (Ty->isSingleValueType()) { |
| 3299 | unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices); |
| 3300 | return static_cast<Derived *>(this)->emitFunc( |
| 3301 | Ty, Agg, MinAlign(BaseAlign, Offset), Name); |
| 3302 | } |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3303 | |
| 3304 | if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { |
| 3305 | unsigned OldSize = Indices.size(); |
| 3306 | (void)OldSize; |
| 3307 | for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; |
| 3308 | ++Idx) { |
| 3309 | assert(Indices.size() == OldSize && "Did not return to the old size"); |
| 3310 | Indices.push_back(Idx); |
| 3311 | GEPIndices.push_back(IRB.getInt32(Idx)); |
| 3312 | emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); |
| 3313 | GEPIndices.pop_back(); |
| 3314 | Indices.pop_back(); |
| 3315 | } |
| 3316 | return; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3317 | } |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3318 | |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3319 | if (StructType *STy = dyn_cast<StructType>(Ty)) { |
| 3320 | unsigned OldSize = Indices.size(); |
| 3321 | (void)OldSize; |
| 3322 | for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; |
| 3323 | ++Idx) { |
| 3324 | assert(Indices.size() == OldSize && "Did not return to the old size"); |
| 3325 | Indices.push_back(Idx); |
| 3326 | GEPIndices.push_back(IRB.getInt32(Idx)); |
| 3327 | emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); |
| 3328 | GEPIndices.pop_back(); |
| 3329 | Indices.pop_back(); |
| 3330 | } |
| 3331 | return; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3332 | } |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3333 | |
| 3334 | llvm_unreachable("Only arrays and structs are aggregate loadable types"); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3335 | } |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3336 | }; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3337 | |
Benjamin Kramer | 73a9e4a | 2012-09-18 17:06:32 +0000 | [diff] [blame] | 3338 | struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3339 | AAMDNodes AATags; |
| 3340 | |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3341 | LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, |
| 3342 | AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL) |
| 3343 | : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, |
| 3344 | DL), AATags(AATags) {} |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3345 | |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3346 | /// Emit a leaf load of a single value. This is called at the leaves of the |
| 3347 | /// recursive emission to actually load values. |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3348 | void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) { |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3349 | assert(Ty->isSingleValueType()); |
| 3350 | // Load the single value and insert it using the indices. |
David Blaikie | aa41cd5 | 2015-04-03 21:33:42 +0000 | [diff] [blame] | 3351 | Value *GEP = |
James Y Knight | 7716075 | 2019-02-01 20:44:47 +0000 | [diff] [blame] | 3352 | IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3353 | LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load"); |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3354 | if (AATags) |
| 3355 | Load->setAAMetadata(AATags); |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3356 | Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3357 | LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3358 | } |
| 3359 | }; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3360 | |
| 3361 | bool visitLoadInst(LoadInst &LI) { |
| 3362 | assert(LI.getPointerOperand() == *U); |
| 3363 | if (!LI.isSimple() || LI.getType()->isSingleValueType()) |
| 3364 | return false; |
| 3365 | |
| 3366 | // We have an aggregate being loaded, split it apart. |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3367 | LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3368 | AAMDNodes AATags; |
| 3369 | LI.getAAMetadata(AATags); |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3370 | LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags, |
| 3371 | getAdjustedAlignment(&LI, 0, DL), DL); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3372 | Value *V = UndefValue::get(LI.getType()); |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3373 | Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3374 | LI.replaceAllUsesWith(V); |
| 3375 | LI.eraseFromParent(); |
| 3376 | return true; |
| 3377 | } |
| 3378 | |
Benjamin Kramer | 73a9e4a | 2012-09-18 17:06:32 +0000 | [diff] [blame] | 3379 | struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3380 | StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, |
| 3381 | AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL) |
| 3382 | : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, |
| 3383 | DL), |
| 3384 | AATags(AATags) {} |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3385 | AAMDNodes AATags; |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3386 | /// Emit a leaf store of a single value. This is called at the leaves of the |
| 3387 | /// recursive emission to actually produce stores. |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3388 | void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) { |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3389 | assert(Ty->isSingleValueType()); |
| 3390 | // Extract the single value and store it using the indices. |
Patrik Hagglund | a83706e | 2016-06-20 10:19:00 +0000 | [diff] [blame] | 3391 | // |
| 3392 | // The gep and extractvalue values are factored out of the CreateStore |
| 3393 | // call to make the output independent of the argument evaluation order. |
Patrik Hagglund | 4e0bd84 | 2016-06-20 11:19:58 +0000 | [diff] [blame] | 3394 | Value *ExtractValue = |
| 3395 | IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); |
| 3396 | Value *InBoundsGEP = |
James Y Knight | 7716075 | 2019-02-01 20:44:47 +0000 | [diff] [blame] | 3397 | IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3398 | StoreInst *Store = |
| 3399 | IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Align); |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3400 | if (AATags) |
| 3401 | Store->setAAMetadata(AATags); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3402 | LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3403 | } |
| 3404 | }; |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3405 | |
| 3406 | bool visitStoreInst(StoreInst &SI) { |
| 3407 | if (!SI.isSimple() || SI.getPointerOperand() != *U) |
| 3408 | return false; |
| 3409 | Value *V = SI.getValueOperand(); |
| 3410 | if (V->getType()->isSingleValueType()) |
| 3411 | return false; |
| 3412 | |
| 3413 | // We have an aggregate being stored, split it apart. |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3414 | LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); |
Ivan A. Kosarev | 53270d0 | 2018-02-16 10:10:29 +0000 | [diff] [blame] | 3415 | AAMDNodes AATags; |
| 3416 | SI.getAAMetadata(AATags); |
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 3417 | StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags, |
| 3418 | getAdjustedAlignment(&SI, 0, DL), DL); |
Benjamin Kramer | 65f8c88 | 2012-09-18 16:20:46 +0000 | [diff] [blame] | 3419 | Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3420 | SI.eraseFromParent(); |
| 3421 | return true; |
| 3422 | } |
| 3423 | |
| 3424 | bool visitBitCastInst(BitCastInst &BC) { |
| 3425 | enqueueUsers(BC); |
| 3426 | return false; |
| 3427 | } |
| 3428 | |
Matt Arsenault | 282dac7 | 2019-06-14 21:38:31 +0000 | [diff] [blame] | 3429 | bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { |
| 3430 | enqueueUsers(ASC); |
| 3431 | return false; |
| 3432 | } |
| 3433 | |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3434 | bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { |
| 3435 | enqueueUsers(GEPI); |
| 3436 | return false; |
| 3437 | } |
| 3438 | |
| 3439 | bool visitPHINode(PHINode &PN) { |
| 3440 | enqueueUsers(PN); |
| 3441 | return false; |
| 3442 | } |
| 3443 | |
| 3444 | bool visitSelectInst(SelectInst &SI) { |
| 3445 | enqueueUsers(SI); |
| 3446 | return false; |
| 3447 | } |
| 3448 | }; |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3449 | |
| 3450 | } // end anonymous namespace |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 3451 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 3452 | /// Strip aggregate type wrapping. |
Chandler Carruth | ba93199 | 2012-10-13 10:49:33 +0000 | [diff] [blame] | 3453 | /// |
| 3454 | /// This removes no-op aggregate types wrapping an underlying type. It will |
| 3455 | /// strip as many layers of types as it can without changing either the type |
| 3456 | /// size or the allocated size. |
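/// For example (illustrative): { [1 x i32] } strips down to i32, since each
/// wrapper layer has the same size and alloc size as i32, while a struct
/// such as { i32, i8 } is returned unchanged because it is strictly larger
/// than its first element.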
| 3457 | static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { |
| 3458 | if (Ty->isSingleValueType()) |
| 3459 | return Ty; |
| 3460 | |
| 3461 | uint64_t AllocSize = DL.getTypeAllocSize(Ty); |
| 3462 | uint64_t TypeSize = DL.getTypeSizeInBits(Ty); |
| 3463 | |
| 3464 | Type *InnerTy; |
| 3465 | if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { |
| 3466 | InnerTy = ArrTy->getElementType(); |
| 3467 | } else if (StructType *STy = dyn_cast<StructType>(Ty)) { |
| 3468 | const StructLayout *SL = DL.getStructLayout(STy); |
| 3469 | unsigned Index = SL->getElementContainingOffset(0); |
| 3470 | InnerTy = STy->getElementType(Index); |
| 3471 | } else { |
| 3472 | return Ty; |
| 3473 | } |
| 3474 | |
| 3475 | if (AllocSize > DL.getTypeAllocSize(InnerTy) || |
| 3476 | TypeSize > DL.getTypeSizeInBits(InnerTy)) |
| 3477 | return Ty; |
| 3478 | |
| 3479 | return stripAggregateTypeWrapping(DL, InnerTy); |
| 3480 | } |
| 3481 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 3482 | /// Try to find a partition of the aggregate type passed in for a given |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3483 | /// offset and size. |
| 3484 | /// |
| 3485 | /// This recurses through the aggregate type and tries to compute a subtype |
| 3486 | /// based on the offset and size. When the offset and size span a sub-section |
Chandler Carruth | 054a40a | 2012-09-14 11:08:31 +0000 | [diff] [blame] | 3487 | /// of an array, it will even compute a new array type for that sub-section, |
| 3488 | /// and the same for structs. |
| 3489 | /// |
| 3490 | /// Note that this routine is very strict and tries to find a partition of the |
| 3491 | /// type which produces the *exact* right offset and size. It is not forgiving |
 | 3492 | /// when the size or offset causes either end of the type-based partition to be off. 
| 3493 | /// Also, this is a best-effort routine. It is reasonable to give up and not |
| 3494 | /// return a type if necessary. |
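/// For example (an illustrative sketch): partitioning { i32, [8 x i8] } at
/// offset 4 with size 8 yields [8 x i8], while offset 2 with size 4 crosses
/// out of the leading i32 and so yields nullptr.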
Chandler Carruth | 113dc64 | 2014-12-20 02:39:18 +0000 | [diff] [blame] | 3495 | static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, |
| 3496 | uint64_t Size) { |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3497 | if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) |
| 3498 | return stripAggregateTypeWrapping(DL, Ty); |
| 3499 | if (Offset > DL.getTypeAllocSize(Ty) || |
| 3500 | (DL.getTypeAllocSize(Ty) - Offset) < Size) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3501 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3502 | |
| 3503 | if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) { |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3504 | Type *ElementTy = SeqTy->getElementType(); |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3505 | uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3506 | uint64_t NumSkippedElements = Offset / ElementSize; |
Peter Collingbourne | bc07052 | 2016-12-02 03:20:58 +0000 | [diff] [blame] | 3507 | if (NumSkippedElements >= SeqTy->getNumElements()) |
| 3508 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3509 | Offset -= NumSkippedElements * ElementSize; |
| 3510 | |
| 3511 | // First check if we need to recurse. |
| 3512 | if (Offset > 0 || Size < ElementSize) { |
| 3513 | // Bail if the partition ends in a different array element. |
| 3514 | if ((Offset + Size) > ElementSize) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3515 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3516 | // Recurse through the element type trying to peel off offset bytes. |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3517 | return getTypePartition(DL, ElementTy, Offset, Size); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3518 | } |
| 3519 | assert(Offset == 0); |
| 3520 | |
| 3521 | if (Size == ElementSize) |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3522 | return stripAggregateTypeWrapping(DL, ElementTy); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3523 | assert(Size > ElementSize); |
| 3524 | uint64_t NumElements = Size / ElementSize; |
| 3525 | if (NumElements * ElementSize != Size) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3526 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3527 | return ArrayType::get(ElementTy, NumElements); |
| 3528 | } |
| 3529 | |
| 3530 | StructType *STy = dyn_cast<StructType>(Ty); |
| 3531 | if (!STy) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3532 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3533 | |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3534 | const StructLayout *SL = DL.getStructLayout(STy); |
Chandler Carruth | 054a40a | 2012-09-14 11:08:31 +0000 | [diff] [blame] | 3535 | if (Offset >= SL->getSizeInBytes()) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3536 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3537 | uint64_t EndOffset = Offset + Size; |
| 3538 | if (EndOffset > SL->getSizeInBytes()) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3539 | return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3540 | |
| 3541 | unsigned Index = SL->getElementContainingOffset(Offset); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3542 | Offset -= SL->getElementOffset(Index); |
| 3543 | |
| 3544 | Type *ElementTy = STy->getElementType(Index); |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3545 | uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3546 | if (Offset >= ElementSize) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3547 | return nullptr; // The offset points into alignment padding. |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3548 | |
| 3549 | // See if any partition must be contained by the element. |
| 3550 | if (Offset > 0 || Size < ElementSize) { |
| 3551 | if ((Offset + Size) > ElementSize) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3552 | return nullptr; |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3553 | return getTypePartition(DL, ElementTy, Offset, Size); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3554 | } |
| 3555 | assert(Offset == 0); |
| 3556 | |
| 3557 | if (Size == ElementSize) |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3558 | return stripAggregateTypeWrapping(DL, ElementTy); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3559 | |
| 3560 | StructType::element_iterator EI = STy->element_begin() + Index, |
| 3561 | EE = STy->element_end(); |
| 3562 | if (EndOffset < SL->getSizeInBytes()) { |
| 3563 | unsigned EndIndex = SL->getElementContainingOffset(EndOffset); |
| 3564 | if (Index == EndIndex) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3565 | return nullptr; // Within a single element and its padding. |
Chandler Carruth | 054a40a | 2012-09-14 11:08:31 +0000 | [diff] [blame] | 3566 | |
| 3567 | // Don't try to form "natural" types if the elements don't line up with the |
| 3568 | // expected size. |
| 3569 | // FIXME: We could potentially recurse down through the last element in the |
| 3570 | // sub-struct to find a natural end point. |
| 3571 | if (SL->getElementOffset(EndIndex) != EndOffset) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3572 | return nullptr; |
Chandler Carruth | 054a40a | 2012-09-14 11:08:31 +0000 | [diff] [blame] | 3573 | |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3574 | assert(Index < EndIndex); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3575 | EE = STy->element_begin() + EndIndex; |
| 3576 | } |
| 3577 | |
| 3578 | // Try to build up a sub-structure. |
Chandler Carruth | 113dc64 | 2014-12-20 02:39:18 +0000 | [diff] [blame] | 3579 | StructType *SubTy = |
| 3580 | StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); |
Chandler Carruth | 90a735d | 2013-07-19 07:21:28 +0000 | [diff] [blame] | 3581 | const StructLayout *SubSL = DL.getStructLayout(SubTy); |
Chandler Carruth | 054a40a | 2012-09-14 11:08:31 +0000 | [diff] [blame] | 3582 | if (Size != SubSL->getSizeInBytes()) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 3583 | return nullptr; // The sub-struct doesn't have quite the size needed. |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3584 | |
Chandler Carruth | 054a40a | 2012-09-14 11:08:31 +0000 | [diff] [blame] | 3585 | return SubTy; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 3586 | } |
| 3587 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 3588 | /// Pre-split loads and stores to simplify rewriting. |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3589 | /// |
| 3590 | /// We want to break up the splittable load+store pairs as much as |
| 3591 | /// possible. This is important to do as a preprocessing step, as once we |
| 3592 | /// start rewriting the accesses to partitions of the alloca we lose the |
| 3593 | /// necessary information to correctly split apart paired loads and stores |
| 3594 | /// which both point into this alloca. The case to consider is something like |
| 3595 | /// the following: |
| 3596 | /// |
| 3597 | /// %a = alloca [12 x i8] |
| 3598 | /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0 |
| 3599 | /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4 |
| 3600 | /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8 |
| 3601 | /// %iptr1 = bitcast i8* %gep1 to i64* |
| 3602 | /// %iptr2 = bitcast i8* %gep2 to i64* |
| 3603 | /// %fptr1 = bitcast i8* %gep1 to float* |
| 3604 | /// %fptr2 = bitcast i8* %gep2 to float* |
| 3605 | /// %fptr3 = bitcast i8* %gep3 to float* |
| 3606 | /// store float 0.0, float* %fptr1 |
| 3607 | /// store float 1.0, float* %fptr2 |
| 3608 | /// %v = load i64* %iptr1 |
| 3609 | /// store i64 %v, i64* %iptr2 |
| 3610 | /// %f1 = load float* %fptr2 |
| 3611 | /// %f2 = load float* %fptr3 |
| 3612 | /// |
| 3613 | /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and |
| 3614 | /// promote everything so we recover the 2 SSA values that should have been |
| 3615 | /// there all along. |
| 3616 | /// |
| 3617 | /// \returns true if any changes are made. |
| 3618 | bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3619 | LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3620 | |
| 3621 | // Track the loads and stores which are candidates for pre-splitting here, in |
| 3622 | // the order they first appear during the partition scan. These give stable |
| 3623 | // iteration order and a basis for tracking which loads and stores we |
| 3624 | // actually split. |
| 3625 | SmallVector<LoadInst *, 4> Loads; |
| 3626 | SmallVector<StoreInst *, 4> Stores; |
| 3627 | |
| 3628 | // We need to accumulate the splits required of each load or store where we |
| 3629 | // can find them via a direct lookup. This is important to cross-check loads |
| 3630 | // and stores against each other. We also track the slice so that we can kill |
| 3631 | // all the slices that end up split. |
| 3632 | struct SplitOffsets { |
| 3633 | Slice *S; |
| 3634 | std::vector<uint64_t> Splits; |
| 3635 | }; |
| 3636 | SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap; |
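// For the 12-byte example in this function's documentation comment, both the
// i64 load and the i64 store would record Splits == {4}: a single split four
// bytes into each eight-byte access, measured from the access's own start.
// (A worked illustration, not a value computed from a test.)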
| 3637 | |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3638 | // Track loads out of this alloca which cannot, for any reason, be pre-split. |
| 3639 | // This is important as we also cannot pre-split stores of those loads! |
| 3640 | // FIXME: This is all pretty gross. It means that we can be more aggressive |
| 3641 | // in pre-splitting when the load feeding the store happens to come from |
| 3642 | // a separate alloca. Put another way, the effectiveness of SROA would be |
| 3643 | // decreased by a frontend which just concatenated all of its local allocas |
| 3644 | // into one big flat alloca. But defeating such patterns is exactly the job |
| 3645 | // SROA is tasked with! Sadly, to not have this discrepancy we would have |
 | 3646 | // SROA is tasked with! Sadly, to not have this discrepancy we would have to 
| 3647 | // that feeds it *and all stores*. That makes pre-splitting much harder, but |
| 3648 | // maybe it would make it more principled? |
| 3649 | SmallPtrSet<LoadInst *, 8> UnsplittableLoads; |
| 3650 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3651 | LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3652 | for (auto &P : AS.partitions()) { |
| 3653 | for (Slice &S : P) { |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3654 | Instruction *I = cast<Instruction>(S.getUse()->getUser()); |
Chandler Carruth | 37f1f12 | 2016-03-10 15:31:17 +0000 | [diff] [blame] | 3655 | if (!S.isSplittable() || S.endOffset() <= P.endOffset()) { |
| 3656 | // If this is a load we have to track that it can't participate in any |
| 3657 | // pre-splitting. If this is a store of a load we have to track that |
| 3658 | // that load also can't participate in any pre-splitting. |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3659 | if (auto *LI = dyn_cast<LoadInst>(I)) |
| 3660 | UnsplittableLoads.insert(LI); |
Chandler Carruth | 37f1f12 | 2016-03-10 15:31:17 +0000 | [diff] [blame] | 3661 | else if (auto *SI = dyn_cast<StoreInst>(I)) |
| 3662 | if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) |
| 3663 | UnsplittableLoads.insert(LI); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3664 | continue; |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3665 | } |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3666 | assert(P.endOffset() > S.beginOffset() && |
| 3667 | "Empty or backwards partition!"); |
| 3668 | |
| 3669 | // Determine if this is a pre-splittable slice. |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3670 | if (auto *LI = dyn_cast<LoadInst>(I)) { |
| 3671 | assert(!LI->isVolatile() && "Cannot split volatile loads!"); |
| 3672 | |
| 3673 | // The load must be used exclusively to store into other pointers for |
| 3674 | // us to be able to arbitrarily pre-split it. The stores must also be |
| 3675 | // simple to avoid changing semantics. |
| 3676 | auto IsLoadSimplyStored = [](LoadInst *LI) { |
| 3677 | for (User *LU : LI->users()) { |
| 3678 | auto *SI = dyn_cast<StoreInst>(LU); |
| 3679 | if (!SI || !SI->isSimple()) |
| 3680 | return false; |
| 3681 | } |
| 3682 | return true; |
| 3683 | }; |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3684 | if (!IsLoadSimplyStored(LI)) { |
| 3685 | UnsplittableLoads.insert(LI); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3686 | continue; |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3687 | } |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3688 | |
| 3689 | Loads.push_back(LI); |
Chandler Carruth | d94a596 | 2016-03-10 14:16:18 +0000 | [diff] [blame] | 3690 | } else if (auto *SI = dyn_cast<StoreInst>(I)) { |
| 3691 | if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) |
| 3692 | // Skip stores *of* pointers. FIXME: This shouldn't even be possible! |
Chandler Carruth | 994cde8 | 2015-01-01 12:01:03 +0000 | [diff] [blame] | 3693 | continue; |
| 3694 | auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); |
| 3695 | if (!StoredLoad || !StoredLoad->isSimple()) |
| 3696 | continue; |
| 3697 | assert(!SI->isVolatile() && "Cannot split volatile stores!"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3698 | |
Chandler Carruth | 994cde8 | 2015-01-01 12:01:03 +0000 | [diff] [blame] | 3699 | Stores.push_back(SI); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3700 | } else { |
| 3701 | // Other uses cannot be pre-split. |
| 3702 | continue; |
| 3703 | } |
| 3704 | |
| 3705 | // Record the initial split. |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3706 | LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3707 | auto &Offsets = SplitOffsetsMap[I]; |
| 3708 | assert(Offsets.Splits.empty() && |
| 3709 | "Should not have splits the first time we see an instruction!"); |
| 3710 | Offsets.S = &S; |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 3711 | Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3712 | } |
| 3713 | |
| 3714 | // Now scan the already split slices, and add a split for any of them which |
| 3715 | // we're going to pre-split. |
| 3716 | for (Slice *S : P.splitSliceTails()) { |
| 3717 | auto SplitOffsetsMapI = |
| 3718 | SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); |
| 3719 | if (SplitOffsetsMapI == SplitOffsetsMap.end()) |
| 3720 | continue; |
| 3721 | auto &Offsets = SplitOffsetsMapI->second; |
| 3722 | |
| 3723 | assert(Offsets.S == S && "Found a mismatched slice!"); |
| 3724 | assert(!Offsets.Splits.empty() && |
| 3725 | "Cannot have an empty set of splits on the second partition!"); |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 3726 | assert(Offsets.Splits.back() == |
| 3727 | P.beginOffset() - Offsets.S->beginOffset() && |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3728 | "Previous split does not end where this one begins!"); |
| 3729 | |
| 3730 | // Record each split. The last partition's end isn't needed as the size |
| 3731 | // of the slice dictates that. |
| 3732 | if (S->endOffset() > P.endOffset()) |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 3733 | Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3734 | } |
| 3735 | } |
| 3736 | |
| 3737 | // We may have split loads where some of their stores are split stores. For |
| 3738 | // such loads and stores, we can only pre-split them if their splits exactly |
| 3739 | // match relative to their starting offset. We have to verify this prior to |
| 3740 | // any rewriting. |
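// As a hypothetical illustration: a load pre-split at relative offsets {4}
// whose value feeds a store pre-split at {2, 6} cannot be rewritten
// consistently, so the store is dropped from the candidate list and the load
// is marked unsplittable below.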
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3741 | Stores.erase( |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3742 | llvm::remove_if(Stores, |
| 3743 | [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { |
| 3744 | // Lookup the load we are storing in our map of split |
| 3745 | // offsets. |
| 3746 | auto *LI = cast<LoadInst>(SI->getValueOperand()); |
| 3747 | // If it was completely unsplittable, then we're done, |
| 3748 | // and this store can't be pre-split. |
| 3749 | if (UnsplittableLoads.count(LI)) |
| 3750 | return true; |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3751 | |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3752 | auto LoadOffsetsI = SplitOffsetsMap.find(LI); |
| 3753 | if (LoadOffsetsI == SplitOffsetsMap.end()) |
| 3754 | return false; // Unrelated loads are definitely safe. |
| 3755 | auto &LoadOffsets = LoadOffsetsI->second; |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3756 | |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3757 | // Now lookup the store's offsets. |
| 3758 | auto &StoreOffsets = SplitOffsetsMap[SI]; |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3759 | |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3760 | // If the relative offsets of each split in the load and |
| 3761 | // store match exactly, then we can split them and we |
| 3762 | // don't need to remove them here. |
| 3763 | if (LoadOffsets.Splits == StoreOffsets.Splits) |
| 3764 | return false; |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3765 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3766 | LLVM_DEBUG( |
| 3767 | dbgs() |
| 3768 | << " Mismatched splits for load and store:\n" |
| 3769 | << " " << *LI << "\n" |
| 3770 | << " " << *SI << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3771 | |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3772 | // We've found a store and load that we need to split |
| 3773 | // with mismatched relative splits. Just give up on them |
| 3774 | // and remove both instructions from our list of |
| 3775 | // candidates. |
| 3776 | UnsplittableLoads.insert(LI); |
| 3777 | return true; |
| 3778 | }), |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3779 | Stores.end()); |
Benjamin Kramer | df005cb | 2015-08-08 18:27:36 +0000 | [diff] [blame] | 3780 | // Now we have to go *back* through all the stores, because a later store may |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3781 | // have caused an earlier store's load to become unsplittable and if it is |
| 3782 | // unsplittable for the later store, then we can't rely on it being split in |
| 3783 | // the earlier store either. |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3784 | Stores.erase(llvm::remove_if(Stores, |
| 3785 | [&UnsplittableLoads](StoreInst *SI) { |
| 3786 | auto *LI = |
| 3787 | cast<LoadInst>(SI->getValueOperand()); |
| 3788 | return UnsplittableLoads.count(LI); |
| 3789 | }), |
Chandler Carruth | 73b0164 | 2015-01-05 04:17:53 +0000 | [diff] [blame] | 3790 | Stores.end()); |
| 3791 | // Once we've established all the loads that can't be split for some reason, |
| 3792 | // filter out any that made it into our list. |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 3793 | Loads.erase(llvm::remove_if(Loads, |
| 3794 | [&UnsplittableLoads](LoadInst *LI) { |
| 3795 | return UnsplittableLoads.count(LI); |
| 3796 | }), |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3797 | Loads.end()); |
| 3798 | |
| 3799 | // If no loads or stores are left, there is no pre-splitting to be done for |
| 3800 | // this alloca. |
| 3801 | if (Loads.empty() && Stores.empty()) |
| 3802 | return false; |
| 3803 | |
| 3804 | // From here on, we can't fail and will be building new accesses, so rig up |
| 3805 | // an IR builder. |
| 3806 | IRBuilderTy IRB(&AI); |
| 3807 | |
| 3808 | // Collect the new slices which we will merge into the alloca slices. |
| 3809 | SmallVector<Slice, 4> NewSlices; |
| 3810 | |
| 3811 | // Track any allocas we end up splitting loads and stores for so that we can |
| 3812 | // iterate on them. |
| 3813 | SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas; |
| 3814 | |
| 3815 | // At this point, we have collected all of the loads and stores we can |
| 3816 | // pre-split, and the specific splits needed for them. We actually do the |
| 3817 | // splitting in a specific order so that we can handle the case where one of |
| 3818 | // the loads is the value operand of one of the stores. |
| 3819 | // |
| 3820 | // First, we rewrite all of the split loads, and just accumulate each split |
| 3821 | // load in a parallel structure. We also build the slices for them and append |
| 3822 | // them to the alloca slices. |
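| | // For instance, given |
| | //   %v = load i64, i64* %p |
| | //   store i64 %v, i64* %q |
| | // with both slices split at byte 4, the load is rewritten first into two |
| | // i32 loads, and the store is then rewritten to store those two pieces. |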
| 3823 | SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; |
| 3824 | std::vector<LoadInst *> SplitLoads; |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 3825 | const DataLayout &DL = AI.getModule()->getDataLayout(); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3826 | for (LoadInst *LI : Loads) { |
| 3827 | SplitLoads.clear(); |
| 3828 | |
| 3829 | IntegerType *Ty = cast<IntegerType>(LI->getType()); |
| 3830 | uint64_t LoadSize = Ty->getBitWidth() / 8; |
| 3831 | assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); |
| 3832 | |
| 3833 | auto &Offsets = SplitOffsetsMap[LI]; |
| 3834 | assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && |
| 3835 | "Slice size should always match load size exactly!"); |
| 3836 | uint64_t BaseOffset = Offsets.S->beginOffset(); |
| 3837 | assert(BaseOffset + LoadSize > BaseOffset && |
| 3838 | "Cannot represent alloca access size using 64-bit integers!"); |
| 3839 | |
| 3840 | Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); |
Duncan P. N. Exon Smith | be4d8cb | 2015-10-13 19:26:58 +0000 | [diff] [blame] | 3841 | IRB.SetInsertPoint(LI); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3842 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3843 | LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3844 | |
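| | // Offsets.Splits holds the relative interior split points; e.g. splits |
| | // {4} on an 8-byte load produce the two parts [0,4) and [4,8). |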
| 3845 | uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); |
| 3846 | int Idx = 0, Size = Offsets.Splits.size(); |
| 3847 | for (;;) { |
| 3848 | auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); |
Yaxun Liu | 7c44f34 | 2017-06-27 18:26:06 +0000 | [diff] [blame] | 3849 | auto AS = LI->getPointerAddressSpace(); |
| 3850 | auto *PartPtrTy = PartTy->getPointerTo(AS); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3851 | LoadInst *PLoad = IRB.CreateAlignedLoad( |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3852 | PartTy, |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 3853 | getAdjustedPtr(IRB, DL, BasePtr, |
Elena Demikhovsky | 945b7e5 | 2018-02-14 06:58:08 +0000 | [diff] [blame] | 3854 | APInt(DL.getIndexSizeInBits(AS), PartOffset), |
Chandler Carruth | 994cde8 | 2015-01-01 12:01:03 +0000 | [diff] [blame] | 3855 | PartPtrTy, BasePtr->getName() + "."), |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 3856 | getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3857 | LI->getName()); |
Michael Kruse | 978ba61 | 2018-12-20 04:58:07 +0000 | [diff] [blame] | 3858 | PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, |
| 3859 | LLVMContext::MD_access_group}); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3860 | |
| 3861 | // Append this load onto the list of split loads so we can find it later |
| 3862 | // to rewrite the stores. |
| 3863 | SplitLoads.push_back(PLoad); |
| 3864 | |
| 3865 | // Now build a new slice for the alloca. |
Chandler Carruth | 994cde8 | 2015-01-01 12:01:03 +0000 | [diff] [blame] | 3866 | NewSlices.push_back( |
| 3867 | Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, |
| 3868 | &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 3869 | /*IsSplittable*/ false)); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3870 | LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() |
| 3871 | << ", " << NewSlices.back().endOffset() |
| 3872 | << "): " << *PLoad << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3873 | |
Chandler Carruth | 29c22fa | 2015-01-02 00:10:22 +0000 | [diff] [blame] | 3874 | // See if we've handled all the splits. |
| 3875 | if (Idx >= Size) |
| 3876 | break; |
| 3877 | |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3878 | // Set up the next partition. |
| 3879 | PartOffset = Offsets.Splits[Idx]; |
| 3880 | ++Idx; |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3881 | PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; |
| 3882 | } |
| 3883 | |
| 3884 | // Now that we have the split loads, do the slow walk over all uses of the |
| 3885 | // load and rewrite them as split stores, or save the split loads to use |
| 3886 | // below if the store is going to be split there anyway. |
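| | // A store is deferred when it still has its own entry in SplitOffsetsMap, |
| | // i.e. it is itself slated for pre-splitting in the second phase below. |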
| 3887 | bool DeferredStores = false; |
| 3888 | for (User *LU : LI->users()) { |
| 3889 | StoreInst *SI = cast<StoreInst>(LU); |
| 3890 | if (!Stores.empty() && SplitOffsetsMap.count(SI)) { |
| 3891 | DeferredStores = true; |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3892 | LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI |
| 3893 | << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3894 | continue; |
| 3895 | } |
| 3896 | |
Chandler Carruth | c39eaa5 | 2015-01-01 23:26:16 +0000 | [diff] [blame] | 3897 | Value *StoreBasePtr = SI->getPointerOperand(); |
Duncan P. N. Exon Smith | be4d8cb | 2015-10-13 19:26:58 +0000 | [diff] [blame] | 3898 | IRB.SetInsertPoint(SI); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3899 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3900 | LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3901 | |
| 3902 | for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { |
| 3903 | LoadInst *PLoad = SplitLoads[Idx]; |
| 3904 | uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1]; |
Chandler Carruth | 994cde8 | 2015-01-01 12:01:03 +0000 | [diff] [blame] | 3905 | auto *PartPtrTy = |
| 3906 | PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3907 | |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 3908 | auto AS = SI->getPointerAddressSpace(); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3909 | StoreInst *PStore = IRB.CreateAlignedStore( |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 3910 | PLoad, |
| 3911 | getAdjustedPtr(IRB, DL, StoreBasePtr, |
Elena Demikhovsky | 945b7e5 | 2018-02-14 06:58:08 +0000 | [diff] [blame] | 3912 | APInt(DL.getIndexSizeInBits(AS), PartOffset), |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 3913 | PartPtrTy, StoreBasePtr->getName() + "."), |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 3914 | getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); |
Michael Kruse | 978ba61 | 2018-12-20 04:58:07 +0000 | [diff] [blame] | 3915 | PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, |
| 3916 | LLVMContext::MD_access_group}); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3917 | LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3918 | } |
| 3919 | |
| 3920 | // We want to immediately iterate on any allocas impacted by splitting |
| 3921 | // this store, and we have to track any promotable alloca (indicated by |
| 3922 | // a direct store) as needing to be resplit because it is no longer |
| 3923 | // promotable. |
| 3924 | if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { |
| 3925 | ResplitPromotableAllocas.insert(OtherAI); |
| 3926 | Worklist.insert(OtherAI); |
| 3927 | } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( |
| 3928 | StoreBasePtr->stripInBoundsOffsets())) { |
| 3929 | Worklist.insert(OtherAI); |
| 3930 | } |
| 3931 | |
| 3932 | // Mark the original store as dead. |
| 3933 | DeadInsts.insert(SI); |
| 3934 | } |
| 3935 | |
| 3936 | // Save the split loads if there are deferred stores among the users. |
| 3937 | if (DeferredStores) |
| 3938 | SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); |
| 3939 | |
| 3940 | // Mark the original load as dead and kill the original slice. |
| 3941 | DeadInsts.insert(LI); |
| 3942 | Offsets.S->kill(); |
| 3943 | } |
| 3944 | |
| 3945 | // Second, we rewrite all of the split stores. At this point, we know that |
| 3946 | // all loads from this alloca have been split already. For stores of such |
| 3947 | // loads, we can simply look up the pre-existing split loads. For stores of |
| 3948 | // other loads, we split those loads first and then write split stores of |
| 3949 | // them. |
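| | // E.g. a store to this alloca of a load from some other alloca has no entry |
| | // in SplitLoadsMap, so fresh part loads are created for it on the fly. |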
| 3950 | for (StoreInst *SI : Stores) { |
| 3951 | auto *LI = cast<LoadInst>(SI->getValueOperand()); |
| 3952 | IntegerType *Ty = cast<IntegerType>(LI->getType()); |
| 3953 | uint64_t StoreSize = Ty->getBitWidth() / 8; |
| 3954 | assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); |
| 3955 | |
| 3956 | auto &Offsets = SplitOffsetsMap[SI]; |
| 3957 | assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && |
| 3958 | "Slice size should always match load size exactly!"); |
| 3959 | uint64_t BaseOffset = Offsets.S->beginOffset(); |
| 3960 | assert(BaseOffset + StoreSize > BaseOffset && |
| 3961 | "Cannot represent alloca access size using 64-bit integers!"); |
| 3962 | |
Chandler Carruth | c39eaa5 | 2015-01-01 23:26:16 +0000 | [diff] [blame] | 3963 | Value *LoadBasePtr = LI->getPointerOperand(); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3964 | Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); |
| 3965 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3966 | LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3967 | |
| 3968 | // Check whether we have an already split load. |
| 3969 | auto SplitLoadsMapI = SplitLoadsMap.find(LI); |
| 3970 | std::vector<LoadInst *> *SplitLoads = nullptr; |
| 3971 | if (SplitLoadsMapI != SplitLoadsMap.end()) { |
| 3972 | SplitLoads = &SplitLoadsMapI->second; |
| 3973 | assert(SplitLoads->size() == Offsets.Splits.size() + 1 && |
| 3974 | "Too few split loads for the number of splits in the store!"); |
| 3975 | } else { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 3976 | LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3977 | } |
| 3978 | |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3979 | uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); |
| 3980 | int Idx = 0, Size = Offsets.Splits.size(); |
| 3981 | for (;;) { |
| 3982 | auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); |
Keno Fischer | 514a6a5 | 2017-06-02 19:04:17 +0000 | [diff] [blame] | 3983 | auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); |
| 3984 | auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3985 | |
| 3986 | // Either lookup a split load or create one. |
| 3987 | LoadInst *PLoad; |
| 3988 | if (SplitLoads) { |
| 3989 | PLoad = (*SplitLoads)[Idx]; |
| 3990 | } else { |
Duncan P. N. Exon Smith | be4d8cb | 2015-10-13 19:26:58 +0000 | [diff] [blame] | 3991 | IRB.SetInsertPoint(LI); |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 3992 | auto AS = LI->getPointerAddressSpace(); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3993 | PLoad = IRB.CreateAlignedLoad( |
James Y Knight | 14359ef | 2019-02-01 20:44:24 +0000 | [diff] [blame] | 3994 | PartTy, |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 3995 | getAdjustedPtr(IRB, DL, LoadBasePtr, |
Elena Demikhovsky | 945b7e5 | 2018-02-14 06:58:08 +0000 | [diff] [blame] | 3996 | APInt(DL.getIndexSizeInBits(AS), PartOffset), |
Keno Fischer | 514a6a5 | 2017-06-02 19:04:17 +0000 | [diff] [blame] | 3997 | LoadPartPtrTy, LoadBasePtr->getName() + "."), |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 3998 | getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 3999 | LI->getName()); |
| 4000 | } |
| 4001 | |
| 4002 | // And store this partition. |
Duncan P. N. Exon Smith | be4d8cb | 2015-10-13 19:26:58 +0000 | [diff] [blame] | 4003 | IRB.SetInsertPoint(SI); |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 4004 | auto AS = SI->getPointerAddressSpace(); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4005 | StoreInst *PStore = IRB.CreateAlignedStore( |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 4006 | PLoad, |
| 4007 | getAdjustedPtr(IRB, DL, StoreBasePtr, |
Elena Demikhovsky | 945b7e5 | 2018-02-14 06:58:08 +0000 | [diff] [blame] | 4008 | APInt(DL.getIndexSizeInBits(AS), PartOffset), |
Yaxun Liu | 6455b0d | 2017-06-09 20:46:29 +0000 | [diff] [blame] | 4009 | StorePartPtrTy, StoreBasePtr->getName() + "."), |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4010 | getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4011 | |
| 4012 | // Now build a new slice for the alloca. |
| 4013 | NewSlices.push_back( |
| 4014 | Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, |
| 4015 | &PStore->getOperandUse(PStore->getPointerOperandIndex()), |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4016 | /*IsSplittable*/ false)); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4017 | LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() |
| 4018 | << ", " << NewSlices.back().endOffset() |
| 4019 | << "): " << *PStore << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4020 | if (!SplitLoads) { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4021 | LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4022 | } |
| 4023 | |
Chandler Carruth | 29c22fa | 2015-01-02 00:10:22 +0000 | [diff] [blame] | 4024 | // See if we've finished all the splits. |
| 4025 | if (Idx >= Size) |
| 4026 | break; |
| 4027 | |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4028 | // Set up the next partition. |
| 4029 | PartOffset = Offsets.Splits[Idx]; |
| 4030 | ++Idx; |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4031 | PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; |
| 4032 | } |
| 4033 | |
| 4034 | // We want to immediately iterate on any allocas impacted by splitting |
| 4035 | // this load, which is only relevant if it isn't a load of this alloca and |
| 4036 | // thus we didn't already split the loads above. We also have to keep track |
| 4037 | // of any promotable allocas we split loads on as they can no longer be |
| 4038 | // promoted. |
| 4039 | if (!SplitLoads) { |
| 4040 | if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { |
| 4041 | assert(OtherAI != &AI && "We can't re-split our own alloca!"); |
| 4042 | ResplitPromotableAllocas.insert(OtherAI); |
| 4043 | Worklist.insert(OtherAI); |
| 4044 | } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( |
| 4045 | LoadBasePtr->stripInBoundsOffsets())) { |
| 4046 | assert(OtherAI != &AI && "We can't re-split our own alloca!"); |
| 4047 | Worklist.insert(OtherAI); |
| 4048 | } |
| 4049 | } |
| 4050 | |
| 4051 | // Mark the original store as dead now that we've split it up and kill its |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4052 | // slice. Note that we leave the original load in place unless this store |
Benjamin Kramer | df005cb | 2015-08-08 18:27:36 +0000 | [diff] [blame] | 4053 | // was its only use. It may in turn be split up if it is an alloca load |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4054 | // for some other alloca, but it may be a normal load. This may introduce |
| 4055 | // redundant loads, but where those can be merged, the rest of the optimizer |
| 4056 | // should handle the merging, and this uncovers SSA splits, which is more |
| 4057 | // important. In practice, the original loads will almost always be fully |
| 4058 | // split and removed eventually, and the splits will be merged by any |
| 4059 | // trivial CSE, including instcombine. |
| 4060 | if (LI->hasOneUse()) { |
| 4061 | assert(*LI->user_begin() == SI && "Single use isn't this store!"); |
| 4062 | DeadInsts.insert(LI); |
| 4063 | } |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4064 | DeadInsts.insert(SI); |
| 4065 | Offsets.S->kill(); |
| 4066 | } |
| 4067 | |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4068 | // Remove the killed slices that have been pre-split. |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 4069 | AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }), |
| 4070 | AS.end()); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4071 | |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4072 | // Insert our new slices. This will sort and merge them into the sorted |
| 4073 | // sequence. |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4074 | AS.insert(NewSlices); |
| 4075 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4076 | LLVM_DEBUG(dbgs() << " Pre-split slices:\n"); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4077 | #ifndef NDEBUG |
| 4078 | for (auto I = AS.begin(), E = AS.end(); I != E; ++I) |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4079 | LLVM_DEBUG(AS.print(dbgs(), I, " ")); |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4080 | #endif |
| 4081 | |
| 4082 | // Finally, don't try to promote any allocas that now require re-splitting. |
| 4083 | // They have already been added to the worklist above. |
| 4084 | PromotableAllocas.erase( |
Eugene Zelenko | 75075ef | 2017-09-01 21:37:29 +0000 | [diff] [blame] | 4085 | llvm::remove_if( |
David Majnemer | c700490 | 2016-08-12 04:32:37 +0000 | [diff] [blame] | 4086 | PromotableAllocas, |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4087 | [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }), |
| 4088 | PromotableAllocas.end()); |
| 4089 | |
| 4090 | return true; |
| 4091 | } |
| 4092 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 4093 | /// Rewrite an alloca partition's users. |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4094 | /// |
| 4095 | /// This routine drives both of the rewriting goals of the SROA pass. It tries |
| 4096 | /// to rewrite uses of an alloca partition to be conducive for SSA value |
| 4097 | /// promotion. If the partition needs a new, more refined alloca, this will |
| 4098 | /// build that new alloca, preserving as much type information as possible, and |
| 4099 | /// rewrite the uses of the old alloca to point at the new one and have the |
| 4100 | /// appropriate new offsets. It also evaluates how successful the rewrite was |
| 4101 | /// at enabling promotion and if it was successful queues the alloca to be |
| 4102 | /// promoted. |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4103 | AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS, |
Chandler Carruth | 29a18a4 | 2015-09-12 09:09:14 +0000 | [diff] [blame] | 4104 | Partition &P) { |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4105 | // Try to compute a friendly type for this partition of the alloca. This |
| 4106 | // won't always succeed, in which case we fall back to a legal integer type |
| 4107 | // or an i8 array of an appropriate size. |
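| | // E.g. a 4-byte partition accessed only as i32 gets SliceTy = i32; failing |
| | // a common or partitioned type, an 8-byte partition becomes i64 where that |
| | // is a legal integer, and [8 x i8] otherwise. |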
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 4108 | Type *SliceTy = nullptr; |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4109 | const DataLayout &DL = AI.getModule()->getDataLayout(); |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4110 | if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset())) |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4111 | if (DL.getTypeAllocSize(CommonUseTy) >= P.size()) |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4112 | SliceTy = CommonUseTy; |
| 4113 | if (!SliceTy) |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4114 | if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(), |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4115 | P.beginOffset(), P.size())) |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4116 | SliceTy = TypePartitionTy; |
| 4117 | if ((!SliceTy || (SliceTy->isArrayTy() && |
| 4118 | SliceTy->getArrayElementType()->isIntegerTy())) && |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4119 | DL.isLegalInteger(P.size() * 8)) |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4120 | SliceTy = Type::getIntNTy(*C, P.size() * 8); |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4121 | if (!SliceTy) |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4122 | SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4123 | assert(DL.getTypeAllocSize(SliceTy) >= P.size()); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4124 | |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4125 | bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4126 | |
Chandler Carruth | 2dc9682 | 2014-10-18 00:44:02 +0000 | [diff] [blame] | 4127 | VectorType *VecTy = |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4128 | IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL); |
Chandler Carruth | 2dc9682 | 2014-10-18 00:44:02 +0000 | [diff] [blame] | 4129 | if (VecTy) |
| 4130 | SliceTy = VecTy; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4131 | |
| 4132 | // Check for the case where we're going to rewrite to a new alloca of the |
| 4133 | // exact same type as the original, and with the same access offsets. In that |
| 4134 | // case, re-use the existing alloca, but still run through the rewriter to |
Jakub Staszak | 086f6cd | 2013-02-19 22:02:21 +0000 | [diff] [blame] | 4135 | // perform phi and select speculation. |
Hiroshi Inoue | 99a8faa | 2018-01-16 06:23:05 +0000 | [diff] [blame] | 4136 | // P.beginOffset() can be non-zero even with the same type in a case with |
| 4137 | // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll). |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4138 | AllocaInst *NewAI; |
Hiroshi Inoue | 99a8faa | 2018-01-16 06:23:05 +0000 | [diff] [blame] | 4139 | if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) { |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4140 | NewAI = &AI; |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4141 | // FIXME: We should be able to bail at this point with "nothing changed". |
| 4142 | // FIXME: We might want to defer PHI speculation until after here. |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4143 | // FIXME: return nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4144 | } else { |
Chandler Carruth | 903790e | 2012-09-29 10:41:21 +0000 | [diff] [blame] | 4145 | unsigned Alignment = AI.getAlignment(); |
| 4146 | if (!Alignment) { |
| 4147 | // The minimum alignment which users can rely on when the explicit |
| 4148 | // alignment is omitted or zero is that required by the ABI for this |
| 4149 | // type. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4150 | Alignment = DL.getABITypeAlignment(AI.getAllocatedType()); |
Chandler Carruth | 903790e | 2012-09-29 10:41:21 +0000 | [diff] [blame] | 4151 | } |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4152 | Alignment = MinAlign(Alignment, P.beginOffset()); |
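| | // E.g. an alloca aligned to 8 bytes can only guarantee 4-byte alignment for |
| | // a partition beginning at offset 4. |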
Chandler Carruth | 903790e | 2012-09-29 10:41:21 +0000 | [diff] [blame] | 4153 | // If we will get at least this much alignment from the type alone, leave |
| 4154 | // the alloca's alignment unconstrained. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4155 | if (Alignment <= DL.getABITypeAlignment(SliceTy)) |
Chandler Carruth | 903790e | 2012-09-29 10:41:21 +0000 | [diff] [blame] | 4156 | Alignment = 0; |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4157 | NewAI = new AllocaInst( |
Matt Arsenault | 3c1fc76 | 2017-04-10 22:27:50 +0000 | [diff] [blame] | 4158 | SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment, |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4159 | AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI); |
Anastasis Grammenos | 425df22 | 2018-06-28 18:58:30 +0000 | [diff] [blame] | 4160 | // Copy the old AI debug location over to the new one. |
| 4161 | NewAI->setDebugLoc(AI.getDebugLoc()); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4162 | ++NumNewAllocas; |
| 4163 | } |
| 4164 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4165 | LLVM_DEBUG(dbgs() << "Rewriting alloca partition " |
| 4166 | << "[" << P.beginOffset() << "," << P.endOffset() |
| 4167 | << ") to: " << *NewAI << "\n"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4168 | |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4169 | // Track the high watermark on the worklist as it is only relevant for |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4170 | // promoted allocas. We will reset it to this point if the alloca is not in |
| 4171 | // fact scheduled for promotion. |
Chandler Carruth | ac8317f | 2012-10-04 12:33:50 +0000 | [diff] [blame] | 4172 | unsigned PPWOldSize = PostPromotionWorklist.size(); |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4173 | unsigned NumUses = 0; |
Davide Italiano | 81a26da | 2017-04-27 23:09:01 +0000 | [diff] [blame] | 4174 | SmallSetVector<PHINode *, 8> PHIUsers; |
| 4175 | SmallSetVector<SelectInst *, 8> SelectUsers; |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4176 | |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4177 | AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(), |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4178 | P.endOffset(), IsIntegerPromotable, VecTy, |
| 4179 | PHIUsers, SelectUsers); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4180 | bool Promotable = true; |
Chandler Carruth | ffb7ce5 | 2014-12-24 01:48:09 +0000 | [diff] [blame] | 4181 | for (Slice *S : P.splitSliceTails()) { |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4182 | Promotable &= Rewriter.visit(S); |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4183 | ++NumUses; |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4184 | } |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4185 | for (Slice &S : P) { |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4186 | Promotable &= Rewriter.visit(&S); |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4187 | ++NumUses; |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4188 | } |
| 4189 | |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4190 | NumAllocaPartitionUses += NumUses; |
Craig Topper | 8a95027 | 2017-05-18 00:51:39 +0000 | [diff] [blame] | 4191 | MaxUsesPerAllocaPartition.updateMax(NumUses); |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4192 | |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4193 | // Now that we've processed all the slices in the new partition, check if any |
| 4194 | // PHIs or Selects would block promotion. |
Davide Italiano | 81a26da | 2017-04-27 23:09:01 +0000 | [diff] [blame] | 4195 | for (PHINode *PHI : PHIUsers) |
| 4196 | if (!isSafePHIToSpeculate(*PHI)) { |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4197 | Promotable = false; |
| 4198 | PHIUsers.clear(); |
| 4199 | SelectUsers.clear(); |
Chandler Carruth | a8c4cc6 | 2014-02-25 09:45:27 +0000 | [diff] [blame] | 4200 | break; |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4201 | } |
Davide Italiano | 81a26da | 2017-04-27 23:09:01 +0000 | [diff] [blame] | 4202 | |
| 4203 | for (SelectInst *Sel : SelectUsers) |
| 4204 | if (!isSafeSelectToSpeculate(*Sel)) { |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4205 | Promotable = false; |
| 4206 | PHIUsers.clear(); |
| 4207 | SelectUsers.clear(); |
Chandler Carruth | a8c4cc6 | 2014-02-25 09:45:27 +0000 | [diff] [blame] | 4208 | break; |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4209 | } |
| 4210 | |
| 4211 | if (Promotable) { |
| 4212 | if (PHIUsers.empty() && SelectUsers.empty()) { |
| 4213 | // Promote the alloca. |
| 4214 | PromotableAllocas.push_back(NewAI); |
| 4215 | } else { |
| 4216 | // If we have either PHIs or Selects to speculate, add them to those |
| 4217 | // worklists and re-queue the new alloca so that we promote it on the |
| 4218 | // next iteration. |
Chandler Carruth | 6174704 | 2014-10-16 21:05:14 +0000 | [diff] [blame] | 4219 | for (PHINode *PHIUser : PHIUsers) |
| 4220 | SpeculatablePHIs.insert(PHIUser); |
| 4221 | for (SelectInst *SelectUser : SelectUsers) |
| 4222 | SpeculatableSelects.insert(SelectUser); |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4223 | Worklist.insert(NewAI); |
| 4224 | } |
| 4225 | } else { |
Chandler Carruth | 3bf18ed | 2014-02-25 00:07:09 +0000 | [diff] [blame] | 4226 | // Drop any post-promotion work items if promotion didn't happen. |
Chandler Carruth | ac8317f | 2012-10-04 12:33:50 +0000 | [diff] [blame] | 4227 | while (PostPromotionWorklist.size() > PPWOldSize) |
| 4228 | PostPromotionWorklist.pop_back(); |
David Majnemer | 30ffc4c | 2016-04-26 01:05:00 +0000 | [diff] [blame] | 4229 | |
| 4230 | // We couldn't promote and we didn't create a new partition; nothing |
| 4231 | // happened. |
| 4232 | if (NewAI == &AI) |
| 4233 | return nullptr; |
| 4234 | |
| 4235 | // If we can't promote the alloca, iterate on it to check for new |
| 4236 | // refinements exposed by splitting the current alloca. Don't iterate on an |
| 4237 | // alloca which didn't actually change and didn't get promoted. |
| 4238 | Worklist.insert(NewAI); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4239 | } |
Chandler Carruth | ac8317f | 2012-10-04 12:33:50 +0000 | [diff] [blame] | 4240 | |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4241 | return NewAI; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4242 | } |
| 4243 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 4244 | /// Walks the slices of an alloca and forms partitions based on them, |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4245 | /// rewriting each of their uses. |
Chandler Carruth | 8393406 | 2014-10-16 21:11:55 +0000 | [diff] [blame] | 4246 | bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { |
| 4247 | if (AS.begin() == AS.end()) |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4248 | return false; |
| 4249 | |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4250 | unsigned NumPartitions = 0; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4251 | bool Changed = false; |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4252 | const DataLayout &DL = AI.getModule()->getDataLayout(); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4253 | |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4254 | // First try to pre-split loads and stores. |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4255 | Changed |= presplitLoadsAndStores(AI, AS); |
| 4256 | |
Hiroshi Inoue | 48e4c7a | 2017-12-01 06:05:05 +0000 | [diff] [blame] | 4257 | // Now that we have identified any pre-splitting opportunities, |
| 4258 | // mark loads and stores unsplittable except for the following case. |
| 4259 | // We leave a slice splittable if all other slices are disjoint or fully |
| 4260 | // included in the slice, such as whole-alloca loads and stores. |
| 4261 | // If we fail to split these during pre-splitting, we want to force them |
| 4262 | // to be rewritten into a partition. |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4263 | bool IsSorted = true; |
Hiroshi Inoue | 48e4c7a | 2017-12-01 06:05:05 +0000 | [diff] [blame] | 4264 | |
| 4265 | uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType()); |
| 4266 | const uint64_t MaxBitVectorSize = 1024; |
Hiroshi Inoue | 99a8faa | 2018-01-16 06:23:05 +0000 | [diff] [blame] | 4267 | if (AllocaSize <= MaxBitVectorSize) { |
Hiroshi Inoue | 48e4c7a | 2017-12-01 06:05:05 +0000 | [diff] [blame] | 4268 | // If a byte boundary is included in any load or store, a slice starting or |
| 4269 | // ending at the boundary is not splittable. |
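| | // E.g. for an 8-byte alloca with a whole-alloca i64 load, offsets 1..7 are |
| | // interior to that load, so a splittable slice at [0,4) ends at the cleared |
| | // offset 4 and is marked unsplittable below if it is a load or store. |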
| 4270 | SmallBitVector SplittableOffset(AllocaSize + 1, true); |
| 4271 | for (Slice &S : AS) |
| 4272 | for (unsigned O = S.beginOffset() + 1; |
| 4273 | O < S.endOffset() && O < AllocaSize; O++) |
| 4274 | SplittableOffset.reset(O); |
| 4275 | |
| 4276 | for (Slice &S : AS) { |
| 4277 | if (!S.isSplittable()) |
| 4278 | continue; |
| 4279 | |
| 4280 | if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) && |
| 4281 | (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()])) |
| 4282 | continue; |
| 4283 | |
| 4284 | if (isa<LoadInst>(S.getUse()->getUser()) || |
| 4285 | isa<StoreInst>(S.getUse()->getUser())) { |
| 4286 | S.makeUnsplittable(); |
| 4287 | IsSorted = false; |
| 4288 | } |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4289 | } |
| 4290 | } else { |
| 4292 | // We only allow whole-alloca splittable loads and stores |
| 4293 | // for a large alloca to avoid creating an overly large BitVector. |
| 4294 | for (Slice &S : AS) { |
| 4295 | if (!S.isSplittable()) |
| 4296 | continue; |
| 4297 | |
| 4298 | if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize) |
| 4299 | continue; |
| 4300 | |
| 4301 | if (isa<LoadInst>(S.getUse()->getUser()) || |
| 4302 | isa<StoreInst>(S.getUse()->getUser())) { |
| 4303 | S.makeUnsplittable(); |
| 4304 | IsSorted = false; |
| 4305 | } |
| 4306 | } |
| 4307 | } |
| 4308 | |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4309 | if (!IsSorted) |
Fangrui Song | 0cac726 | 2018-09-27 02:13:45 +0000 | [diff] [blame] | 4310 | llvm::sort(AS); |
Chandler Carruth | 24ac830 | 2015-01-02 03:55:54 +0000 | [diff] [blame] | 4311 | |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4312 | /// Describes the allocas introduced by rewritePartition in order to migrate |
| 4313 | /// the debug info. |
| 4314 | struct Fragment { |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4315 | AllocaInst *Alloca; |
| 4316 | uint64_t Offset; |
| 4317 | uint64_t Size; |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4318 | Fragment(AllocaInst *AI, uint64_t O, uint64_t S) |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4319 | : Alloca(AI), Offset(O), Size(S) {} |
| 4320 | }; |
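| | // E.g. rewriting a 16-byte alloca for a 128-bit variable into two i64 |
| | // partitions yields Fragments (Offset=0, Size=64) and (Offset=64, Size=64), |
| | // in bits, which become DW_OP_LLVM_fragment expressions below. |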
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4321 | SmallVector<Fragment, 4> Fragments; |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4322 | |
Chandler Carruth | 0715cba | 2015-01-01 11:54:38 +0000 | [diff] [blame] | 4323 | // Rewrite each partition. |
Chandler Carruth | e2f66ce | 2014-12-22 22:46:00 +0000 | [diff] [blame] | 4324 | for (auto &P : AS.partitions()) { |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4325 | if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) { |
| 4326 | Changed = true; |
Adrian Prantl | 34e7590 | 2015-02-09 23:57:22 +0000 | [diff] [blame] | 4327 | if (NewAI != &AI) { |
| 4328 | uint64_t SizeOfByte = 8; |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4329 | uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType()); |
Adrian Prantl | 34e7590 | 2015-02-09 23:57:22 +0000 | [diff] [blame] | 4330 | // Don't include any padding. |
| 4331 | uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4332 | Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size)); |
Adrian Prantl | 34e7590 | 2015-02-09 23:57:22 +0000 | [diff] [blame] | 4333 | } |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4334 | } |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4335 | ++NumPartitions; |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4336 | } |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4337 | |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4338 | NumAllocaPartitions += NumPartitions; |
Craig Topper | 8a95027 | 2017-05-18 00:51:39 +0000 | [diff] [blame] | 4339 | MaxPartitionsPerAlloca.updateMax(NumPartitions); |
Chandler Carruth | 6c321c1 | 2013-07-19 10:57:36 +0000 | [diff] [blame] | 4340 | |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4341 | // Migrate debug information from the old alloca to the new alloca(s) |
Benjamin Kramer | df005cb | 2015-08-08 18:27:36 +0000 | [diff] [blame] | 4342 | // and the individual partitions. |
Hsiangkai Wang | ef72e48 | 2018-08-06 03:59:47 +0000 | [diff] [blame] | 4343 | TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI); |
Reid Kleckner | 0fe506b | 2017-09-21 19:52:03 +0000 | [diff] [blame] | 4344 | if (!DbgDeclares.empty()) { |
| 4345 | auto *Var = DbgDeclares.front()->getVariable(); |
| 4346 | auto *Expr = DbgDeclares.front()->getExpression(); |
Adrian Prantl | d7f6f16 | 2017-11-28 00:57:53 +0000 | [diff] [blame] | 4347 | auto VarSize = Var->getSizeInBits(); |
Sanjay Patel | af674fb | 2015-12-14 17:24:23 +0000 | [diff] [blame] | 4348 | DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false); |
Keno Fischer | d5354fd | 2016-01-14 20:06:34 +0000 | [diff] [blame] | 4349 | uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType()); |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4350 | for (auto Fragment : Fragments) { |
| 4351 | // Create a fragment expression describing the new partition or reuse AI's |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4352 | // expression if there is only one partition. |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4353 | auto *FragmentExpr = Expr; |
| 4354 | if (Fragment.Size < AllocaSize || Expr->isFragment()) { |
Adrian Prantl | 152ac39 | 2015-02-01 00:58:04 +0000 | [diff] [blame] | 4355 | // If this alloca is already a scalar replacement of a larger aggregate, |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4356 | // Fragment.Offset describes the offset inside the scalar. |
Adrian Prantl | 49797ca | 2016-12-22 05:27:12 +0000 | [diff] [blame] | 4357 | auto ExprFragment = Expr->getFragmentInfo(); |
| 4358 | uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0; |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4359 | uint64_t Start = Offset + Fragment.Offset; |
| 4360 | uint64_t Size = Fragment.Size; |
Adrian Prantl | 49797ca | 2016-12-22 05:27:12 +0000 | [diff] [blame] | 4361 | if (ExprFragment) { |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4362 | uint64_t AbsEnd = |
NAKAMURA Takumi | a1e97a7 | 2017-08-28 06:47:47 +0000 | [diff] [blame] | 4363 | ExprFragment->OffsetInBits + ExprFragment->SizeInBits; |
Adrian Prantl | 34e7590 | 2015-02-09 23:57:22 +0000 | [diff] [blame] | 4364 | if (Start >= AbsEnd) |
| 4365 | // No need to describe a SROAed padding. |
| 4366 | continue; |
| 4367 | Size = std::min(Size, AbsEnd - Start); |
| 4368 | } |
Adrian Prantl | b192b54 | 2017-08-30 20:04:17 +0000 | [diff] [blame] | 4369 | // The new, smaller fragment is stenciled out from the old fragment. |
| 4370 | if (auto OrigFragment = FragmentExpr->getFragmentInfo()) { |
| 4371 | assert(Start >= OrigFragment->OffsetInBits && |
| 4372 | "new fragment is outside of original fragment"); |
| 4373 | Start -= OrigFragment->OffsetInBits; |
| 4374 | } |
Adrian Prantl | 77d90b0 | 2017-11-28 21:30:38 +0000 | [diff] [blame] | 4375 | |
| 4376 | // The alloca may be larger than the variable. |
| 4377 | if (VarSize) { |
| 4378 | if (Size > *VarSize) |
| 4379 | Size = *VarSize; |
| 4380 | if (Size == 0 || Start + Size > *VarSize) |
| 4381 | continue; |
| 4382 | } |
| 4383 | |
Adrian Prantl | d7f6f16 | 2017-11-28 00:57:53 +0000 | [diff] [blame] | 4384 | // Avoid creating a fragment expression that covers the entire variable. |
| 4385 | if (!VarSize || *VarSize != Size) { |
| 4386 | if (auto E = |
| 4387 | DIExpression::createFragmentExpression(Expr, Start, Size)) |
| 4388 | FragmentExpr = *E; |
| 4389 | else |
| 4390 | continue; |
| 4391 | } |
Adrian Prantl | 152ac39 | 2015-02-01 00:58:04 +0000 | [diff] [blame] | 4392 | } |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4393 | |
Reid Kleckner | 0fe506b | 2017-09-21 19:52:03 +0000 | [diff] [blame] | 4394 | // Remove any existing intrinsics describing the same alloca. |
Hsiangkai Wang | ef72e48 | 2018-08-06 03:59:47 +0000 | [diff] [blame] | 4395 | for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) |
Reid Kleckner | 0fe506b | 2017-09-21 19:52:03 +0000 | [diff] [blame] | 4396 | OldDII->eraseFromParent(); |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4397 | |
Adrian Prantl | 941fa75 | 2016-12-05 18:04:47 +0000 | [diff] [blame] | 4398 | DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr, |
Reid Kleckner | 0fe506b | 2017-09-21 19:52:03 +0000 | [diff] [blame] | 4399 | DbgDeclares.front()->getDebugLoc(), &AI); |
Adrian Prantl | 565cc18 | 2015-01-20 19:42:22 +0000 | [diff] [blame] | 4400 | } |
| 4401 | } |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4402 | return Changed; |
| 4403 | } |
| 4404 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 4405 | /// Clobber a use with undef, deleting the used value if it becomes dead. |
Chandler Carruth | 1bf38c6 | 2014-01-19 12:16:54 +0000 | [diff] [blame] | 4406 | void SROA::clobberUse(Use &U) { |
| 4407 | Value *OldV = U; |
| 4408 | // Replace the use with an undef value. |
| 4409 | U = UndefValue::get(OldV->getType()); |
| 4410 | |
| 4411 | // Check for this making an instruction dead. We have to garbage collect |
| 4412 | // all the dead instructions to ensure the uses of any alloca end up being |
| 4413 | // minimal. |
| 4414 | if (Instruction *OldI = dyn_cast<Instruction>(OldV)) |
| 4415 | if (isInstructionTriviallyDead(OldI)) { |
| 4416 | DeadInsts.insert(OldI); |
| 4417 | } |
| 4418 | } |
| 4419 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 4420 | /// Analyze an alloca for SROA. |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4421 | /// |
| 4422 | /// This analyzes the alloca to ensure we can reason about it, builds |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4423 | /// the slices of the alloca, and then hands it off to be split and |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4424 | /// rewritten as needed. |
| 4425 | bool SROA::runOnAlloca(AllocaInst &AI) { |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4426 | LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4427 | ++NumAllocasAnalyzed; |
| 4428 | |
| 4429 | // Special case dead allocas, as they're trivial. |
| 4430 | if (AI.use_empty()) { |
| 4431 | AI.eraseFromParent(); |
| 4432 | return true; |
| 4433 | } |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4434 | const DataLayout &DL = AI.getModule()->getDataLayout(); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4435 | |
| 4436 | // Skip alloca forms that this analysis can't handle. |
| 4437 | if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4438 | DL.getTypeAllocSize(AI.getAllocatedType()) == 0) |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4439 | return false; |
| 4440 | |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 4441 | bool Changed = false; |
| 4442 | |
| 4443 | // First, split any FCA loads and stores touching this alloca to promote |
| 4444 | // better splitting and promotion opportunities. |
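| | // E.g. a load of {i32, i32} is rewritten into two scalar loads whose |
| | // results are recombined with insertvalue, exposing each field separately. |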
Tim Northover | 856628f | 2018-12-18 09:29:39 +0000 | [diff] [blame] | 4445 | AggLoadStoreRewriter AggRewriter(DL); |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 4446 | Changed |= AggRewriter.rewrite(AI); |
| 4447 | |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4448 | // Build the slices using a recursive instruction-visiting builder. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 4449 | AllocaSlices AS(DL, AI); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4450 | LLVM_DEBUG(AS.print(dbgs())); |
Chandler Carruth | 8393406 | 2014-10-16 21:11:55 +0000 | [diff] [blame] | 4451 | if (AS.isEscaped()) |
Chandler Carruth | 42cb9cb | 2012-09-18 12:57:43 +0000 | [diff] [blame] | 4452 | return Changed; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4453 | |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4454 | // Delete all the dead users of this alloca before splitting and rewriting it. |
Chandler Carruth | 8393406 | 2014-10-16 21:11:55 +0000 | [diff] [blame] | 4455 | for (Instruction *DeadUser : AS.getDeadUsers()) { |
Chandler Carruth | 1bf38c6 | 2014-01-19 12:16:54 +0000 | [diff] [blame] | 4456 | // Free up everything used by this instruction. |
Chandler Carruth | 57d4cae | 2014-10-16 20:42:08 +0000 | [diff] [blame] | 4457 | for (Use &DeadOp : DeadUser->operands()) |
Chandler Carruth | 1583e99 | 2014-03-03 10:42:58 +0000 | [diff] [blame] | 4458 | clobberUse(DeadOp); |
Chandler Carruth | 1bf38c6 | 2014-01-19 12:16:54 +0000 | [diff] [blame] | 4459 | |
| 4460 | // Now replace the uses of this instruction. |
Chandler Carruth | 57d4cae | 2014-10-16 20:42:08 +0000 | [diff] [blame] | 4461 | DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); |
Chandler Carruth | 1bf38c6 | 2014-01-19 12:16:54 +0000 | [diff] [blame] | 4462 | |
| 4463 | // And mark it for deletion. |
Chandler Carruth | 57d4cae | 2014-10-16 20:42:08 +0000 | [diff] [blame] | 4464 | DeadInsts.insert(DeadUser); |
Chandler Carruth | 1bf38c6 | 2014-01-19 12:16:54 +0000 | [diff] [blame] | 4465 | Changed = true; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4466 | } |
Chandler Carruth | 8393406 | 2014-10-16 21:11:55 +0000 | [diff] [blame] | 4467 | for (Use *DeadOp : AS.getDeadOperands()) { |
Chandler Carruth | 57d4cae | 2014-10-16 20:42:08 +0000 | [diff] [blame] | 4468 | clobberUse(*DeadOp); |
Chandler Carruth | 1bf38c6 | 2014-01-19 12:16:54 +0000 | [diff] [blame] | 4469 | Changed = true; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4470 | } |
| 4471 | |
Chandler Carruth | 9f21fe1 | 2013-07-19 09:13:58 +0000 | [diff] [blame] | 4472 | // No slices to split. Leave the dead alloca for a later pass to clean up. |
Chandler Carruth | 8393406 | 2014-10-16 21:11:55 +0000 | [diff] [blame] | 4473 | if (AS.begin() == AS.end()) |
Chandler Carruth | e5b7a2c | 2012-10-05 01:29:09 +0000 | [diff] [blame] | 4474 | return Changed; |
| 4475 | |
Chandler Carruth | 8393406 | 2014-10-16 21:11:55 +0000 | [diff] [blame] | 4476 | Changed |= splitAlloca(AI, AS); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4477 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4478 | LLVM_DEBUG(dbgs() << " Speculating PHIs\n"); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4479 | while (!SpeculatablePHIs.empty()) |
| 4480 | speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); |
| 4481 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4482 | LLVM_DEBUG(dbgs() << " Speculating Selects\n"); |
Chandler Carruth | f054640 | 2013-07-18 07:15:00 +0000 | [diff] [blame] | 4483 | while (!SpeculatableSelects.empty()) |
| 4484 | speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); |
| 4485 | |
| 4486 | return Changed; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4487 | } |
| 4488 | |
Adrian Prantl | 5f8f34e4 | 2018-05-01 15:54:18 +0000 | [diff] [blame] | 4489 | /// Delete the dead instructions accumulated in this run. |
Chandler Carruth | 19450da | 2012-09-14 10:26:38 +0000 | [diff] [blame] | 4490 | /// |
| 4491 | /// Recursively deletes the dead instructions we've accumulated. This is done |
| 4492 | /// at the very end to maximize locality of the recursive delete and to |
| 4493 | /// minimize the problems of invalidated instruction pointers as such pointers |
| 4494 | /// are used heavily in the intermediate stages of the algorithm. |
| 4495 | /// |
| 4496 | /// We also record the alloca instructions deleted here so that they aren't |
| 4497 | /// subsequently handed to mem2reg to promote. |
Teresa Johnson | 3309002 | 2017-11-20 18:33:38 +0000 | [diff] [blame] | 4498 | bool SROA::deleteDeadInstructions( |
Chandler Carruth | 113dc64 | 2014-12-20 02:39:18 +0000 | [diff] [blame] | 4499 | SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) { |
Teresa Johnson | 3309002 | 2017-11-20 18:33:38 +0000 | [diff] [blame] | 4500 | bool Changed = false; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4501 | while (!DeadInsts.empty()) { |
| 4502 | Instruction *I = DeadInsts.pop_back_val(); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 4503 | LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4504 | |
Reid Kleckner | 0fe506b | 2017-09-21 19:52:03 +0000 | [diff] [blame] | 4505 | // If the instruction is an alloca, find the possible dbg.declare connected |
| 4506 | // to it, and remove it too. We must do this before calling RAUW or we will |
| 4507 | // not be able to find it. |
| 4508 | if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) { |
| 4509 | DeletedAllocas.insert(AI); |
Hsiangkai Wang | ef72e48 | 2018-08-06 03:59:47 +0000 | [diff] [blame] | 4510 | for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI)) |
Reid Kleckner | 0fe506b | 2017-09-21 19:52:03 +0000 | [diff] [blame] | 4511 | OldDII->eraseFromParent(); |
| 4512 | } |
| 4513 | |
Chandler Carruth | 58d0556 | 2012-10-25 04:37:07 +0000 | [diff] [blame] | 4514 | I->replaceAllUsesWith(UndefValue::get(I->getType())); |
| 4515 | |
Chandler Carruth | 1583e99 | 2014-03-03 10:42:58 +0000 | [diff] [blame] | 4516 | for (Use &Operand : I->operands()) |
| 4517 | if (Instruction *U = dyn_cast<Instruction>(Operand)) { |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4518 | // Zero out the operand and see if it becomes trivially dead. |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 4519 | Operand = nullptr; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4520 | if (isInstructionTriviallyDead(U)) |
Chandler Carruth | 18db795 | 2012-11-20 01:12:50 +0000 | [diff] [blame] | 4521 | DeadInsts.insert(U); |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4522 | } |
| 4523 | |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4524 | ++NumDeleted; |
| 4525 | I->eraseFromParent(); |
Teresa Johnson | 3309002 | 2017-11-20 18:33:38 +0000 | [diff] [blame] | 4526 | Changed = true; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4527 | } |
Teresa Johnson | 3309002 | 2017-11-20 18:33:38 +0000 | [diff] [blame] | 4528 | return Changed; |
Chandler Carruth | 1b398ae | 2012-09-14 09:22:59 +0000 | [diff] [blame] | 4529 | } |

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}
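
// As an illustration (not code from this pass), mem2reg rewrites a load/store
// pattern over a promotable alloca such as:
//
//   %x = alloca i32
//   store i32 %v, i32* %x
//   %y = load i32, i32* %x
//
// into direct SSA uses of %v, inserting PHI nodes where control flow merges
// and deleting the alloca entirely.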

PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
                                AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

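  // Seed the worklist from the entry block only: SROA, like mem2reg, operates
  // on static allocas, which by convention are emitted in the entry block. The
  // scan stops before the terminator, which can never be an alloca.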
  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

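  // Iterate to a fixed point: rewriting and promoting allocas can expose
  // further opportunities, which accumulate on PostPromotionWorklist and are
  // processed on the next round.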
  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
                                PromotableAllocas.end());
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

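  // Something changed, but SROA never alters the CFG, so CFG-derived analyses
  // remain valid; GlobalsAA is likewise unaffected by rewriting local allocas.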
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
}

PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}
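
// A minimal scheduling sketch for the new pass manager (illustrative only; it
// assumes the standard PassBuilder setup rather than anything defined in this
// file):
//
//   PassBuilder PB;
//   FunctionAnalysisManager FAM;
//   PB.registerFunctionAnalyses(FAM);
//   FunctionPassManager FPM;
//   FPM.addPass(SROA());
//   PreservedAnalyses PA = FPM.run(F, FAM);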

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROA Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
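    // Respect 'optnone' and opt-bisect limits; skipFunction() reports whether
    // this pass has been disabled for F.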
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }
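
// Legacy pass manager usage, for illustration (the module M is assumed, not
// defined in this file):
//
//   legacy::PassManager PM;
//   PM.add(createSROAPass());
//   PM.run(M);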

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)