//===--------------------- InterleavedAccessPass.cpp ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data by a factor. An interleaved store writes several
// vectors to memory, RE-interleaving the data by a factor.
//
// As interleaved accesses are difficult to identify in CodeGen (mainly
// because the VECTOR_SHUFFLE DAG node is quite different from the
// shufflevector IR), we identify and transform them to intrinsics in this
// pass so the intrinsics can be easily matched into target specific
// instructions later in CodeGen.
//
// E.g. An interleaved load (Factor = 2):
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
//        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
//
// It could be transformed into an ld2 intrinsic in the AArch64 backend or a
// vld2 intrinsic in the ARM backend.
//
// In X86, this can be further optimized into a set of target
// specific loads followed by an optimized sequence of shuffles.
//
// E.g. An interleaved store (Factor = 3):
//        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
//                 <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
//        store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into an st3 intrinsic in the AArch64 backend or a
// vst3 intrinsic in the ARM backend.
//
// Similarly, a set of interleaved stores can be transformed into an optimized
// sequence of shuffles followed by a set of target specific stores for X86.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "interleaved-access"

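// The pass is enabled by default; lowering can be disabled for debugging with
// -lower-interleaved-accesses=false.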
static cl::opt<bool> LowerInterleavedAccesses(
    "lower-interleaved-accesses",
    cl::desc("Enable lowering interleaved accesses to intrinsics"),
    cl::init(true), cl::Hidden);

namespace {

class InterleavedAccess : public FunctionPass {

public:
  static char ID;
  InterleavedAccess(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), DT(nullptr), TM(TM), TLI(nullptr) {
    initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "Interleaved Access Pass"; }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

private:
  DominatorTree *DT;
  const TargetMachine *TM;
  const TargetLowering *TLI;

  /// The maximum supported interleave factor.
  unsigned MaxFactor;

  /// \brief Transform an interleaved load into target specific intrinsics.
  bool lowerInterleavedLoad(LoadInst *LI,
                            SmallVector<Instruction *, 32> &DeadInsts);

  /// \brief Transform an interleaved store into target specific intrinsics.
  bool lowerInterleavedStore(StoreInst *SI,
                             SmallVector<Instruction *, 32> &DeadInsts);

  /// \brief Returns true if the uses of an interleaved load by the
  /// extractelement instructions in \p Extracts can be replaced by uses of the
  /// shufflevector instructions in \p Shuffles instead. If so, the necessary
  /// replacements are also performed.
  bool tryReplaceExtracts(ArrayRef<ExtractElementInst *> Extracts,
                          ArrayRef<ShuffleVectorInst *> Shuffles);
};
} // end anonymous namespace.

char InterleavedAccess::ID = 0;
INITIALIZE_TM_PASS_BEGIN(
    InterleavedAccess, "interleaved-access",
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_TM_PASS_END(
    InterleavedAccess, "interleaved-access",
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)

FunctionPass *llvm::createInterleavedAccessPass(const TargetMachine *TM) {
  return new InterleavedAccess(TM);
}

/// \brief Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
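///
/// E.g. <1, 5, 9, 13> is a DE-interleave mask with Factor = 4 and Index = 1.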
static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
                                       unsigned &Index) {
  // Check all potential start indices from 0 to (Factor - 1).
  for (Index = 0; Index < Factor; Index++) {
    unsigned i = 0;

    // Check that the defined elements step by Factor starting at Index.
    // Ignore undef elements.
    for (; i < Mask.size(); i++)
      if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != Index + i * Factor)
        break;

    if (i == Mask.size())
      return true;
  }

  return false;
}

/// \brief Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
///     <0, 2, 4, 6>    (mask of index 0 to extract even elements)
///     <1, 3, 5, 7>    (mask of index 1 to extract odd elements)
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned &Index, unsigned MaxFactor) {
  if (Mask.size() < 2)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++)
    if (isDeInterleaveMaskOfFactor(Mask, Factor, Index))
      return true;

  return false;
}

/// \brief Check if the mask is a RE-interleave mask for an interleaved store.
///
/// I.e. <0, NumSubElts, ..., NumSubElts*(Factor - 1), 1, NumSubElts + 1, ...>
///
/// E.g. The RE-interleave mask (Factor = 2) could be:
///     <0, 4, 1, 5, 2, 6, 3, 7>
static bool isReInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned MaxFactor) {
  unsigned NumElts = Mask.size();
  if (NumElts < 4)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    if (NumElts % Factor)
      continue;

    unsigned NumSubElts = NumElts / Factor;
    if (!isPowerOf2_32(NumSubElts))
      continue;

    // Check whether each element matches the RE-interleaved rule. Ignore undef
    // elements.
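    // E.g. for Factor = 2 and NumElts = 8 (so NumSubElts = 4), the expected
    // mask is <0, 4, 1, 5, 2, 6, 3, 7>: element i must equal
    // (i % Factor) * NumSubElts + i / Factor, e.g. i = 3 maps to 1 * 4 + 1 = 5.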
    unsigned i = 0;
    for (; i < NumElts; i++)
      if (Mask[i] >= 0 &&
          static_cast<unsigned>(Mask[i]) !=
              (i % Factor) * NumSubElts + i / Factor)
        break;

    // Found a RE-interleave mask of the current factor.
    if (i == NumElts)
      return true;
  }

  return false;
}

bool InterleavedAccess::lowerInterleavedLoad(
    LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!LI->isSimple())
    return false;

  SmallVector<ShuffleVectorInst *, 4> Shuffles;
  SmallVector<ExtractElementInst *, 4> Extracts;

  // Check if all users of this load are shufflevectors. If we encounter any
  // users that are extractelement instructions, we save them to later check if
  // they can be modified to extract from one of the shufflevectors instead of
  // the load.
  for (auto UI = LI->user_begin(), E = LI->user_end(); UI != E; UI++) {
    auto *Extract = dyn_cast<ExtractElementInst>(*UI);
    if (Extract && isa<ConstantInt>(Extract->getIndexOperand())) {
      Extracts.push_back(Extract);
      continue;
    }
    ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(*UI);
    if (!SVI || !isa<UndefValue>(SVI->getOperand(1)))
      return false;

    Shuffles.push_back(SVI);
  }

  if (Shuffles.empty())
    return false;

  unsigned Factor, Index;

  // Check if the first shufflevector is a DE-interleave shuffle.
  if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index,
                          MaxFactor))
    return false;

  // Holds the corresponding index for each DE-interleave shuffle.
  SmallVector<unsigned, 4> Indices;
  Indices.push_back(Index);

  Type *VecTy = Shuffles[0]->getType();

  // Check if the other shufflevectors are also DE-interleave shuffles of the
  // same type and factor as the first shufflevector.
  for (unsigned i = 1; i < Shuffles.size(); i++) {
    if (Shuffles[i]->getType() != VecTy)
      return false;

    if (!isDeInterleaveMaskOfFactor(Shuffles[i]->getShuffleMask(), Factor,
                                    Index))
      return false;

    Indices.push_back(Index);
  }

  // Try to modify users of the load that are extractelement instructions to
  // use the shufflevector instructions instead of the load.
  if (!tryReplaceExtracts(Extracts, Shuffles))
    return false;

  DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");

  // Try to create target specific intrinsics to replace the load and shuffles.
  if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor))
    return false;

  for (auto SVI : Shuffles)
    DeadInsts.push_back(SVI);

  DeadInsts.push_back(LI);
  return true;
}

bool InterleavedAccess::tryReplaceExtracts(
    ArrayRef<ExtractElementInst *> Extracts,
    ArrayRef<ShuffleVectorInst *> Shuffles) {

  // If there aren't any extractelement instructions to modify, there's nothing
  // to do.
  if (Extracts.empty())
    return true;

  // Maps extractelement instructions to vector-index pairs. The extractelement
  // instructions will be modified to use the new vector and index operands.
  DenseMap<ExtractElementInst *, std::pair<Value *, int>> ReplacementMap;

  for (auto *Extract : Extracts) {

    // The vector index that is extracted.
    auto *IndexOperand = cast<ConstantInt>(Extract->getIndexOperand());
    auto Index = IndexOperand->getSExtValue();

    // Look for a suitable shufflevector instruction. The goal is to modify the
    // extractelement instruction (which uses an interleaved load) to use one
    // of the shufflevector instructions instead of the load.
    for (auto *Shuffle : Shuffles) {

      // If the shufflevector instruction doesn't dominate the extract, we
      // can't create a use of it.
      if (!DT->dominates(Shuffle, Extract))
        continue;

      // Inspect the indices of the shufflevector instruction. If the shuffle
      // selects the same index that is extracted, we can modify the
      // extractelement instruction.
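      // E.g. if the shuffle mask is <0, 2, 4, 6> and the extractelement reads
      // lane 4 of the wide load, lane 2 of this shuffle holds the same value,
      // so the extract can be redirected to the shuffle result instead.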
      SmallVector<int, 4> Indices;
      Shuffle->getShuffleMask(Indices);
      for (unsigned I = 0; I < Indices.size(); ++I)
        if (Indices[I] == Index) {
          assert(Extract->getOperand(0) == Shuffle->getOperand(0) &&
                 "Vector operations do not match");
          ReplacementMap[Extract] = std::make_pair(Shuffle, I);
          break;
        }

      // If we found a suitable shufflevector instruction, stop looking.
      if (ReplacementMap.count(Extract))
        break;
    }

    // If we did not find a suitable shufflevector instruction, the
    // extractelement instruction cannot be modified, so we must give up.
    if (!ReplacementMap.count(Extract))
      return false;
  }

  // Finally, perform the replacements.
  IRBuilder<> Builder(Extracts[0]->getContext());
  for (auto &Replacement : ReplacementMap) {
    auto *Extract = Replacement.first;
    auto *Vector = Replacement.second.first;
    auto Index = Replacement.second.second;
    Builder.SetInsertPoint(Extract);
    Extract->replaceAllUsesWith(Builder.CreateExtractElement(Vector, Index));
    Extract->eraseFromParent();
  }

  return true;
}

bool InterleavedAccess::lowerInterleavedStore(
    StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!SI->isSimple())
    return false;

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
  if (!SVI || !SVI->hasOneUse())
    return false;

  // Check if the shufflevector is a RE-interleave shuffle.
  unsigned Factor;
  if (!isReInterleaveMask(SVI->getShuffleMask(), Factor, MaxFactor))
    return false;

  DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");

  // Try to create target specific intrinsics to replace the store and shuffle.
  if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
    return false;

  // Already have a new target specific interleaved store. Erase the old store.
  DeadInsts.push_back(SI);
  DeadInsts.push_back(SVI);
  return true;
}

bool InterleavedAccess::runOnFunction(Function &F) {
  if (!TM || !LowerInterleavedAccesses)
    return false;

  DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  MaxFactor = TLI->getMaxSupportedInterleaveFactor();
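  // The target reports the largest factor it can lower directly; for example,
  // the AArch64 and ARM backends support factors up to 4 (ld2/ld3/ld4 and
  // st2/st3/st4).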

  // Holds dead instructions that will be erased later.
  SmallVector<Instruction *, 32> DeadInsts;
  bool Changed = false;

  for (auto &I : instructions(F)) {
    if (LoadInst *LI = dyn_cast<LoadInst>(&I))
      Changed |= lowerInterleavedLoad(LI, DeadInsts);

    if (StoreInst *SI = dyn_cast<StoreInst>(&I))
      Changed |= lowerInterleavedStore(SI, DeadInsts);
  }

  for (auto I : DeadInsts)
    I->eraseFromParent();

  return Changed;
}