//===--------- X86InterleavedAccess.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===--------------------------------------------------------------------===//
///
/// \file
/// This file contains the X86 implementation of the interleaved accesses
/// optimization generating X86-specific instructions/intrinsics for
/// interleaved access groups.
///
//===--------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "X86TargetMachine.h"

using namespace llvm;

namespace {
/// \brief This class holds necessary information to represent an interleaved
/// access group and supports utilities to lower the group into
/// X86-specific instructions/intrinsics.
/// E.g. A group of interleaving access loads (Factor = 2; accessing every
///      other element):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
///        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
///        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
class X86InterleavedAccessGroup {
  /// \brief Reference to the wide-load instruction of an interleaved access
  /// group.
  Instruction *const Inst;

  /// \brief Reference to the shuffle(s), consumer(s) of the (load) 'Inst'.
  ArrayRef<ShuffleVectorInst *> Shuffles;

  /// \brief Reference to the starting index of each user-shuffle.
  ArrayRef<unsigned> Indices;

  /// \brief The interleaving stride in terms of elements.
  const unsigned Factor;

  /// \brief Reference to the underlying target.
  const X86Subtarget &Subtarget;

  const DataLayout &DL;

  IRBuilder<> &Builder;

  /// \brief Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors
  /// sub-vectors of type \p T. Returns true, with the sub-vectors in
  /// \p DecomposedVectors, if it manages to decompose \p 'Inst'; returns false
  /// otherwise.
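  /// E.g. a wide load of <16 x i64>, with NumSubVectors = 4 and
  /// T = <4 x i64>, is decomposed into four consecutive <4 x i64> loads.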
  bool decompose(Instruction *Inst, unsigned NumSubVectors, VectorType *T,
                 SmallVectorImpl<Instruction *> &DecomposedVectors);

  /// \brief Performs matrix transposition on a 4x4 matrix \p InputVectors and
  /// returns the transposed-vectors in \p TransposedVectors.
  /// E.g.
  /// InputVectors:
  ///   In-V0 = p1, p2, p3, p4
  ///   In-V1 = q1, q2, q3, q4
  ///   In-V2 = r1, r2, r3, r4
  ///   In-V3 = s1, s2, s3, s4
  /// OutputVectors:
  ///   Out-V0 = p1, q1, r1, s1
  ///   Out-V1 = p2, q2, r2, s2
  ///   Out-V2 = p3, q3, r3, s3
  ///   Out-V3 = p4, q4, r4, s4
  void transpose_4x4(ArrayRef<Instruction *> InputVectors,
                     SmallVectorImpl<Value *> &TransposedVectors);

public:
  /// In order to form an interleaved access group, X86InterleavedAccessGroup
  /// requires a wide-load instruction \p I, a group of interleaved-vectors
  /// \p Shuffs, a reference to the first indices of each interleaved-vector
  /// \p Ind, and the interleaving stride factor \p F. In order to generate
  /// X86-specific instructions/intrinsics, it also requires the underlying
  /// target information \p STarget and an IR builder \p B.
  explicit X86InterleavedAccessGroup(Instruction *I,
                                     ArrayRef<ShuffleVectorInst *> Shuffs,
                                     ArrayRef<unsigned> Ind,
                                     const unsigned F,
                                     const X86Subtarget &STarget,
                                     IRBuilder<> &B)
      : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
        DL(Inst->getModule()->getDataLayout()), Builder(B) {}

  /// \brief Returns true if this interleaved access group can be lowered into
  /// x86-specific instructions/intrinsics, false otherwise.
  bool isSupported() const;

  /// \brief Lowers this interleaved access group into X86-specific
  /// instructions/intrinsics.
  bool lowerIntoOptimizedSequence();
};
} // end anonymous namespace

bool X86InterleavedAccessGroup::isSupported() const {
  VectorType *ShuffleVecTy = Shuffles[0]->getType();
  uint64_t ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
  Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
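
  // The wide load must be at least as wide as the Factor shuffle results it
  // feeds; otherwise it cannot cover the whole interleaved group.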
  if (DL.getTypeSizeInBits(Inst->getType()) < Factor * ShuffleVecSize)
    return false;

  // Currently, lowering is supported only for 256-bit vectors of 64-bit
  // elements with Factor = 4 on AVX.
  if (!Subtarget.hasAVX() || ShuffleVecSize != 256 ||
      DL.getTypeSizeInBits(ShuffleEltTy) != 64 || Factor != 4)
    return false;

  return true;
}

bool X86InterleavedAccessGroup::decompose(
    Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
    SmallVectorImpl<Instruction *> &DecomposedVectors) {
  Type *VecTy = VecInst->getType();
  (void)VecTy;
  assert(VecTy->isVectorTy() &&
         DL.getTypeSizeInBits(VecTy) >=
             DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
         "Invalid Inst-size!!!");
  assert(VecTy->getVectorElementType() == SubVecTy->getVectorElementType() &&
         "Element type mismatched!!!");

  if (!isa<LoadInst>(VecInst))
    return false;

  LoadInst *LI = cast<LoadInst>(VecInst);
  Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
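
  // View the wide-load pointer as a pointer to SubVecTy, so that each GEP
  // below steps over exactly one sub-vector.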
  Value *VecBasePtr =
      Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);

  // Generate NumSubVectors loads of SubVecTy type from consecutive memory.
  for (unsigned i = 0; i < NumSubVectors; i++) {
    // TODO: Support inbounds GEP.
    Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
    Instruction *NewLoad =
        Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
    DecomposedVectors.push_back(NewLoad);
  }

  return true;
}

void X86InterleavedAccessGroup::transpose_4x4(
    ArrayRef<Instruction *> Matrix,
    SmallVectorImpl<Value *> &TransposedMatrix) {
  assert(Matrix.size() == 4 && "Invalid matrix size");
  TransposedMatrix.resize(4);

  // dst = src1[0,1],src2[0,1]
  uint32_t IntMask1[] = {0, 1, 4, 5};
  ArrayRef<uint32_t> Mask = makeArrayRef(IntMask1, 4);
  Value *IntrVec1 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
  Value *IntrVec2 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
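  // With the p/q/r/s example from the declaration comment, this gives
  //   IntrVec1 = p1, p2, r1, r2 and IntrVec2 = q1, q2, s1, s2.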

  // dst = src1[2,3],src2[2,3]
  uint32_t IntMask2[] = {2, 3, 6, 7};
  Mask = makeArrayRef(IntMask2, 4);
  Value *IntrVec3 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
  Value *IntrVec4 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
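  // Likewise, IntrVec3 = p3, p4, r3, r4 and IntrVec4 = q3, q4, s3, s4.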

  // dst = src1[0],src2[0],src1[2],src2[2]
  uint32_t IntMask3[] = {0, 4, 2, 6};
  Mask = makeArrayRef(IntMask3, 4);
  TransposedMatrix[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
  TransposedMatrix[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
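  // In the running example, TransposedMatrix[0] = p1, q1, r1, s1 and
  // TransposedMatrix[2] = p3, q3, r3, s3.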

  // dst = src1[1],src2[1],src1[3],src2[3]
  uint32_t IntMask4[] = {1, 5, 3, 7};
  Mask = makeArrayRef(IntMask4, 4);
  TransposedMatrix[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
  TransposedMatrix[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
}

// Lowers this interleaved access group into X86-specific
// instructions/intrinsics.
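// For the supported case (Factor = 4, <4 x i64> shuffles) this decomposes the
// wide load into four <4 x i64> loads, transposes them with vector shuffles,
// and replaces user-shuffle i with transposed vector Indices[i].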
bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
  SmallVector<Instruction *, 4> DecomposedVectors;
  VectorType *VecTy = Shuffles[0]->getType();
  // Try to decompose the wide load into target-register-sized sub-loads.
  if (!decompose(Inst, Factor, VecTy, DecomposedVectors))
    return false;

  SmallVector<Value *, 4> TransposedVectors;
  // Perform a matrix transposition to compute the interleaved results with
  // an optimized sequence of target-specific shuffle instructions.
  transpose_4x4(DecomposedVectors, TransposedVectors);

  // Now replace the unoptimized interleaved vectors with the
  // transposed interleaved vectors.
  for (unsigned i = 0; i < Shuffles.size(); i++)
    Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);

  return true;
}

// Lowers interleaved loads into target-specific instructions/intrinsics. The
// lowering sequence varies depending on the vector types, factor, number of
// shuffles and ISA. Currently, lowering is supported for 4x64-bit vectors with
// Factor = 4 on AVX.
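// An illustrative supported input (Factor = 4, four <4 x i64> user-shuffles):
//   %wide.vec = load <16 x i64>, <16 x i64>* %ptr
//   %v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef,
//                       <4 x i32> <i32 0, i32 4, i32 8, i32 12>
//   ... and likewise %v1..%v3 with start indices 1, 2 and 3.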
bool X86TargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  // Create an interleaved access group.
  IRBuilder<> Builder(LI);
  X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
                                Builder);

  return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
}