//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"

#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNTargetMachine *TM;
  const SISubtarget *ST;
  DivergenceAnalysis *DA;
  Module *Mod;
  bool HasUnsafeFPMath;

  /// \brief Copies exact/nsw/nuw flags (if any) from binary operation \p I to
  /// binary operation \p V.
  ///
  /// \returns Binary operation \p V.
  Value *copyFlags(const BinaryOperator &I, Value *V) const;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation,
  /// false otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to a 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// \brief Promotes uniform binary operation \p I to an equivalent 32 bit
  /// binary operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
  /// \returns True if \p I is promoted to an equivalent 32 bit binary
  /// operation, false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// \brief Promotes uniform 'icmp' operation \p I to a 32 bit 'icmp'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// \brief Promotes uniform 'select' operation \p I to a 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// \brief Promotes uniform 'bitreverse' intrinsic \p I to the 32 bit
  /// 'bitreverse' intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to
  /// 32 bits, replacing \p I with the 32 bit 'bitreverse' intrinsic, shifting
  /// the result of the 32 bit 'bitreverse' intrinsic to the right with zero
  /// fill (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original
  /// type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

public:
  static char ID;
  AMDGPUCodeGenPrepare(const TargetMachine *TM = nullptr) :
    FunctionPass(ID),
    TM(static_cast<const GCNTargetMachine *>(TM)),
    ST(nullptr),
    DA(nullptr),
    Mod(nullptr),
    HasUnsafeFPMath(false) { }

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DivergenceAnalysis>();
    AU.setPreservesAll();
  }
};

} // End anonymous namespace

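// For illustration: given an original operation such as
//   %r = add nsw nuw i16 %a, %b
// copyFlags re-applies nsw/nuw to the widened 32 bit add, and re-applies the
// 'exact' flag for operators such as lshr. Values that were constant folded
// by the builder are returned unchanged.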
Value *AMDGPUCodeGenPrepare::copyFlags(
    const BinaryOperator &I, Value *V) const {
  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(V);
  if (!BinOp) // Possibly constant expression.
    return V;

  if (isa<OverflowingBinaryOperator>(BinOp)) {
    BinOp->setHasNoSignedWrap(I.hasNoSignedWrap());
    BinOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  } else if (isa<PossiblyExactOperator>(BinOp))
    BinOp->setIsExact(I.isExact());

  return V;
}

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
         I.getOpcode() == Instruction::SDiv ||
         I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
         cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

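// For illustration: i16 and <4 x i8> need promotion to i32, while i1, i32,
// and <2 x i32> do not.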
bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  if (T->isIntegerTy() && T->getIntegerBitWidth() > 1 &&
      T->getIntegerBitWidth() <= 16)
    return true;
  if (!T->isVectorTy())
    return false;
  return needsPromotionToI32(cast<VectorType>(T)->getElementType());
}

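// Illustrative IR sketch of the rewrite performed below: a uniform i16
// addition
//   %r = add nsw i16 %a, %b
// becomes
//   %ext0 = zext i16 %a to i32
//   %ext1 = zext i16 %b to i32
//   %add  = add nsw i32 %ext0, %ext1
//   %r    = trunc i32 %add to i16
// Operands of signed operations (ashr, srem) are sign extended instead.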
bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  ExtRes = copyFlags(I, Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1));
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

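// Illustrative IR sketch of the rewrite performed below:
//   %c = icmp slt i16 %a, %b
// becomes
//   %ext0 = sext i16 %a to i32
//   %ext1 = sext i16 %b to i32
//   %c    = icmp slt i32 %ext0, %ext1
// The i1 result type is unchanged, so no truncation is needed.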
bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

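// Illustrative IR sketch of the rewrite performed below: with an unsigned (or
// non-icmp) condition,
//   %r = select i1 %c, i16 %a, i16 %b
// becomes
//   %ext1 = zext i16 %a to i32
//   %ext2 = zext i16 %b to i32
//   %sel  = select i1 %c, i32 %ext1, i32 %ext2
//   %r    = trunc i32 %sel to i16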
bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

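// Illustrative IR sketch of the rewrite performed below:
//   %r = call i16 @llvm.bitreverse.i16(i16 %a)
// becomes
//   %ext = zext i16 %a to i32
//   %rev = call i32 @llvm.bitreverse.i32(i32 %ext)
//   %shr = lshr i32 %rev, 16
//   %r   = trunc i32 %shr to i16
// where the shift amount is 32 minus the original base element bit width.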
bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes = Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv) {
  const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
  if (!CNum)
    return false;

  // Reciprocal f32 is handled separately without denormals.
  return UnsafeDiv || CNum->isExactlyValue(+1.0);
}

// Insert an intrinsic for fast fdiv for safe math situations where we can
// reduce precision. Leave fdiv for situations where the generic node is
// expected to be optimized.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType();

  // TODO: Handle half
  if (!Ty->getScalarType()->isFloatTy())
    return false;

  MDNode *FPMath = FDiv.getMetadata(LLVMContext::MD_fpmath);
  if (!FPMath)
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  float ULP = FPOp->getFPAccuracy();
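  // Keep the generic fdiv whenever the !fpmath metadata demands better than
  // 2.5 ulp of accuracy; the reduced precision lowering below is assumed to
  // guarantee no more than that.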
  if (ULP < 2.5f)
    return false;

  FastMathFlags FMF = FPOp->getFastMathFlags();
  bool UnsafeDiv = HasUnsafeFPMath || FMF.unsafeAlgebra() ||
                   FMF.allowReciprocal();
  if (ST->hasFP32Denormals() && !UnsafeDiv)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  const AMDGPUIntrinsicInfo *II = TM->getIntrinsicInfo();
  Function *Decl
    = II->getDeclaration(Mod, AMDGPUIntrinsic::amdgcn_fdiv_fast, {});

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;

  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is
    // partially constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      Value *NewElt;

      if (shouldKeepFDivF32(NumEltI, UnsafeDiv)) {
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
      } else {
        NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
      }

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else {
    if (!shouldKeepFDivF32(Num, UnsafeDiv))
      NewFDiv = Builder.CreateCall(Decl, { Num, Den });
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  // Report a change only if the fdiv was actually rewritten.
  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  ST = &TM->getSubtarget<SISubtarget>(F);
  DA = &getAnalysis<DivergenceAnalysis>();
  HasUnsafeFPMath = hasUnsafeFPMath(F);

  bool MadeChange = false;

  for (BasicBlock &BB : F) {
    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; I = Next) {
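      // visit() may erase *I, so the iterator to the following instruction is
      // computed before visiting.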
      Next = std::next(I);
      MadeChange |= visit(*I);
    }
  }

  return MadeChange;
}

INITIALIZE_TM_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                         "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_TM_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                       "AMDGPU IR optimizations", false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass(const GCNTargetMachine *TM) {
  return new AMDGPUCodeGenPrepare(TM);
}