//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <iterator>

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNSubtarget *ST = nullptr;
  AssumptionCache *AC = nullptr;
  LegacyDivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation, false
  /// otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Promotes uniform binary operation \p I to an equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with an equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
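  /// For example, a uniform 'add i16 %a, %b' is replaced with
  /// 'trunc (add i32 (zext %a to i32), (zext %b to i32)) to i16'.
  ///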
  /// \returns True if \p I is promoted to an equivalent 32 bit binary
  /// operation, false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to 32
  /// bits, replacing \p I with a 32 bit 'bitreverse' intrinsic, shifting the
  /// result of the 32 bit 'bitreverse' intrinsic to the right with zero fill
  /// (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original
  /// type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

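  /// Conservative bounds on the number of significant bits in \p Op, used by
  /// replaceMulWithMul24 to decide whether a 32 bit multiply of narrow values
  /// can be matched to the 24 bit multiply instructions.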
  unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
  unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
  bool isI24(Value *V, unsigned ScalarSize) const;
  bool isU24(Value *V, unsigned ScalarSize) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.i24.
  /// SelectionDAG has an issue where an 'and' asserting that the bits are
  /// known to be zero is not enough to recover this pattern, so the
  /// replacement is done here in IR instead.
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Expands 24 bit div or rem.
  Value *expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value *expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a uniform, small-type load from constant memory to a full
  /// 32 bits, then truncate the result back to the original type. This allows
  /// a scalar load to be selected instead of a vector load.
  ///
  /// \returns True if \p I can be widened.
  bool canWidenScalarExtLoad(LoadInst &I) const;

public:
  static char ID;

  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

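// The promoted operands were extended from 16 or fewer bits. For example, a
// promoted add of two zero-extended i16 values cannot wrap in either sense,
// so it can be marked both nsw and nuw, while a promoted sub can go negative
// and so is nuw only if the original sub was.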
// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  unsigned Align = I.getAlignment() ?
                   I.getAlignment() : DL.getABITypeAlignment(Ty);

  return I.isSimple() && TySize < 32 && Align >= 4 && DA->isUniform(&I);
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
                                               unsigned ScalarSize) const {
  KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
  return ScalarSize - Known.countMinLeadingZeros();
}

unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
                                             unsigned ScalarSize) const {
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
}

bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
  return ScalarSize >= 24 && // Types less than 24-bit should be treated
                             // as unsigned 24-bit values.
         numBitsSigned(V, ScalarSize) < 24;
}

bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
  return numBitsUnsigned(V, ScalarSize) <= 24;
}

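// Scalarize a possibly-vector value into Values; a scalar is passed through
// unchanged.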
static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  VectorType *VT = dyn_cast<VectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

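// Rebuild a value of type Ty from the scalarized Values; the inverse of
// extractValues.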
static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (Values.size() == 1)
    return Values[0];

  Value *NewVal = UndefValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
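  // Multiplies of 16-bit or narrower types are already handled by the 16-bit
  // instructions when available, so only wider multiplies of narrow values
  // are worth converting.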
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (DA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  // TODO: Should this try to match mulhi24?
  if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_u24;
  } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_i24;
  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});

    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    }
  }

  I.replaceAllUsesWith(insertValues(Builder, Ty, ResultVals));
  I.eraseFromParent();

  return true;
}

static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv, bool HasDenormals) {
  const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
  if (!CNum)
    return HasDenormals;

  if (UnsafeDiv)
    return true;

  bool IsOne = CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0);

  // Reciprocal f32 is handled separately without denormals.
  return HasDenormals ^ IsOne;
}

// Insert an intrinsic for fast fdiv for safe math situations where we can
// reduce precision. Leave fdiv for situations where the generic node is
// expected to be optimized.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType();

  if (!Ty->getScalarType()->isFloatTy())
    return false;

  MDNode *FPMath = FDiv.getMetadata(LLVMContext::MD_fpmath);
  if (!FPMath)
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
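  // The fast fdiv expansion is only accurate to about 2.5 ulp, so it is only
  // legal when the !fpmath metadata allows at least that much error.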
  float ULP = FPOp->getFPAccuracy();
  if (ULP < 2.5f)
    return false;

  FastMathFlags FMF = FPOp->getFastMathFlags();
  bool UnsafeDiv = HasUnsafeFPMath || FMF.isFast() ||
                   FMF.allowReciprocal();

  // With UnsafeDiv, the node will be optimized to just rcp and mul.
  if (UnsafeDiv)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;

  bool HasDenormals = ST->hasFP32Denormals();
  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is
    // partially constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      Value *NewElt;

      if (shouldKeepFDivF32(NumEltI, UnsafeDiv, HasDenormals)) {
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
      } else {
        NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
      }

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else {
    if (!shouldKeepFDivF32(Num, UnsafeDiv, HasDenormals))
      NewFDiv = Builder.CreateCall(Decl, { Num, Den });
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

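// Zero-extend both i32 operands to i64, multiply, and return the low and high
// 32-bit halves of the full 64-bit product as a (Lo, Hi) pair.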
static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::make_pair(Lo, Hi);
}

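// Convenience wrapper for getMul64 returning only the high half, i.e. mulhu.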
static Value *getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  assert(Num->getType()->isIntegerTy(32));

  const DataLayout &DL = Mod->getDataLayout();
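  // At least 9 sign bits in a 32-bit value leaves at most 24 significant
  // bits, which is what the f32 mantissa can represent exactly.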
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < 9)
    return nullptr;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < 9)
    return nullptr;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = 32 - SignBits;
  if (IsSigned)
    ++DivBits;

  Type *Ty = Num->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Value *RCP = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), FB);
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  Value *FR = Builder.CreateIntrinsic(Intrinsic::amdgcn_fmad_ftz,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation; it's easier to recompute it.
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  // Truncate to the number of bits this divide really is.
  if (IsSigned) {
    Res = Builder.CreateTrunc(Res, Builder.getIntNTy(DivBits));
    Res = Builder.CreateSExt(Res, Ty);
  } else {
    ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
    Res = Builder.CreateAnd(Res, TruncMask);
  }

  return Res;
}

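// Expand a 32-bit (or narrower) integer division or remainder using the
// classic reciprocal-estimate sequence: form an f32 reciprocal of the
// denominator, scale it into a 32-bit fixed-point reciprocal, bound its
// rounding error, and correct the resulting quotient or remainder by at most
// one.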
Value *AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (isa<Constant>(Den))
    return nullptr; // Keep it for optimization

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = Num->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() < 32) {
    if (IsSigned) {
      Num = Builder.CreateSExt(Num, I32Ty);
      Den = Builder.CreateSExt(Den, I32Ty);
    } else {
      Num = Builder.CreateZExt(Num, I32Ty);
      Den = Builder.CreateZExt(Den, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, Num, Den, IsDiv, IsSigned)) {
    Res = Builder.CreateTrunc(Res, Ty);
    return Res;
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);
  ConstantInt *MinusOne = Builder.getInt32(~0);

  Value *Sign = nullptr;
  if (IsSigned) {
    ConstantInt *K31 = Builder.getInt32(31);
    Value *LHSign = Builder.CreateAShr(Num, K31);
    Value *RHSign = Builder.CreateAShr(Den, K31);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(LHSign, RHSign) : LHSign;

    Num = Builder.CreateAdd(Num, LHSign);
    Den = Builder.CreateAdd(Den, RHSign);

    Num = Builder.CreateXor(Num, LHSign);
    Den = Builder.CreateXor(Den, RHSign);
  }

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  Value *DEN_F32 = Builder.CreateUIToFP(Den, F32Ty);
  Value *RCP_F32 = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), DEN_F32);
  Constant *UINT_MAX_PLUS_1 = ConstantFP::get(F32Ty, BitsToFloat(0x4f800000));
  Value *RCP_SCALE = Builder.CreateFMul(RCP_F32, UINT_MAX_PLUS_1);
  Value *RCP = Builder.CreateFPToUI(RCP_SCALE, I32Ty);

  // RCP_LO, RCP_HI = mul(RCP, Den)
  Value *RCP_LO, *RCP_HI;
  std::tie(RCP_LO, RCP_HI) = getMul64(Builder, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  Value *NEG_RCP_LO = Builder.CreateNeg(RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  Value *RCP_HI_0_CC = Builder.CreateICmpEQ(RCP_HI, Zero);
  Value *ABS_RCP_LO = Builder.CreateSelect(RCP_HI_0_CC, NEG_RCP_LO, RCP_LO);

  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  Value *E = getMulHu(Builder, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  Value *RCP_A_E = Builder.CreateAdd(RCP, E);

  // RCP_S_E = RCP - E
  Value *RCP_S_E = Builder.CreateSub(RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  Value *Tmp0 = Builder.CreateSelect(RCP_HI_0_CC, RCP_A_E, RCP_S_E);

  // Quotient = mulhu(Tmp0, Num)
  Value *Quotient = getMulHu(Builder, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  Value *Num_S_Remainder = Builder.CreateMul(Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  Value *Remainder = Builder.CreateSub(Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  Value *Rem_GE_Den_CC = Builder.CreateICmpUGE(Remainder, Den);
  Value *Remainder_GE_Den = Builder.CreateSelect(Rem_GE_Den_CC, MinusOne, Zero);

  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  Value *Num_GE_Num_S_Rem_CC = Builder.CreateICmpUGE(Num, Num_S_Remainder);
  Value *Remainder_GE_Zero = Builder.CreateSelect(Num_GE_Num_S_Rem_CC,
                                                  MinusOne, Zero);

  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  Value *Tmp1 = Builder.CreateAnd(Remainder_GE_Den, Remainder_GE_Zero);
  Value *Tmp1_0_CC = Builder.CreateICmpEQ(Tmp1, Zero);

  Value *Res;
  if (IsDiv) {
    // Quotient_A_One = Quotient + 1
    Value *Quotient_A_One = Builder.CreateAdd(Quotient, One);

    // Quotient_S_One = Quotient - 1
    Value *Quotient_S_One = Builder.CreateSub(Quotient, One);

    // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
    Value *Div = Builder.CreateSelect(Tmp1_0_CC, Quotient, Quotient_A_One);

    // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
    Res = Builder.CreateSelect(Num_GE_Num_S_Rem_CC, Div, Quotient_S_One);
  } else {
    // Remainder_S_Den = Remainder - Den
    Value *Remainder_S_Den = Builder.CreateSub(Remainder, Den);

    // Remainder_A_Den = Remainder + Den
    Value *Remainder_A_Den = Builder.CreateAdd(Remainder, Den);

    // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
    Value *Rem = Builder.CreateSelect(Tmp1_0_CC, Remainder, Remainder_S_Den);

    // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
    Res = Builder.CreateSelect(Num_GE_Num_S_Rem_CC, Rem, Remainder_A_Den);
  }

  if (IsSigned) {
    Res = Builder.CreateXor(Res, Sign);
    Res = Builder.CreateSub(Res, Sign);
  }

  Res = Builder.CreateTrunc(Res, Ty);

  return Res;
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I) && promoteUniformOpToI32(I))
    return true;

  if (UseMul24Intrin && replaceMulWithMul24(I))
    return true;

  bool Changed = false;
  Instruction::BinaryOps Opc = I.getOpcode();
  Type *Ty = I.getType();
  Value *NewDiv = nullptr;
  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
      Ty->getScalarSizeInBits() <= 32) {
    Value *Num = I.getOperand(0);
    Value *Den = I.getOperand(1);
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
      NewDiv = UndefValue::get(VT);

      for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
        Value *NumEltN = Builder.CreateExtractElement(Num, N);
        Value *DenEltN = Builder.CreateExtractElement(Den, N);
        Value *NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
        if (!NewElt)
          NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
      }
    } else {
      NewDiv = expandDivRem32(Builder, I, Num, Den);
    }

    if (NewDiv) {
      I.replaceAllUsesWith(NewDiv);
      I.eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
    Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
    LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
          mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->getValue().isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
          // Don't make assumptions about the high bits.
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
        };

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(Mod->getContext(), LowAndHigh));
      }
    }

    int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    I.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
  ST = &TM.getSubtarget<GCNSubtarget>(F);
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  HasUnsafeFPMath = hasUnsafeFPMath(F);

  bool MadeChange = false;

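  // Capture the next iterator before visiting, since a visitor may erase or
  // replace the current instruction.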
  for (BasicBlock &BB : F) {
    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; I = Next) {
      Next = std::next(I);
      MadeChange |= visit(*I);
    }
  }

  return MadeChange;
}

INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                      "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                    false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
  return new AMDGPUCodeGenPrepare();
}