//===-- AMDGPUAtomicOptimizer.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass optimizes atomic operations by using a single lane of a wavefront
/// to perform the atomic operation, thus reducing contention on that memory
/// location.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-atomic-optimizer"

using namespace llvm;

namespace {

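// Control codes used with the update_dpp intrinsic, matching the hardware DPP
// encodings: ROW_SR<N> shifts lanes right by N within each row of 16 lanes,
// WF_SR1 shifts right by one lane across the whole wavefront, and
// ROW_BCAST15/ROW_BCAST31 broadcast the last lane of a row (or half-wave) to
// the lanes above it.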
enum DPP_CTRL {
  DPP_ROW_SR1 = 0x111,
  DPP_ROW_SR2 = 0x112,
  DPP_ROW_SR3 = 0x113,
  DPP_ROW_SR4 = 0x114,
  DPP_ROW_SR8 = 0x118,
  DPP_WF_SR1 = 0x138,
  DPP_ROW_BCAST15 = 0x142,
  DPP_ROW_BCAST31 = 0x143
};

struct ReplacementInfo {
  Instruction *I;
  AtomicRMWInst::BinOp Op;
  unsigned ValIdx;
  bool ValDivergent;
};

class AMDGPUAtomicOptimizer : public FunctionPass,
                              public InstVisitor<AMDGPUAtomicOptimizer> {
private:
  SmallVector<ReplacementInfo, 8> ToReplace;
  const LegacyDivergenceAnalysis *DA;
  const DataLayout *DL;
  DominatorTree *DT;
  bool HasDPP;
  bool IsPixelShader;

  void optimizeAtomic(Instruction &I, AtomicRMWInst::BinOp Op, unsigned ValIdx,
                      bool ValDivergent) const;

public:
  static char ID;

  AMDGPUAtomicOptimizer() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<TargetPassConfig>();
  }

  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitIntrinsicInst(IntrinsicInst &I);
};

} // namespace

char AMDGPUAtomicOptimizer::ID = 0;

char &llvm::AMDGPUAtomicOptimizerID = AMDGPUAtomicOptimizer::ID;

bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
  if (skipFunction(F)) {
    return false;
  }

  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  DL = &F.getParent()->getDataLayout();
  DominatorTreeWrapperPass *const DTW =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTW ? &DTW->getDomTree() : nullptr;
  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  HasDPP = ST.hasDPP();
  IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;

  visit(F);

  const bool Changed = !ToReplace.empty();

  for (ReplacementInfo &Info : ToReplace) {
    optimizeAtomic(*Info.I, Info.Op, Info.ValIdx, Info.ValDivergent);
  }

  ToReplace.clear();

  return Changed;
}

void AMDGPUAtomicOptimizer::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Early exit for unhandled address space atomic instructions.
  switch (I.getPointerAddressSpace()) {
  default:
    return;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::LOCAL_ADDRESS:
    break;
  }

  AtomicRMWInst::BinOp Op = I.getOperation();

  switch (Op) {
  default:
    return;
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    break;
  }

  const unsigned PtrIdx = 0;
  const unsigned ValIdx = 1;

  // If the pointer operand is divergent, then each lane is doing an atomic
  // operation on a different address, and we cannot optimize that.
  if (DA->isDivergentUse(&I.getOperandUse(PtrIdx))) {
    return;
  }

  const bool ValDivergent = DA->isDivergentUse(&I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent && (!HasDPP || (DL->getTypeSizeInBits(I.getType()) != 32))) {
    return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

void AMDGPUAtomicOptimizer::visitIntrinsicInst(IntrinsicInst &I) {
  AtomicRMWInst::BinOp Op;

  switch (I.getIntrinsicID()) {
  default:
    return;
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
    Op = AtomicRMWInst::Add;
    break;
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
    Op = AtomicRMWInst::Sub;
    break;
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
    Op = AtomicRMWInst::And;
    break;
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
    Op = AtomicRMWInst::Or;
    break;
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
    Op = AtomicRMWInst::Xor;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
    Op = AtomicRMWInst::Min;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
    Op = AtomicRMWInst::UMin;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
    Op = AtomicRMWInst::Max;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
    Op = AtomicRMWInst::UMax;
    break;
  }

  const unsigned ValIdx = 0;

  const bool ValDivergent = DA->isDivergentUse(&I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent && (!HasDPP || (DL->getTypeSizeInBits(I.getType()) != 32))) {
    return;
  }

  // If any of the other arguments to the intrinsic are divergent, we can't
  // optimize the operation.
  for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
    if (DA->isDivergentUse(&I.getOperandUse(Idx))) {
      return;
    }
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

// Use the builder to create the non-atomic counterpart of the specified
// atomicrmw binary op.
static Value *buildNonAtomicBinOp(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                                  Value *LHS, Value *RHS) {
  CmpInst::Predicate Pred;

  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
    return B.CreateBinOp(Instruction::Add, LHS, RHS);
  case AtomicRMWInst::Sub:
    return B.CreateBinOp(Instruction::Sub, LHS, RHS);
  case AtomicRMWInst::And:
    return B.CreateBinOp(Instruction::And, LHS, RHS);
  case AtomicRMWInst::Or:
    return B.CreateBinOp(Instruction::Or, LHS, RHS);
  case AtomicRMWInst::Xor:
    return B.CreateBinOp(Instruction::Xor, LHS, RHS);

  case AtomicRMWInst::Max:
    Pred = CmpInst::ICMP_SGT;
    break;
  case AtomicRMWInst::Min:
    Pred = CmpInst::ICMP_SLT;
    break;
  case AtomicRMWInst::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case AtomicRMWInst::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  }
  Value *Cond = B.CreateICmp(Pred, LHS, RHS);
  return B.CreateSelect(Cond, LHS, RHS);
}

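// The identity is the value that leaves the other operand of the op unchanged,
// so inactive lanes that get filled with it cannot perturb the scan or the
// final combine: zero for add, sub, or, xor and umax; all ones for and and
// umin; the most negative value for signed max; and the most positive value
// for signed min. Sub reuses add's identity because the cross-lane scan only
// ever accumulates values; the subtraction itself is applied once, by the
// single atomic instruction and by the final per-lane combine.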
static APInt getIdentityValueForAtomicOp(AtomicRMWInst::BinOp Op,
                                         unsigned BitWidth) {
  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::UMax:
    return APInt::getMinValue(BitWidth);
  case AtomicRMWInst::And:
  case AtomicRMWInst::UMin:
    return APInt::getMaxValue(BitWidth);
  case AtomicRMWInst::Max:
    return APInt::getSignedMinValue(BitWidth);
  case AtomicRMWInst::Min:
    return APInt::getSignedMaxValue(BitWidth);
  }
}

void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
                                           AtomicRMWInst::BinOp Op,
                                           unsigned ValIdx,
                                           bool ValDivergent) const {
  // Start building just before the instruction.
  IRBuilder<> B(&I);

  // If we are in a pixel shader, because of how we have to mask out helper
  // lane invocations, we need to record the entry and exit BB's.
  BasicBlock *PixelEntryBB = nullptr;
  BasicBlock *PixelExitBB = nullptr;

  // If we're optimizing an atomic within a pixel shader, we need to wrap the
  // entire atomic operation in a helper-lane check. We do not want any helper
  // lanes that are around only for the purposes of derivatives to take part
  // in any cross-lane communication, and we use a branch on whether the lane is
  // live to do this.
  if (IsPixelShader) {
    // Record I's original position as the entry block.
    PixelEntryBB = I.getParent();

    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
    Instruction *const NonHelperTerminator =
        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

    // Record I's new position as the exit block.
    PixelExitBB = I.getParent();

    I.moveBefore(NonHelperTerminator);
    B.SetInsertPoint(&I);
  }
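  // Note that amdgcn_ps_live is true only in lanes that correspond to real
  // pixels, so helper lanes that exist solely for derivative computations
  // never reach the cross-lane scan or the atomic below.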

  Type *const Ty = I.getType();
  const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
  Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);

  // This is the value in the atomic operation we need to combine in order to
  // reduce the number of atomic operations.
  Value *const V = I.getOperand(ValIdx);

  // We need to know how many lanes are active within the wavefront, and we do
  // this by doing a ballot of active lanes.
  CallInst *const Ballot = B.CreateIntrinsic(
      Intrinsic::amdgcn_icmp, {B.getInt64Ty(), B.getInt32Ty()},
      {B.getInt32(1), B.getInt32(0), B.getInt32(CmpInst::ICMP_NE)});
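  // Comparing the constants 1 and 0 for inequality is true in every active
  // lane, so the i64 returned by the icmp intrinsic has exactly one bit set
  // per active lane: a ballot of the wavefront's exec mask.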

  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
  // below us only if its associated index was less than ours. We do this by
  // using the mbcnt intrinsic.
  Value *const BitCast = B.CreateBitCast(Ballot, VecTy);
  Value *const ExtractLo = B.CreateExtractElement(BitCast, B.getInt32(0));
  Value *const ExtractHi = B.CreateExtractElement(BitCast, B.getInt32(1));
  CallInst *const PartialMbcnt = B.CreateIntrinsic(
      Intrinsic::amdgcn_mbcnt_lo, {}, {ExtractLo, B.getInt32(0)});
  Value *const Mbcnt =
      B.CreateIntCast(B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {},
                                        {ExtractHi, PartialMbcnt}),
                      Ty, false);
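  // For example, if only lanes 0, 3 and 5 of the wavefront are active, Mbcnt
  // evaluates to 0, 1 and 2 in those lanes respectively.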

  Value *const Identity = B.getInt(getIdentityValueForAtomicOp(Op, TyBitWidth));

  Value *ExclScan = nullptr;
  Value *NewV = nullptr;

  // If we have a divergent value in each lane, we need to combine the value
  // using DPP.
  if (ValDivergent) {
    // The lane scan below is only correct for an associative operation, so
    // for a subtraction we accumulate with add instead; the subtraction
    // itself is applied once, by the atomic instruction and by the final
    // per-lane combine.
    const AtomicRMWInst::BinOp ScanOp =
        Op == AtomicRMWInst::Sub ? AtomicRMWInst::Add : Op;

    // First we need to set all inactive invocations to the identity value, so
    // that they can correctly contribute to the final result.
    CallInst *const SetInactive =
        B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty, {V, Identity});

    ExclScan =
        B.CreateIntrinsic(Intrinsic::amdgcn_update_dpp, Ty,
                          {Identity, SetInactive, B.getInt32(DPP_WF_SR1),
                           B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});

    const unsigned Iters = 6;
    const unsigned DPPCtrl[Iters] = {DPP_ROW_SR1,     DPP_ROW_SR2,
                                     DPP_ROW_SR4,     DPP_ROW_SR8,
                                     DPP_ROW_BCAST15, DPP_ROW_BCAST31};
    const unsigned RowMask[Iters] = {0xf, 0xf, 0xf, 0xf, 0xa, 0xc};
    const unsigned BankMask[Iters] = {0xf, 0xf, 0xe, 0xc, 0xf, 0xf};
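
    // Each row_shr step combines a lane's partial result with the partial
    // result of the lane 1, 2, 4 and then 8 positions below it, giving a
    // log2-step scan within each row of 16 lanes; the two row broadcasts then
    // propagate row results upwards, extending the scan across the whole
    // 64-lane wavefront.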

    // This loop performs an exclusive scan across the wavefront, with all lanes
    // active (by using the WWM intrinsic).
    for (unsigned Idx = 0; Idx < Iters; Idx++) {
      CallInst *const DPP = B.CreateIntrinsic(
          Intrinsic::amdgcn_update_dpp, Ty,
          {Identity, ExclScan, B.getInt32(DPPCtrl[Idx]),
           B.getInt32(RowMask[Idx]), B.getInt32(BankMask[Idx]), B.getFalse()});

      ExclScan = buildNonAtomicBinOp(B, ScanOp, ExclScan, DPP);
    }

    NewV = buildNonAtomicBinOp(B, ScanOp, SetInactive, ExclScan);

    // Read the value from the last lane, which has accumulated the values of
    // each active lane in the wavefront. This will be our new value which we
    // will provide to the atomic operation.
    if (TyBitWidth == 64) {
      Value *const ExtractLo = B.CreateTrunc(NewV, B.getInt32Ty());
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(NewV, B.getInt64(32)), B.getInt32Ty());
      CallInst *const ReadLaneLo = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractLo, B.getInt32(63)});
      CallInst *const ReadLaneHi = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractHi, B.getInt32(63)});
      Value *const PartialInsert = B.CreateInsertElement(
          UndefValue::get(VecTy), ReadLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadLaneHi, B.getInt32(1));
      NewV = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                               {NewV, B.getInt32(63)});
    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }

    // Finally mark the readlanes in the WWM section.
    NewV = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, NewV);
  } else {
    switch (Op) {
    default:
      llvm_unreachable("Unhandled atomic op");

    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub: {
      // The new value we will be contributing to the atomic operation is the
      // old value times the number of active lanes.
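      // For example, if 17 lanes are active and each would add a uniform V,
      // one atomic add (or sub) of 17 * V leaves memory in the same state as
      // 17 separate atomics would.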
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = B.CreateMul(V, Ctpop);
      break;
    }

    case AtomicRMWInst::And:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Max:
    case AtomicRMWInst::Min:
    case AtomicRMWInst::UMax:
    case AtomicRMWInst::UMin:
      // These operations with a uniform value are idempotent: doing the atomic
      // operation multiple times has the same effect as doing it once.
      NewV = V;
      break;

    case AtomicRMWInst::Xor:
      // The new value we will be contributing to the atomic operation is the
      // old value times the parity of the number of active lanes.
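      // An even number of xors of the same value cancels out and an odd
      // number is equivalent to a single xor, hence the multiply by the low
      // bit of the population count.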
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = B.CreateMul(V, B.CreateAnd(Ctpop, 1));
      break;
    }
  }

  // We only want a single lane to enter our new control flow, and we do this
  // by checking if there are any active lanes below us. Only one lane will
  // have 0 active lanes below us, so that will be the only one to progress.
  Value *const Cond = B.CreateICmpEQ(Mbcnt, B.getIntN(TyBitWidth, 0));

  // Store I's original basic block before we split the block.
  BasicBlock *const EntryBB = I.getParent();

  // We need to introduce some new control flow to force a single lane to be
  // active. We do this by splitting I's basic block at I, and introducing the
  // new block such that:
  // entry --> single_lane -\
  //       \------------------> exit
  Instruction *const SingleLaneTerminator =
      SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

  // Move the IR builder into single_lane next.
  B.SetInsertPoint(SingleLaneTerminator);

  // Clone the original atomic operation into single lane, replacing the
  // original value with our newly created one.
  Instruction *const NewI = I.clone();
  B.Insert(NewI);
  NewI->setOperand(ValIdx, NewV);

  // Move the IR builder into exit next, and start inserting just before the
  // original instruction.
  B.SetInsertPoint(&I);

  const bool NeedResult = !I.use_empty();
  if (NeedResult) {
    // Create a PHI node to get our new atomic result into the exit block.
    PHINode *const PHI = B.CreatePHI(Ty, 2);
    PHI->addIncoming(UndefValue::get(Ty), EntryBB);
    PHI->addIncoming(NewI, SingleLaneTerminator->getParent());

    // We need to broadcast the value from the lowest active lane (the first
    // lane) to all other lanes in the wavefront. We use an intrinsic for this,
    // but have to handle 64-bit broadcasts with two calls to this intrinsic.
    Value *BroadcastI = nullptr;

    if (TyBitWidth == 64) {
      Value *const ExtractLo = B.CreateTrunc(PHI, B.getInt32Ty());
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(PHI, B.getInt64(32)), B.getInt32Ty());
      CallInst *const ReadFirstLaneLo =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
      CallInst *const ReadFirstLaneHi =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractHi);
      Value *const PartialInsert = B.CreateInsertElement(
          UndefValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
      BroadcastI = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      BroadcastI = B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, PHI);
    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }

    // Now that we have the result of our single atomic operation, we need to
    // reconstruct the value each individual lane would have seen. We combine
    // the lane offset we previously calculated with the atomic result value
    // we read from the first lane to recover each lane's own result.
    Value *LaneOffset = nullptr;
    if (ValDivergent) {
      LaneOffset = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, ExclScan);
    } else {
      switch (Op) {
      default:
        llvm_unreachable("Unhandled atomic op");
      case AtomicRMWInst::Add:
      case AtomicRMWInst::Sub:
        LaneOffset = B.CreateMul(V, Mbcnt);
        break;
      case AtomicRMWInst::And:
      case AtomicRMWInst::Or:
      case AtomicRMWInst::Max:
      case AtomicRMWInst::Min:
      case AtomicRMWInst::UMax:
      case AtomicRMWInst::UMin:
        LaneOffset = B.CreateSelect(Cond, Identity, V);
        break;
      case AtomicRMWInst::Xor:
        LaneOffset = B.CreateMul(V, B.CreateAnd(Mbcnt, 1));
        break;
      }
    }
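    // For example, for a uniform umin the first active lane's offset is the
    // identity, so its result is the prior memory value itself, while every
    // other lane's result is min(prior value, V), exactly what a series of
    // per-lane atomics would have returned.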
    Value *const Result = buildNonAtomicBinOp(B, Op, BroadcastI, LaneOffset);

    if (IsPixelShader) {
      // Need a final PHI to reconverge to above the helper lane branch mask.
      B.SetInsertPoint(PixelExitBB->getFirstNonPHI());

      PHINode *const PHI = B.CreatePHI(Ty, 2);
      PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
      PHI->addIncoming(Result, I.getParent());
      I.replaceAllUsesWith(PHI);
    } else {
      // Replace the original atomic instruction with the new one.
      I.replaceAllUsesWith(Result);
    }
  }

  // And delete the original.
  I.eraseFromParent();
}

INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                      "AMDGPU atomic optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                    "AMDGPU atomic optimizations", false, false)

FunctionPass *llvm::createAMDGPUAtomicOptimizerPass() {
  return new AMDGPUAtomicOptimizer();
}