//===- SIMemoryLegalizer.cpp ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Memory legalizer - implements memory model. More information can be
/// found here:
/// http://llvm.org/docs/AMDGPUUsage.html#memory-model
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUMachineModuleInfo.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <list>

using namespace llvm;
using namespace llvm::AMDGPU;

#define DEBUG_TYPE "si-memory-legalizer"
#define PASS_NAME "SI Memory Legalizer"

namespace {

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

/// Memory operation flags. Can be ORed together.
enum class SIMemOp {
  NONE = 0u,
  LOAD = 1u << 0,
  STORE = 1u << 1,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ STORE)
};

/// Position to insert a new instruction relative to an existing
/// instruction.
enum class Position {
  BEFORE,
  AFTER
};

/// The atomic synchronization scopes supported by the AMDGPU target.
enum class SIAtomicScope {
  NONE,
  SINGLETHREAD,
  WAVEFRONT,
  WORKGROUP,
  AGENT,
  SYSTEM
};

/// The distinct address spaces supported by the AMDGPU target for
/// atomic memory operations. Can be ORed together.
enum class SIAtomicAddrSpace {
  NONE = 0u,
  GLOBAL = 1u << 0,
  LDS = 1u << 1,
  SCRATCH = 1u << 2,
  GDS = 1u << 3,
  OTHER = 1u << 4,

  /// The address spaces that can be accessed by a FLAT instruction.
  FLAT = GLOBAL | LDS | SCRATCH,

  /// The address spaces that support atomic instructions.
  ATOMIC = GLOBAL | LDS | SCRATCH | GDS,

  /// All address spaces.
  ALL = GLOBAL | LDS | SCRATCH | GDS | OTHER,

  LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ ALL)
};
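
// A minimal usage sketch for the bitmask enums above (illustrative only; the
// local variable names are hypothetical). This is the test-and-combine idiom
// used throughout this pass:
//
//   SIAtomicAddrSpace AS = SIAtomicAddrSpace::GLOBAL | SIAtomicAddrSpace::LDS;
//   if ((AS & SIAtomicAddrSpace::SCRATCH) != SIAtomicAddrSpace::NONE)
//     ...; // Not reached: AS does not include the scratch address space.
//
//   SIMemOp Op = SIMemOp::LOAD | SIMemOp::STORE;
//   if ((Op & SIMemOp::LOAD) != SIMemOp::NONE)
//     ...; // Reached: Op includes loads.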

/// Sets named bit \p BitName to "true" if present in instruction \p MI.
/// \returns True if \p MI is modified, false otherwise.
template <uint16_t BitName>
bool enableNamedBit(const MachineBasicBlock::iterator &MI) {
  int BitIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), BitName);
  if (BitIdx == -1)
    return false;

  MachineOperand &Bit = MI->getOperand(BitIdx);
  if (Bit.getImm() != 0)
    return false;

  Bit.setImm(1);
  return true;
}

class SIMemOpInfo final {
private:

  friend class SIMemOpAccess;

  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
  SIAtomicScope Scope = SIAtomicScope::SYSTEM;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  SIAtomicAddrSpace InstrAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  bool IsNonTemporal = false;

  SIMemOpInfo(AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent,
              SIAtomicScope Scope = SIAtomicScope::SYSTEM,
              SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::ATOMIC,
              SIAtomicAddrSpace InstrAddrSpace = SIAtomicAddrSpace::ALL,
              bool IsCrossAddressSpaceOrdering = true,
              AtomicOrdering FailureOrdering =
                AtomicOrdering::SequentiallyConsistent,
              bool IsNonTemporal = false)
    : Ordering(Ordering), FailureOrdering(FailureOrdering),
      Scope(Scope), OrderingAddrSpace(OrderingAddrSpace),
      InstrAddrSpace(InstrAddrSpace),
      IsCrossAddressSpaceOrdering(IsCrossAddressSpaceOrdering),
      IsNonTemporal(IsNonTemporal) {
    // There is no cross address space ordering if the ordering
    // address space is the same as the instruction address space and
    // only contains a single address space.
    if ((OrderingAddrSpace == InstrAddrSpace) &&
        isPowerOf2_32(uint32_t(InstrAddrSpace)))
      this->IsCrossAddressSpaceOrdering = false;
  }
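
  // Illustrative example of the deduction above: for an atomic LDS-only
  // access, OrderingAddrSpace == InstrAddrSpace == SIAtomicAddrSpace::LDS,
  // which is a single bit (isPowerOf2_32(2u) holds), so
  // IsCrossAddressSpaceOrdering is forced to false even if the caller
  // passed true.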

public:
  /// \returns Atomic synchronization scope of the machine instruction used to
  /// create this SIMemOpInfo.
  SIAtomicScope getScope() const {
    return Scope;
  }

  /// \returns Ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo.
  AtomicOrdering getOrdering() const {
    return Ordering;
  }

  /// \returns Failure ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo.
  AtomicOrdering getFailureOrdering() const {
    return FailureOrdering;
  }

  /// \returns The address spaces accessed by the machine
  /// instruction used to create this SIMemOpInfo.
  SIAtomicAddrSpace getInstrAddrSpace() const {
    return InstrAddrSpace;
  }

  /// \returns The address spaces that must be ordered by the machine
  /// instruction used to create this SIMemOpInfo.
  SIAtomicAddrSpace getOrderingAddrSpace() const {
    return OrderingAddrSpace;
  }

  /// \returns True iff memory ordering of operations on
  /// different address spaces is required.
  bool getIsCrossAddressSpaceOrdering() const {
    return IsCrossAddressSpaceOrdering;
  }

  /// \returns True if the memory access of the machine instruction used to
  /// create this SIMemOpInfo is non-temporal, false otherwise.
  bool isNonTemporal() const {
    return IsNonTemporal;
  }

  /// \returns True if the ordering constraint of the machine instruction used
  /// to create this SIMemOpInfo is unordered or higher, false otherwise.
  bool isAtomic() const {
    return Ordering != AtomicOrdering::NotAtomic;
  }

};

class SIMemOpAccess final {
private:
  AMDGPUMachineModuleInfo *MMI = nullptr;

  /// Reports unsupported message \p Msg for \p MI to LLVM context.
  void reportUnsupported(const MachineBasicBlock::iterator &MI,
                         const char *Msg) const;

  /// Inspects the target synchronization scope \p SSID and determines
  /// the SI atomic scope it corresponds to, the address spaces it
  /// covers, and whether the memory ordering applies between address
  /// spaces.
  Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
  toSIAtomicScope(SyncScope::ID SSID, SIAtomicAddrSpace InstrScope) const;

  /// \returns The bit of the SI atomic address space bit set corresponding
  /// to address space \p AS.
  SIAtomicAddrSpace toSIAtomicAddrSpace(unsigned AS) const;

  /// \returns Info constructed from \p MI, which has at least one machine
  /// memory operand.
  Optional<SIMemOpInfo> constructFromMIWithMMO(
      const MachineBasicBlock::iterator &MI) const;

public:
  /// Construct class to support accessing the machine memory operands
  /// of instructions in the machine function \p MF.
  SIMemOpAccess(MachineFunction &MF);

  /// \returns Load info if \p MI is a load operation, "None" otherwise.
  Optional<SIMemOpInfo> getLoadInfo(
      const MachineBasicBlock::iterator &MI) const;

  /// \returns Store info if \p MI is a store operation, "None" otherwise.
  Optional<SIMemOpInfo> getStoreInfo(
      const MachineBasicBlock::iterator &MI) const;

  /// \returns Atomic fence info if \p MI is an atomic fence operation,
  /// "None" otherwise.
  Optional<SIMemOpInfo> getAtomicFenceInfo(
      const MachineBasicBlock::iterator &MI) const;

  /// \returns Atomic cmpxchg/rmw info if \p MI is an atomic cmpxchg or
  /// rmw operation, "None" otherwise.
  Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo(
      const MachineBasicBlock::iterator &MI) const;
};

class SICacheControl {
protected:

  /// Instruction info.
  const SIInstrInfo *TII = nullptr;

  IsaVersion IV;

  SICacheControl(const GCNSubtarget &ST);

public:

  /// Create a cache control for the subtarget \p ST.
  static std::unique_ptr<SICacheControl> create(const GCNSubtarget &ST);

  /// Update \p MI memory load instruction to bypass any caches up to
  /// the \p Scope memory scope for address spaces \p AddrSpace.
  /// Return true iff the instruction was modified.
  virtual bool enableLoadCacheBypass(const MachineBasicBlock::iterator &MI,
                                     SIAtomicScope Scope,
                                     SIAtomicAddrSpace AddrSpace) const = 0;

  /// Update \p MI memory instruction to indicate it is
  /// nontemporal. Return true iff the instruction was modified.
  virtual bool enableNonTemporal(const MachineBasicBlock::iterator &MI)
    const = 0;

  /// Inserts any necessary instructions at position \p Pos relative
  /// to instruction \p MI to ensure any caches associated with
  /// address spaces \p AddrSpace for memory scopes up to memory scope
  /// \p Scope are invalidated. Returns true iff any instructions
  /// were inserted.
  virtual bool insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                                     SIAtomicScope Scope,
                                     SIAtomicAddrSpace AddrSpace,
                                     Position Pos) const = 0;

  /// Inserts any necessary instructions at position \p Pos relative
  /// to instruction \p MI to ensure memory instructions of kind \p Op
  /// associated with address spaces \p AddrSpace have completed as
  /// observed by other memory instructions executing in memory scope
  /// \p Scope. \p IsCrossAddrSpaceOrdering indicates if the memory
  /// ordering is between address spaces. Returns true iff any
  /// instructions were inserted.
  virtual bool insertWait(MachineBasicBlock::iterator &MI,
                          SIAtomicScope Scope,
                          SIAtomicAddrSpace AddrSpace,
                          SIMemOp Op,
                          bool IsCrossAddrSpaceOrdering,
                          Position Pos) const = 0;

  /// Virtual destructor to allow derivations to be deleted.
  virtual ~SICacheControl() = default;

};

class SIGfx6CacheControl : public SICacheControl {
protected:

  /// Sets GLC bit to "true" if present in \p MI. Returns true if \p MI
  /// is modified, false otherwise.
  bool enableGLCBit(const MachineBasicBlock::iterator &MI) const {
    return enableNamedBit<AMDGPU::OpName::glc>(MI);
  }

  /// Sets SLC bit to "true" if present in \p MI. Returns true if \p MI
  /// is modified, false otherwise.
  bool enableSLCBit(const MachineBasicBlock::iterator &MI) const {
    return enableNamedBit<AMDGPU::OpName::slc>(MI);
  }

public:

  SIGfx6CacheControl(const GCNSubtarget &ST) : SICacheControl(ST) {}

  bool enableLoadCacheBypass(const MachineBasicBlock::iterator &MI,
                             SIAtomicScope Scope,
                             SIAtomicAddrSpace AddrSpace) const override;

  bool enableNonTemporal(const MachineBasicBlock::iterator &MI) const override;

  bool insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                             SIAtomicScope Scope,
                             SIAtomicAddrSpace AddrSpace,
                             Position Pos) const override;

  bool insertWait(MachineBasicBlock::iterator &MI,
                  SIAtomicScope Scope,
                  SIAtomicAddrSpace AddrSpace,
                  SIMemOp Op,
                  bool IsCrossAddrSpaceOrdering,
                  Position Pos) const override;
};

class SIGfx7CacheControl : public SIGfx6CacheControl {
public:

  SIGfx7CacheControl(const GCNSubtarget &ST) : SIGfx6CacheControl(ST) {}

  bool insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                             SIAtomicScope Scope,
                             SIAtomicAddrSpace AddrSpace,
                             Position Pos) const override;

};

class SIMemoryLegalizer final : public MachineFunctionPass {
private:

  /// Cache Control.
  std::unique_ptr<SICacheControl> CC = nullptr;

  /// List of atomic pseudo instructions.
  std::list<MachineBasicBlock::iterator> AtomicPseudoMIs;

  /// Return true iff instruction \p MI is an atomic instruction that
  /// returns a result.
  bool isAtomicRet(const MachineInstr &MI) const {
    return AMDGPU::getAtomicNoRetOp(MI.getOpcode()) != -1;
  }

  /// Removes all processed atomic pseudo instructions from the current
  /// function. Returns true if current function is modified, false otherwise.
  bool removeAtomicPseudoMIs();

  /// Expands load operation \p MI. Returns true if instructions are
  /// added/deleted or \p MI is modified, false otherwise.
  bool expandLoad(const SIMemOpInfo &MOI,
                  MachineBasicBlock::iterator &MI);
  /// Expands store operation \p MI. Returns true if instructions are
  /// added/deleted or \p MI is modified, false otherwise.
  bool expandStore(const SIMemOpInfo &MOI,
                   MachineBasicBlock::iterator &MI);
  /// Expands atomic fence operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicFence(const SIMemOpInfo &MOI,
                         MachineBasicBlock::iterator &MI);
  /// Expands atomic cmpxchg or rmw operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicCmpxchgOrRmw(const SIMemOpInfo &MOI,
                                MachineBasicBlock::iterator &MI);

public:
  static char ID;

  SIMemoryLegalizer() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return PASS_NAME;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
                                      const char *Msg) const {
  const Function &Func = MI->getParent()->getParent()->getFunction();
  DiagnosticInfoUnsupported Diag(Func, Msg, MI->getDebugLoc());
  Func.getContext().diagnose(Diag);
}

Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
                               SIAtomicAddrSpace InstrScope) const {
  /// TODO: For now assume OpenCL memory model which treats each
  /// address space as having a separate happens-before relation, and
  /// so an instruction only has ordering with respect to the address
  /// space it accesses, and if it accesses multiple address spaces it
  /// does not require ordering of operations in different address
  /// spaces.
  if (SSID == SyncScope::System)
    return std::make_tuple(SIAtomicScope::SYSTEM,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == MMI->getAgentSSID())
    return std::make_tuple(SIAtomicScope::AGENT,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == MMI->getWorkgroupSSID())
    return std::make_tuple(SIAtomicScope::WORKGROUP,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == MMI->getWavefrontSSID())
    return std::make_tuple(SIAtomicScope::WAVEFRONT,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == SyncScope::SingleThread)
    return std::make_tuple(SIAtomicScope::SINGLETHREAD,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  /// TODO: To support the HSA memory model, add additional memory
  /// scopes that specify they do require cross address space
  /// ordering.
  return None;
}
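
// Illustrative mapping (not exhaustive): an IR instruction such as
//   load atomic i32, i32 addrspace(1)* %p syncscope("workgroup") acquire
// reaches this function with SSID == MMI->getWorkgroupSSID() and
// InstrScope == SIAtomicAddrSpace::GLOBAL, and yields the tuple
// (SIAtomicScope::WORKGROUP, SIAtomicAddrSpace::GLOBAL, false), since
// GLOBAL is already a subset of ATOMIC and the OpenCL model assumed above
// needs no cross address space ordering.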

SIAtomicAddrSpace SIMemOpAccess::toSIAtomicAddrSpace(unsigned AS) const {
  if (AS == AMDGPUAS::FLAT_ADDRESS)
    return SIAtomicAddrSpace::FLAT;
  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
    return SIAtomicAddrSpace::GLOBAL;
  if (AS == AMDGPUAS::LOCAL_ADDRESS)
    return SIAtomicAddrSpace::LDS;
  if (AS == AMDGPUAS::PRIVATE_ADDRESS)
    return SIAtomicAddrSpace::SCRATCH;
  if (AS == AMDGPUAS::REGION_ADDRESS)
    return SIAtomicAddrSpace::GDS;

  return SIAtomicAddrSpace::OTHER;
}

SIMemOpAccess::SIMemOpAccess(MachineFunction &MF) {
  MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
}

Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getNumMemOperands() > 0);

  SyncScope::ID SSID = SyncScope::SingleThread;
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
  SIAtomicAddrSpace InstrAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsNonTemporal = true;

  // Validator should check whether or not MMOs cover the entire set of
  // locations accessed by the memory instruction.
  for (const auto &MMO : MI->memoperands()) {
    IsNonTemporal &= MMO->isNonTemporal();
    InstrAddrSpace |=
      toSIAtomicAddrSpace(MMO->getPointerInfo().getAddrSpace());
    AtomicOrdering OpOrdering = MMO->getOrdering();
    if (OpOrdering != AtomicOrdering::NotAtomic) {
      const auto &IsSyncScopeInclusion =
          MMI->isSyncScopeInclusion(SSID, MMO->getSyncScopeID());
      if (!IsSyncScopeInclusion) {
        reportUnsupported(MI,
          "Unsupported non-inclusive atomic synchronization scope");
        return None;
      }

      SSID = IsSyncScopeInclusion.getValue() ? SSID : MMO->getSyncScopeID();
      Ordering =
          isStrongerThan(Ordering, OpOrdering) ?
              Ordering : MMO->getOrdering();
      assert(MMO->getFailureOrdering() != AtomicOrdering::Release &&
             MMO->getFailureOrdering() != AtomicOrdering::AcquireRelease);
      FailureOrdering =
          isStrongerThan(FailureOrdering, MMO->getFailureOrdering()) ?
              FailureOrdering : MMO->getFailureOrdering();
    }
  }

  SIAtomicScope Scope = SIAtomicScope::NONE;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  if (Ordering != AtomicOrdering::NotAtomic) {
    auto ScopeOrNone = toSIAtomicScope(SSID, InstrAddrSpace);
    if (!ScopeOrNone) {
      reportUnsupported(MI, "Unsupported atomic synchronization scope");
      return None;
    }
    std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
      ScopeOrNone.getValue();
    if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
        ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) !=
          OrderingAddrSpace)) {
      reportUnsupported(MI, "Unsupported atomic address space");
      return None;
    }
  }
  return SIMemOpInfo(Ordering, Scope, OrderingAddrSpace, InstrAddrSpace,
                     IsCrossAddressSpaceOrdering, FailureOrdering,
                     IsNonTemporal);
}
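
// Illustrative example of the merging above: if a combined instruction has
// two memory operands, one monotonic at workgroup scope and one acquire at
// agent scope, the loop keeps the stronger ordering (acquire, via
// isStrongerThan) and the wider, inclusive synchronization scope (agent), so
// the resulting SIMemOpInfo conservatively covers both operands.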

Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && !MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}

Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(!MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}

Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
    return None;

  AtomicOrdering Ordering =
    static_cast<AtomicOrdering>(MI->getOperand(0).getImm());

  SyncScope::ID SSID = static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
  auto ScopeOrNone = toSIAtomicScope(SSID, SIAtomicAddrSpace::ATOMIC);
  if (!ScopeOrNone) {
    reportUnsupported(MI, "Unsupported atomic synchronization scope");
    return None;
  }

  SIAtomicScope Scope = SIAtomicScope::NONE;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
    ScopeOrNone.getValue();

  if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
      ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) != OrderingAddrSpace)) {
    reportUnsupported(MI, "Unsupported atomic address space");
    return None;
  }

  return SIMemOpInfo(Ordering, Scope, OrderingAddrSpace,
                     SIAtomicAddrSpace::ATOMIC, IsCrossAddressSpaceOrdering);
}

Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}

SICacheControl::SICacheControl(const GCNSubtarget &ST) {
  TII = ST.getInstrInfo();
  IV = getIsaVersion(ST.getCPU());
}

/* static */
std::unique_ptr<SICacheControl> SICacheControl::create(const GCNSubtarget &ST) {
  GCNSubtarget::Generation Generation = ST.getGeneration();
  if (Generation <= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return make_unique<SIGfx6CacheControl>(ST);
  return make_unique<SIGfx7CacheControl>(ST);
}

bool SIGfx6CacheControl::enableLoadCacheBypass(
    const MachineBasicBlock::iterator &MI,
    SIAtomicScope Scope,
    SIAtomicAddrSpace AddrSpace) const {
  assert(MI->mayLoad() && !MI->mayStore());
  bool Changed = false;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    /// TODO: Do not set glc for rmw atomic operations as they
    /// implicitly bypass the L1 cache.

    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      Changed |= enableGLCBit(MI);
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // No cache to bypass.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  /// The scratch address space does not need the global memory caches
  /// to be bypassed as all memory operations by the same thread are
  /// sequentially consistent, and no other thread can access scratch
  /// memory.

  /// Other address spaces do not have a cache.

  return Changed;
}

bool SIGfx6CacheControl::enableNonTemporal(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->mayLoad() ^ MI->mayStore());
  bool Changed = false;

  /// TODO: Do not enableGLCBit if rmw atomic.
  Changed |= enableGLCBit(MI);
  Changed |= enableSLCBit(MI);

  return Changed;
}

bool SIGfx6CacheControl::insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                                               SIAtomicScope Scope,
                                               SIAtomicAddrSpace AddrSpace,
                                               Position Pos) const {
  bool Changed = false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (Pos == Position::AFTER)
    ++MI;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_WBINVL1));
      Changed = true;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // No cache to invalidate.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  /// The scratch address space does not need the global memory cache
  /// to be flushed as all memory operations by the same thread are
  /// sequentially consistent, and no other thread can access scratch
  /// memory.

  /// Other address spaces do not have a cache.

  if (Pos == Position::AFTER)
    --MI;

  return Changed;
}

bool SIGfx6CacheControl::insertWait(MachineBasicBlock::iterator &MI,
                                    SIAtomicScope Scope,
                                    SIAtomicAddrSpace AddrSpace,
                                    SIMemOp Op,
                                    bool IsCrossAddrSpaceOrdering,
                                    Position Pos) const {
  bool Changed = false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (Pos == Position::AFTER)
    ++MI;

  bool VMCnt = false;
  bool LGKMCnt = false;
  bool EXPCnt = false;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      VMCnt = true;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // The L1 cache keeps all memory operations in order for
      // wavefronts in the same work-group.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  if ((AddrSpace & SIAtomicAddrSpace::LDS) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
    case SIAtomicScope::WORKGROUP:
      // If no cross address space ordering then an LDS waitcnt is not
      // needed as LDS operations for all waves are executed in a
      // total global ordering as observed by all waves. Required if
      // also synchronizing with global/GDS memory as LDS operations
      // could be reordered with respect to later global/GDS memory
      // operations of the same wave.
      LGKMCnt = IsCrossAddrSpaceOrdering;
      break;
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // The LDS keeps all memory operations in order for
      // the same wavefront.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  if ((AddrSpace & SIAtomicAddrSpace::GDS) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      // If no cross address space ordering then a GDS waitcnt is not
      // needed as GDS operations for all waves are executed in a
      // total global ordering as observed by all waves. Required if
      // also synchronizing with global/LDS memory as GDS operations
      // could be reordered with respect to later global/LDS memory
      // operations of the same wave.
      EXPCnt = IsCrossAddrSpaceOrdering;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // The GDS keeps all memory operations in order for
      // the same work-group.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  if (VMCnt || LGKMCnt || EXPCnt) {
    unsigned WaitCntImmediate =
      AMDGPU::encodeWaitcnt(IV,
                            VMCnt ? 0 : getVmcntBitMask(IV),
                            EXPCnt ? 0 : getExpcntBitMask(IV),
                            LGKMCnt ? 0 : getLgkmcntBitMask(IV));
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(WaitCntImmediate);
    Changed = true;
  }

  if (Pos == Position::AFTER)
    --MI;

  return Changed;
}
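
// Illustrative example of the waitcnt encoding above: if only VMCnt is set,
// encodeWaitcnt receives 0 for the vmcnt field and the all-ones bit masks for
// expcnt and lgkmcnt (meaning "do not wait on these counters"), so the
// emitted instruction is
//
//   s_waitcnt vmcnt(0)
//
// which blocks until all outstanding vector memory operations of the wave
// have completed.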

bool SIGfx7CacheControl::insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                                               SIAtomicScope Scope,
                                               SIAtomicAddrSpace AddrSpace,
                                               Position Pos) const {
  bool Changed = false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (Pos == Position::AFTER)
    ++MI;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_WBINVL1_VOL));
      Changed = true;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // No cache to invalidate.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  /// The scratch address space does not need the global memory cache
  /// to be flushed as all memory operations by the same thread are
  /// sequentially consistent, and no other thread can access scratch
  /// memory.

  /// Other address spaces do not have a cache.

  if (Pos == Position::AFTER)
    --MI;

  return Changed;
}

bool SIMemoryLegalizer::removeAtomicPseudoMIs() {
  if (AtomicPseudoMIs.empty())
    return false;

  for (auto &MI : AtomicPseudoMIs)
    MI->eraseFromParent();

  AtomicPseudoMIs.clear();
  return true;
}

bool SIMemoryLegalizer::expandLoad(const SIMemOpInfo &MOI,
                                   MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && !MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Monotonic ||
        MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) {
      Changed |= CC->enableLoadCacheBypass(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace());
    }

    if (MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) {
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getInstrAddrSpace(),
                                SIMemOp::LOAD,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::AFTER);
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::AFTER);
    }

    return Changed;
  }

  // Atomic instructions do not have the nontemporal attribute.
  if (MOI.isNonTemporal()) {
    Changed |= CC->enableNonTemporal(MI);
    return Changed;
  }

  return Changed;
}
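
// As a sketch of the net effect (illustrative; exact opcodes depend on the
// subtarget and the selected instruction), a seq_cst global load at agent
// scope on GFX7 becomes roughly:
//
//   s_waitcnt vmcnt(0)               ; Position::BEFORE: order prior accesses
//   flat_load_dword v0, v[0:1] glc   ; glc set to bypass the L1 cache
//   s_waitcnt vmcnt(0)               ; Position::AFTER: wait for the load
//   buffer_wbinvl1_vol               ; invalidate L1 before later accesses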

bool SIMemoryLegalizer::expandStore(const SIMemOpInfo &MOI,
                                    MachineBasicBlock::iterator &MI) {
  assert(!MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    return Changed;
  }

  // Atomic instructions do not have the nontemporal attribute.
  if (MOI.isNonTemporal()) {
    Changed |= CC->enableNonTemporal(MI);
    return Changed;
  }

  return Changed;
}
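
// Sketch of the net effect (illustrative): a release global store at agent
// scope only needs its preceding accesses to complete, so it becomes roughly:
//
//   s_waitcnt vmcnt(0)               ; Position::BEFORE: make prior accesses
//                                    ; visible before the store
//   flat_store_dword v[0:1], v2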

bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
                                          MachineBasicBlock::iterator &MI) {
  assert(MI->getOpcode() == AMDGPU::ATOMIC_FENCE);

  AtomicPseudoMIs.push_back(MI);
  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      /// TODO: This relies on a barrier always generating a waitcnt
      /// for LDS to ensure it is not reordered with the completion of
      /// the preceding LDS operations. If barrier had a memory
      /// ordering and memory scope, then library does not need to
      /// generate a fence. Could add support in this file for
      /// barrier. SIInsertWaitcnts.cpp could then stop unconditionally
      /// adding waitcnt before a S_BARRIER.
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::BEFORE);

    return Changed;
  }

  return Changed;
}
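
// Sketch of the net effect (illustrative): fence syncscope("agent") acquire
// arrives here as an ATOMIC_FENCE pseudo and is expanded in place to roughly
//
//   s_waitcnt vmcnt(0)               ; complete prior global accesses
//   buffer_wbinvl1_vol               ; invalidate L1 (GFX7 and later)
//
// after which the ATOMIC_FENCE pseudo itself is deleted by
// removeAtomicPseudoMIs().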

bool SIMemoryLegalizer::expandAtomicCmpxchgOrRmw(const SIMemOpInfo &MOI,
                                                 MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent ||
        MOI.getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent ||
        MOI.getFailureOrdering() == AtomicOrdering::Acquire ||
        MOI.getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) {
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                isAtomicRet(*MI) ? SIMemOp::LOAD :
                                                   SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::AFTER);
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::AFTER);
    }

    return Changed;
  }

  return Changed;
}
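
// Sketch of the net effect (illustrative): a seq_cst atomicrmw on global
// memory at agent scope is bracketed on both sides, roughly:
//
//   s_waitcnt vmcnt(0)                   ; Position::BEFORE (release half)
//   flat_atomic_add v0, v[0:1], v2 glc   ; glc requests the old value back
//   s_waitcnt vmcnt(0)                   ; Position::AFTER (acquire half)
//   buffer_wbinvl1_vol                   ; invalidate L1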

bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  SIMemOpAccess MOA(MF);
  CC = SICacheControl::create(MF.getSubtarget<GCNSubtarget>());

  for (auto &MBB : MF) {
    for (auto MI = MBB.begin(); MI != MBB.end(); ++MI) {
      if (!(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic))
        continue;

      if (const auto &MOI = MOA.getLoadInfo(MI))
        Changed |= expandLoad(MOI.getValue(), MI);
      else if (const auto &MOI = MOA.getStoreInfo(MI))
        Changed |= expandStore(MOI.getValue(), MI);
      else if (const auto &MOI = MOA.getAtomicFenceInfo(MI))
        Changed |= expandAtomicFence(MOI.getValue(), MI);
      else if (const auto &MOI = MOA.getAtomicCmpxchgOrRmwInfo(MI))
        Changed |= expandAtomicCmpxchgOrRmw(MOI.getValue(), MI);
    }
  }

  Changed |= removeAtomicPseudoMIs();
  return Changed;
}

INITIALIZE_PASS(SIMemoryLegalizer, DEBUG_TYPE, PASS_NAME, false, false)

char SIMemoryLegalizer::ID = 0;
char &llvm::SIMemoryLegalizerID = SIMemoryLegalizer::ID;

FunctionPass *llvm::createSIMemoryLegalizerPass() {
  return new SIMemoryLegalizer();
}
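
// Usage sketch (illustrative, not verified against a specific revision): the
// AMDGPU target registers this pass late in the GCN codegen pipeline from its
// pass config in AMDGPUTargetMachine.cpp, roughly as:
//
//   void GCNPassConfig::addPreEmitPasses() {
//     addPass(createSIMemoryLegalizerPass());
//     ...
//   }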