//===- SIMemoryLegalizer.cpp ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Memory legalizer - implements the memory model. More information can be
/// found here:
///   http://llvm.org/docs/AMDGPUUsage.html#memory-model
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUMachineModuleInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <list>

using namespace llvm;
using namespace llvm::AMDGPU;

#define DEBUG_TYPE "si-memory-legalizer"
#define PASS_NAME "SI Memory Legalizer"

namespace {

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

/// Memory operation flags. Can be ORed together.
enum class SIMemOp {
  NONE = 0u,
  LOAD = 1u << 0,
  STORE = 1u << 1,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ STORE)
};

/// Position to insert a new instruction relative to an existing
/// instruction.
enum class Position {
  BEFORE,
  AFTER
};

/// The atomic synchronization scopes supported by the AMDGPU target.
enum class SIAtomicScope {
  NONE,
  SINGLETHREAD,
  WAVEFRONT,
  WORKGROUP,
  AGENT,
  SYSTEM
};

/// The distinct address spaces supported by the AMDGPU target for
/// atomic memory operations. Can be ORed together.
enum class SIAtomicAddrSpace {
  NONE = 0u,
  GLOBAL = 1u << 0,
  LDS = 1u << 1,
  SCRATCH = 1u << 2,
  GDS = 1u << 3,
  OTHER = 1u << 4,

  /// The address spaces that can be accessed by a FLAT instruction.
  FLAT = GLOBAL | LDS | SCRATCH,

  /// The address spaces that support atomic instructions.
  ATOMIC = GLOBAL | LDS | SCRATCH | GDS,

  /// All address spaces.
  ALL = GLOBAL | LDS | SCRATCH | GDS | OTHER,

  LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ ALL)
};
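
// Illustrative note: because SIAtomicAddrSpace is a bitmask enum, address
// spaces are combined with "|" and tested with "&". For example, later code
// in this file checks membership with:
//   if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE)
//     ...;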

/// Sets named bit \p BitName to "true" if present in instruction \p MI.
/// \returns True if \p MI is modified, false otherwise.
template <uint16_t BitName>
bool enableNamedBit(const MachineBasicBlock::iterator &MI) {
  int BitIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), BitName);
  if (BitIdx == -1)
    return false;

  MachineOperand &Bit = MI->getOperand(BitIdx);
  if (Bit.getImm() != 0)
    return false;

  Bit.setImm(1);
  return true;
}
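
// Example usage (illustrative): setting the GLC bit on a memory instruction,
// provided the instruction actually has such an operand (this is what
// SIGfx6CacheControl::enableGLCBit() below does):
//   bool Modified = enableNamedBit<AMDGPU::OpName::glc>(MI);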

class SIMemOpInfo final {
private:

  friend class SIMemOpAccess;

  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
  SIAtomicScope Scope = SIAtomicScope::SYSTEM;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  SIAtomicAddrSpace InstrAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  bool IsNonTemporal = false;

  SIMemOpInfo(AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent,
              SIAtomicScope Scope = SIAtomicScope::SYSTEM,
              SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::ATOMIC,
              SIAtomicAddrSpace InstrAddrSpace = SIAtomicAddrSpace::ALL,
              bool IsCrossAddressSpaceOrdering = true,
              AtomicOrdering FailureOrdering =
                AtomicOrdering::SequentiallyConsistent,
              bool IsNonTemporal = false)
    : Ordering(Ordering), FailureOrdering(FailureOrdering),
      Scope(Scope), OrderingAddrSpace(OrderingAddrSpace),
      InstrAddrSpace(InstrAddrSpace),
      IsCrossAddressSpaceOrdering(IsCrossAddressSpaceOrdering),
      IsNonTemporal(IsNonTemporal) {
    // There is also no cross address space ordering if the ordering
    // address space is the same as the instruction address space and
    // only contains a single address space.
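    // For example, an LDS-only atomic (ordering and instruction address
    // space both equal to SIAtomicAddrSpace::LDS, a single bit) can never
    // order accesses across address spaces, so the flag is cleared.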
    if ((OrderingAddrSpace == InstrAddrSpace) &&
        isPowerOf2_32(uint32_t(InstrAddrSpace)))
      this->IsCrossAddressSpaceOrdering = false;
  }

public:
  /// \returns Atomic synchronization scope of the machine instruction used to
  /// create this SIMemOpInfo.
  SIAtomicScope getScope() const {
    return Scope;
  }

  /// \returns Ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo.
  AtomicOrdering getOrdering() const {
    return Ordering;
  }

  /// \returns Failure ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo.
  AtomicOrdering getFailureOrdering() const {
    return FailureOrdering;
  }

  /// \returns The address spaces accessed by the machine
  /// instruction used to create this SIMemOpInfo.
  SIAtomicAddrSpace getInstrAddrSpace() const {
    return InstrAddrSpace;
  }

  /// \returns The address spaces that must be ordered by the machine
  /// instruction used to create this SIMemOpInfo.
  SIAtomicAddrSpace getOrderingAddrSpace() const {
    return OrderingAddrSpace;
  }

  /// \returns True iff memory ordering of operations on
  /// different address spaces is required.
  bool getIsCrossAddressSpaceOrdering() const {
    return IsCrossAddressSpaceOrdering;
  }

  /// \returns True if memory access of the machine instruction used to
  /// create this SIMemOpInfo is non-temporal, false otherwise.
  bool isNonTemporal() const {
    return IsNonTemporal;
  }

  /// \returns True if ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo is unordered or higher, false otherwise.
  bool isAtomic() const {
    return Ordering != AtomicOrdering::NotAtomic;
  }

};

class SIMemOpAccess final {
private:

  AMDGPUAS SIAddrSpaceInfo;
  AMDGPUMachineModuleInfo *MMI = nullptr;

  /// Reports unsupported message \p Msg for \p MI to the LLVM context.
  void reportUnsupported(const MachineBasicBlock::iterator &MI,
                         const char *Msg) const;

  /// Inspects the target synchronization scope \p SSID and determines
  /// the SI atomic scope it corresponds to, the address spaces it
  /// covers, and whether the memory ordering applies between address
  /// spaces.
  Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
  toSIAtomicScope(SyncScope::ID SSID, SIAtomicAddrSpace InstrScope) const;

  /// \returns The set of SI atomic address spaces that may be accessed by a
  /// memory operation on target address space \p AS.
  SIAtomicAddrSpace toSIAtomicAddrSpace(unsigned AS) const;

  /// \returns Info constructed from \p MI, which has at least one machine
  /// memory operand.
  Optional<SIMemOpInfo> constructFromMIWithMMO(
      const MachineBasicBlock::iterator &MI) const;

public:
  /// Construct class to support accessing the machine memory operands
  /// of instructions in the machine function \p MF.
  SIMemOpAccess(MachineFunction &MF);

  /// \returns Load info if \p MI is a load operation, "None" otherwise.
  Optional<SIMemOpInfo> getLoadInfo(
      const MachineBasicBlock::iterator &MI) const;

  /// \returns Store info if \p MI is a store operation, "None" otherwise.
  Optional<SIMemOpInfo> getStoreInfo(
      const MachineBasicBlock::iterator &MI) const;

  /// \returns Atomic fence info if \p MI is an atomic fence operation,
  /// "None" otherwise.
  Optional<SIMemOpInfo> getAtomicFenceInfo(
      const MachineBasicBlock::iterator &MI) const;

  /// \returns Atomic cmpxchg/rmw info if \p MI is an atomic cmpxchg or
  /// rmw operation, "None" otherwise.
  Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo(
      const MachineBasicBlock::iterator &MI) const;
};

class SICacheControl {
protected:

  /// Instruction info.
  const SIInstrInfo *TII = nullptr;

  IsaInfo::IsaVersion IV;

  SICacheControl(const SISubtarget &ST);

public:

  /// Create a cache control for the subtarget \p ST.
  static std::unique_ptr<SICacheControl> create(const SISubtarget &ST);

  /// Update \p MI memory load instruction to bypass any caches up to
  /// the \p Scope memory scope for address spaces \p AddrSpace.
  /// Return true iff the instruction was modified.
  virtual bool enableLoadCacheBypass(const MachineBasicBlock::iterator &MI,
                                     SIAtomicScope Scope,
                                     SIAtomicAddrSpace AddrSpace) const = 0;

  /// Update \p MI memory instruction to indicate it is
  /// nontemporal. Return true iff the instruction was modified.
  virtual bool enableNonTemporal(const MachineBasicBlock::iterator &MI)
    const = 0;

  /// Inserts any necessary instructions at position \p Pos relative
  /// to instruction \p MI to ensure any caches associated with
  /// address spaces \p AddrSpace for memory scopes up to memory scope
  /// \p Scope are invalidated. Returns true iff any instructions are
  /// inserted.
  virtual bool insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                                     SIAtomicScope Scope,
                                     SIAtomicAddrSpace AddrSpace,
                                     Position Pos) const = 0;

  /// Inserts any necessary instructions at position \p Pos relative
  /// to instruction \p MI to ensure memory instructions of kind \p Op
  /// associated with address spaces \p AddrSpace have completed as
  /// observed by other memory instructions executing in memory scope
  /// \p Scope. \p IsCrossAddrSpaceOrdering indicates if the memory
  /// ordering is between address spaces. Returns true iff any
  /// instructions are inserted.
  virtual bool insertWait(MachineBasicBlock::iterator &MI,
                          SIAtomicScope Scope,
                          SIAtomicAddrSpace AddrSpace,
                          SIMemOp Op,
                          bool IsCrossAddrSpaceOrdering,
                          Position Pos) const = 0;
};

class SIGfx6CacheControl : public SICacheControl {
protected:

  /// Sets GLC bit to "true" if present in \p MI. Returns true if \p MI
  /// is modified, false otherwise.
  bool enableGLCBit(const MachineBasicBlock::iterator &MI) const {
    return enableNamedBit<AMDGPU::OpName::glc>(MI);
  }

  /// Sets SLC bit to "true" if present in \p MI. Returns true if \p MI
  /// is modified, false otherwise.
  bool enableSLCBit(const MachineBasicBlock::iterator &MI) const {
    return enableNamedBit<AMDGPU::OpName::slc>(MI);
  }

public:

  SIGfx6CacheControl(const SISubtarget &ST) : SICacheControl(ST) {}

  bool enableLoadCacheBypass(const MachineBasicBlock::iterator &MI,
                             SIAtomicScope Scope,
                             SIAtomicAddrSpace AddrSpace) const override;

  bool enableNonTemporal(const MachineBasicBlock::iterator &MI) const override;

  bool insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                             SIAtomicScope Scope,
                             SIAtomicAddrSpace AddrSpace,
                             Position Pos) const override;

  bool insertWait(MachineBasicBlock::iterator &MI,
                  SIAtomicScope Scope,
                  SIAtomicAddrSpace AddrSpace,
                  SIMemOp Op,
                  bool IsCrossAddrSpaceOrdering,
                  Position Pos) const override;
};

class SIGfx7CacheControl : public SIGfx6CacheControl {
public:

  SIGfx7CacheControl(const SISubtarget &ST) : SIGfx6CacheControl(ST) {}

  bool insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                             SIAtomicScope Scope,
                             SIAtomicAddrSpace AddrSpace,
                             Position Pos) const override;

};

class SIMemoryLegalizer final : public MachineFunctionPass {
private:

  /// Cache Control.
  std::unique_ptr<SICacheControl> CC = nullptr;

  /// List of atomic pseudo instructions.
  std::list<MachineBasicBlock::iterator> AtomicPseudoMIs;

  /// Return true iff instruction \p MI is an atomic instruction that
  /// returns a result.
  bool isAtomicRet(const MachineInstr &MI) const {
    return AMDGPU::getAtomicNoRetOp(MI.getOpcode()) != -1;
  }

  /// Removes all processed atomic pseudo instructions from the current
  /// function. Returns true if the current function is modified, false
  /// otherwise.
  bool removeAtomicPseudoMIs();

  /// Expands load operation \p MI. Returns true if instructions are
  /// added/deleted or \p MI is modified, false otherwise.
  bool expandLoad(const SIMemOpInfo &MOI,
                  MachineBasicBlock::iterator &MI);
  /// Expands store operation \p MI. Returns true if instructions are
  /// added/deleted or \p MI is modified, false otherwise.
  bool expandStore(const SIMemOpInfo &MOI,
                   MachineBasicBlock::iterator &MI);
  /// Expands atomic fence operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicFence(const SIMemOpInfo &MOI,
                         MachineBasicBlock::iterator &MI);
  /// Expands atomic cmpxchg or rmw operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicCmpxchgOrRmw(const SIMemOpInfo &MOI,
                                MachineBasicBlock::iterator &MI);

public:
  static char ID;

  SIMemoryLegalizer() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return PASS_NAME;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
                                      const char *Msg) const {
  const Function &Func = MI->getParent()->getParent()->getFunction();
  DiagnosticInfoUnsupported Diag(Func, Msg, MI->getDebugLoc());
  Func.getContext().diagnose(Diag);
}

Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
                               SIAtomicAddrSpace InstrScope) const {
  /// TODO: For now assume the OpenCL memory model, which treats each
  /// address space as having a separate happens-before relation. An
  /// instruction therefore only has ordering with respect to the address
  /// spaces it accesses, and accessing multiple address spaces does not
  /// require ordering of operations between different address spaces.
  if (SSID == SyncScope::System)
    return std::make_tuple(SIAtomicScope::SYSTEM,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == MMI->getAgentSSID())
    return std::make_tuple(SIAtomicScope::AGENT,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == MMI->getWorkgroupSSID())
    return std::make_tuple(SIAtomicScope::WORKGROUP,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == MMI->getWavefrontSSID())
    return std::make_tuple(SIAtomicScope::WAVEFRONT,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  if (SSID == SyncScope::SingleThread)
    return std::make_tuple(SIAtomicScope::SINGLETHREAD,
                           SIAtomicAddrSpace::ATOMIC & InstrScope,
                           false);
  /// TODO: To support the HSA memory model, additional memory scopes that
  /// specify they do require cross address space ordering need to be added.
  return None;
}

SIAtomicAddrSpace SIMemOpAccess::toSIAtomicAddrSpace(unsigned AS) const {
  if (AS == SIAddrSpaceInfo.FLAT_ADDRESS)
    return SIAtomicAddrSpace::FLAT;
  if (AS == SIAddrSpaceInfo.GLOBAL_ADDRESS)
    return SIAtomicAddrSpace::GLOBAL;
  if (AS == SIAddrSpaceInfo.LOCAL_ADDRESS)
    return SIAtomicAddrSpace::LDS;
  if (AS == SIAddrSpaceInfo.PRIVATE_ADDRESS)
    return SIAtomicAddrSpace::SCRATCH;
  if (AS == SIAddrSpaceInfo.REGION_ADDRESS)
    return SIAtomicAddrSpace::GDS;

  return SIAtomicAddrSpace::OTHER;
}

SIMemOpAccess::SIMemOpAccess(MachineFunction &MF) {
  SIAddrSpaceInfo = getAMDGPUAS(MF.getTarget());
  MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
}

Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getNumMemOperands() > 0);

  SyncScope::ID SSID = SyncScope::SingleThread;
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
  SIAtomicAddrSpace InstrAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsNonTemporal = true;

  // Validator should check whether or not MMOs cover the entire set of
  // locations accessed by the memory instruction.
  for (const auto &MMO : MI->memoperands()) {
    IsNonTemporal &= MMO->isNonTemporal();
    InstrAddrSpace |=
      toSIAtomicAddrSpace(MMO->getPointerInfo().getAddrSpace());
    AtomicOrdering OpOrdering = MMO->getOrdering();
    if (OpOrdering != AtomicOrdering::NotAtomic) {
      const auto &IsSyncScopeInclusion =
          MMI->isSyncScopeInclusion(SSID, MMO->getSyncScopeID());
      if (!IsSyncScopeInclusion) {
        reportUnsupported(MI,
          "Unsupported non-inclusive atomic synchronization scope");
        return None;
      }

      SSID = IsSyncScopeInclusion.getValue() ? SSID : MMO->getSyncScopeID();
      Ordering =
          isStrongerThan(Ordering, OpOrdering) ?
              Ordering : MMO->getOrdering();
      assert(MMO->getFailureOrdering() != AtomicOrdering::Release &&
             MMO->getFailureOrdering() != AtomicOrdering::AcquireRelease);
      FailureOrdering =
          isStrongerThan(FailureOrdering, MMO->getFailureOrdering()) ?
              FailureOrdering : MMO->getFailureOrdering();
    }
  }

  SIAtomicScope Scope = SIAtomicScope::NONE;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  if (Ordering != AtomicOrdering::NotAtomic) {
    auto ScopeOrNone = toSIAtomicScope(SSID, InstrAddrSpace);
    if (!ScopeOrNone) {
      reportUnsupported(MI, "Unsupported atomic synchronization scope");
      return None;
    }
    std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
      ScopeOrNone.getValue();
    if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
        ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) != OrderingAddrSpace)) {
      reportUnsupported(MI, "Unsupported atomic address space");
      return None;
    }
  }
  return SIMemOpInfo(Ordering, Scope, OrderingAddrSpace, InstrAddrSpace,
                     IsCrossAddressSpaceOrdering, FailureOrdering, IsNonTemporal);
}

Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && !MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}

Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(!MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}

Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
    return None;

  AtomicOrdering Ordering =
    static_cast<AtomicOrdering>(MI->getOperand(0).getImm());

  SyncScope::ID SSID = static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
  auto ScopeOrNone = toSIAtomicScope(SSID, SIAtomicAddrSpace::ATOMIC);
  if (!ScopeOrNone) {
    reportUnsupported(MI, "Unsupported atomic synchronization scope");
    return None;
  }

  SIAtomicScope Scope = SIAtomicScope::NONE;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
    ScopeOrNone.getValue();

  if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
      ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) != OrderingAddrSpace)) {
    reportUnsupported(MI, "Unsupported atomic address space");
    return None;
  }

  return SIMemOpInfo(Ordering, Scope, OrderingAddrSpace, SIAtomicAddrSpace::ATOMIC,
                     IsCrossAddressSpaceOrdering);
}

Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}

SICacheControl::SICacheControl(const SISubtarget &ST) {
  TII = ST.getInstrInfo();
  IV = IsaInfo::getIsaVersion(ST.getFeatureBits());
}

/* static */
std::unique_ptr<SICacheControl> SICacheControl::create(const SISubtarget &ST) {
  AMDGPUSubtarget::Generation Generation = ST.getGeneration();
  if (Generation <= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return make_unique<SIGfx6CacheControl>(ST);
  return make_unique<SIGfx7CacheControl>(ST);
}
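
// Note: the GFX6 (Southern Islands) cache control differs from the GFX7+
// variant only in how it invalidates the vector L1 cache: SIGfx6CacheControl
// emits BUFFER_WBINVL1 while SIGfx7CacheControl emits BUFFER_WBINVL1_VOL (see
// the insertCacheInvalidate() implementations below).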

bool SIGfx6CacheControl::enableLoadCacheBypass(
    const MachineBasicBlock::iterator &MI,
    SIAtomicScope Scope,
    SIAtomicAddrSpace AddrSpace) const {
  assert(MI->mayLoad() && !MI->mayStore());
  bool Changed = false;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    /// TODO: Do not set glc for rmw atomic operations as they
    /// implicitly bypass the L1 cache.

    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      Changed |= enableGLCBit(MI);
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // No cache to bypass.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  /// The scratch address space does not need the global memory caches
  /// to be bypassed as all memory operations by the same thread are
  /// sequentially consistent, and no other thread can access scratch
  /// memory.

  /// Other address spaces do not have a cache.

  return Changed;
}

bool SIGfx6CacheControl::enableNonTemporal(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->mayLoad() ^ MI->mayStore());
  bool Changed = false;

  /// TODO: Do not enableGLCBit if rmw atomic.
  Changed |= enableGLCBit(MI);
  Changed |= enableSLCBit(MI);

  return Changed;
}

bool SIGfx6CacheControl::insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                                               SIAtomicScope Scope,
                                               SIAtomicAddrSpace AddrSpace,
                                               Position Pos) const {
  bool Changed = false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (Pos == Position::AFTER)
    ++MI;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_WBINVL1));
      Changed = true;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // No cache to invalidate.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  /// The scratch address space does not need the global memory cache
  /// to be flushed as all memory operations by the same thread are
  /// sequentially consistent, and no other thread can access scratch
  /// memory.

  /// Other address spaces do not have a cache.

  if (Pos == Position::AFTER)
    --MI;

  return Changed;
}

bool SIGfx6CacheControl::insertWait(MachineBasicBlock::iterator &MI,
                                    SIAtomicScope Scope,
                                    SIAtomicAddrSpace AddrSpace,
                                    SIMemOp Op,
                                    bool IsCrossAddrSpaceOrdering,
                                    Position Pos) const {
  bool Changed = false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (Pos == Position::AFTER)
    ++MI;

  bool VMCnt = false;
  bool LGKMCnt = false;
  bool EXPCnt = false;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      VMCnt = true;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // The L1 cache keeps all memory operations in order for
      // wavefronts in the same work-group.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  if ((AddrSpace & SIAtomicAddrSpace::LDS) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
    case SIAtomicScope::WORKGROUP:
      // If no cross address space ordering then an LDS waitcnt is not
      // needed as LDS operations for all waves are executed in a
      // total global ordering as observed by all waves. Required if
      // also synchronizing with global/GDS memory as LDS operations
      // could be reordered with respect to later global/GDS memory
      // operations of the same wave.
      LGKMCnt = IsCrossAddrSpaceOrdering;
      break;
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // The LDS keeps all memory operations in order for
      // the same wavefront.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  if ((AddrSpace & SIAtomicAddrSpace::GDS) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      // If no cross address space ordering then a GDS waitcnt is not
      // needed as GDS operations for all waves are executed in a
      // total global ordering as observed by all waves. Required if
      // also synchronizing with global/LDS memory as GDS operations
      // could be reordered with respect to later global/LDS memory
      // operations of the same wave.
      EXPCnt = IsCrossAddrSpaceOrdering;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // The GDS keeps all memory operations in order for
      // the same work-group.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

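  // Build a single S_WAITCNT that waits only on the selected counters. A
  // counter that is not being waited on is passed its maximum bit-mask value,
  // which effectively means "no wait" for that counter. For example
  // (illustrative), an agent-scope wait on only the global address space sets
  // VMCnt, so the result is roughly "s_waitcnt vmcnt(0)" with expcnt and
  // lgkmcnt left unconstrained.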
  if (VMCnt || LGKMCnt || EXPCnt) {
    unsigned WaitCntImmediate =
      AMDGPU::encodeWaitcnt(IV,
                            VMCnt ? 0 : getVmcntBitMask(IV),
                            EXPCnt ? 0 : getExpcntBitMask(IV),
                            LGKMCnt ? 0 : getLgkmcntBitMask(IV));
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(WaitCntImmediate);
    Changed = true;
  }

  if (Pos == Position::AFTER)
    --MI;

  return Changed;
}

bool SIGfx7CacheControl::insertCacheInvalidate(MachineBasicBlock::iterator &MI,
                                               SIAtomicScope Scope,
                                               SIAtomicAddrSpace AddrSpace,
                                               Position Pos) const {
  bool Changed = false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (Pos == Position::AFTER)
    ++MI;

  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
    switch (Scope) {
    case SIAtomicScope::SYSTEM:
    case SIAtomicScope::AGENT:
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::BUFFER_WBINVL1_VOL));
      Changed = true;
      break;
    case SIAtomicScope::WORKGROUP:
    case SIAtomicScope::WAVEFRONT:
    case SIAtomicScope::SINGLETHREAD:
      // No cache to invalidate.
      break;
    default:
      llvm_unreachable("Unsupported synchronization scope");
    }
  }

  /// The scratch address space does not need the global memory cache
  /// to be flushed as all memory operations by the same thread are
  /// sequentially consistent, and no other thread can access scratch
  /// memory.

  /// Other address spaces do not have a cache.

  if (Pos == Position::AFTER)
    --MI;

  return Changed;
}

bool SIMemoryLegalizer::removeAtomicPseudoMIs() {
  if (AtomicPseudoMIs.empty())
    return false;

  for (auto &MI : AtomicPseudoMIs)
    MI->eraseFromParent();

  AtomicPseudoMIs.clear();
  return true;
}

bool SIMemoryLegalizer::expandLoad(const SIMemOpInfo &MOI,
                                   MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && !MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Monotonic ||
        MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) {
      Changed |= CC->enableLoadCacheBypass(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace());
    }

    if (MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) {
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getInstrAddrSpace(),
                                SIMemOp::LOAD,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::AFTER);
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::AFTER);
    }
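
    // Taken together (illustrative, for an agent-scope seq_cst global atomic
    // load on GFX7+): the load gets its GLC bit set, an s_waitcnt is inserted
    // before it, and an s_waitcnt followed by a buffer_wbinvl1_vol is
    // inserted after it so later reads observe up-to-date global memory.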

    return Changed;
  }

  // Atomic instructions do not have the nontemporal attribute.
  if (MOI.isNonTemporal()) {
    Changed |= CC->enableNonTemporal(MI);
    return Changed;
  }

  return Changed;
}

bool SIMemoryLegalizer::expandStore(const SIMemOpInfo &MOI,
                                    MachineBasicBlock::iterator &MI) {
  assert(!MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);
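
    // In other words (illustrative): a release or seq_cst atomic store only
    // needs an s_waitcnt inserted before it, so that all prior memory
    // operations in the relevant address spaces have completed before the
    // store is performed.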

    return Changed;
  }

  // Atomic instructions do not have the nontemporal attribute.
  if (MOI.isNonTemporal()) {
    Changed |= CC->enableNonTemporal(MI);
    return Changed;
  }

  return Changed;
}

bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
                                          MachineBasicBlock::iterator &MI) {
  assert(MI->getOpcode() == AMDGPU::ATOMIC_FENCE);

  AtomicPseudoMIs.push_back(MI);
  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      /// TODO: This relies on a barrier always generating a waitcnt
      /// for LDS to ensure it is not reordered with the completion of
      /// the preceding LDS operations. If the barrier had a memory
      /// ordering and memory scope, then the library would not need to
      /// generate a fence. Could add support in this file for
      /// barrier. SIInsertWaitcnts.cpp could then stop unconditionally
      /// adding waitcnt before an S_BARRIER.
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::BEFORE);
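
    // Put differently (illustrative): a fence with acquire, release, acq_rel,
    // or seq_cst ordering gets an s_waitcnt inserted before it, and orderings
    // that include acquire also get a cache invalidate; the ATOMIC_FENCE
    // pseudo itself is deleted later by removeAtomicPseudoMIs().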

    return Changed;
  }

  return Changed;
}

bool SIMemoryLegalizer::expandAtomicCmpxchgOrRmw(const SIMemOpInfo &MOI,
  MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent ||
        MOI.getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent ||
        MOI.getFailureOrdering() == AtomicOrdering::Acquire ||
        MOI.getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) {
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                isAtomicRet(*MI) ? SIMemOp::LOAD :
                                                   SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::AFTER);
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::AFTER);
    }
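
    // Summary (illustrative): orderings with release semantics, or a seq_cst
    // failure ordering, get an s_waitcnt inserted before the read-modify-write,
    // and orderings with acquire semantics (on either the success or the
    // failure ordering) get an s_waitcnt plus a cache invalidate inserted
    // after it. The trailing wait is on the load component for returning
    // atomics and on the store component otherwise.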

    return Changed;
  }

  return Changed;
}

bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  SIMemOpAccess MOA(MF);
  CC = SICacheControl::create(MF.getSubtarget<SISubtarget>());

  for (auto &MBB : MF) {
    for (auto MI = MBB.begin(); MI != MBB.end(); ++MI) {
      if (!(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic))
        continue;

      if (const auto &MOI = MOA.getLoadInfo(MI))
        Changed |= expandLoad(MOI.getValue(), MI);
      else if (const auto &MOI = MOA.getStoreInfo(MI))
        Changed |= expandStore(MOI.getValue(), MI);
      else if (const auto &MOI = MOA.getAtomicFenceInfo(MI))
        Changed |= expandAtomicFence(MOI.getValue(), MI);
      else if (const auto &MOI = MOA.getAtomicCmpxchgOrRmwInfo(MI))
        Changed |= expandAtomicCmpxchgOrRmw(MOI.getValue(), MI);
    }
  }

  Changed |= removeAtomicPseudoMIs();
  return Changed;
}

INITIALIZE_PASS(SIMemoryLegalizer, DEBUG_TYPE, PASS_NAME, false, false)

char SIMemoryLegalizer::ID = 0;
char &llvm::SIMemoryLegalizerID = SIMemoryLegalizer::ID;

FunctionPass *llvm::createSIMemoryLegalizerPass() {
  return new SIMemoryLegalizer();
}