//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;
class TargetInstrInfo;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  unsigned AddrSpace = 0;

  uint8_t StackID;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
      : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
        StackID(0) {}

  explicit MachinePointerInfo(
    PointerUnion<const Value *, const PseudoSourceValue *> v,
    int64_t offset = 0,
    uint8_t ID = 0)
    : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = dyn_cast_if_present<const Value *>(V))
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = cast<const PseudoSourceValue *>(V)->getAddressSpace();
    }
  }

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace, Offset + O);
    if (isa<const Value *>(V))
      return MachinePointerInfo(cast<const Value *>(V), Offset + O, StackID);
    return MachinePointerInfo(cast<const PseudoSourceValue *>(V), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
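
// An illustrative (non-normative) sketch of typical MachinePointerInfo
// construction, using only the members declared above; `MF`, `FI`, and `Ptr`
// stand for an in-scope MachineFunction, frame index, and IR pointer Value.
//
//   // Describe a fixed stack slot, then rebase the access by 8 bytes.
//   MachinePointerInfo StackInfo = MachinePointerInfo::getFixedStack(MF, FI);
//   MachinePointerInfo StackPlus8 = StackInfo.getWithOffset(8);
//
//   // Describe an access through the IR pointer `Ptr` at a byte offset of 4.
//   MachinePointerInfo IRInfo(Ptr, /*offset=*/4);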


//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags.  If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,
    MOTargetFlag4 = 1u << 9,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag4)
  };
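
  // Illustrative only: the bitmask-enum support above lets Flags values be
  // combined and queried directly, e.g. to describe a volatile load:
  //
  //   MachineMemOperand::Flags F =
  //       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
  //   assert((F & MachineMemOperand::MOVolatile) && "volatile bit expected");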

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;

  /// Track the memory type of the access. An access size which is unknown or
  /// too large to be represented by LLT should use the invalid LLT.
  LLT MemoryType;

  Flags FlagVals;
  Align BaseAlign;
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LocationSize TS,
                    Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
                    const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
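
  // Note: in practice MachineMemOperands are normally allocated through
  // MachineFunction::getMachineMemOperand() rather than constructed directly.
  // A minimal sketch, assuming `MF` and `PtrInfo` are an in-scope
  // MachineFunction and MachinePointerInfo:
  //
  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
  //       PtrInfo, MachineMemOperand::MOLoad, LLT::scalar(32), Align(4));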

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const {
    return dyn_cast_if_present<const Value *>(PtrInfo.V);
  }

  const PseudoSourceValue *getPseudoValue() const {
    return dyn_cast_if_present<const PseudoSourceValue *>(PtrInfo.V);
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the memory type of the memory reference. This should only be relied
  /// on for GlobalISel G_* operation legalization.
  LLT getMemoryType() const { return MemoryType; }

  /// Return the size in bytes of the memory reference.
  LocationSize getSize() const {
    return MemoryType.isValid()
               ? LocationSize::precise(MemoryType.getSizeInBytes())
               : LocationSize::beforeOrAfterPointer();
  }

  /// Return the size in bits of the memory reference.
  LocationSize getSizeInBits() const {
    return MemoryType.isValid()
               ? LocationSize::precise(MemoryType.getSizeInBits())
               : LocationSize::beforeOrAfterPointer();
  }

  LLT getType() const {
    return MemoryType;
  }

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  Align getAlign() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  Align getBaseAlign() const { return BaseAlign; }

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation.  (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const {
    return getMergedAtomicOrdering(getSuccessOrdering(), getFailureOrdering());
  }

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const {
    return getSuccessOrdering() != AtomicOrdering::NotAtomic;
  }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
            getSuccessOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }
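
  // Illustrative only: a pass that wants to reorder two memory accesses
  // described by the hypothetical operands `MMOA` and `MMOB` generally needs
  // both to be unordered, in addition to proving that they do not alias:
  //
  //   bool MayReorder = MMOA->isUnordered() && MMOB->isUnordered();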

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
  /// greater alignment. This must only be used when the new alignment applies
  /// to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Reset the tracked memory type.
  void setType(LLT NewTy) {
    MemoryType = NewTy;
  }

  /// Unset the tracked range metadata.
  void clearRanges() { Ranges = nullptr; }

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlign() == RHS.getAlign() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};

} // End llvm namespace

#endif