//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/LowLevelTypeImpl.h"

namespace llvm {

class FoldingSetNodeID;
class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;
class TargetInstrInfo;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  unsigned AddrSpace = 0;

  uint8_t StackID;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
      : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
        StackID(0) {}

  explicit MachinePointerInfo(
    PointerUnion<const Value *, const PseudoSourceValue *> v,
    int64_t offset = 0,
    uint8_t ID = 0)
    : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = V.dyn_cast<const Value*>())
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
    }
  }

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace, Offset + O);
    if (V.is<const Value*>())
      return MachinePointerInfo(V.get<const Value*>(), Offset + O, StackID);
    return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
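
// Example (illustrative sketch): building a MachinePointerInfo for a fixed
// stack slot and re-basing it at a byte offset. `MF` and `FI` are assumed to
// be an existing MachineFunction and frame index; they are not defined in
// this header.
//
//   MachinePointerInfo Base = MachinePointerInfo::getFixedStack(MF, FI);
//   // Refer to the second 4-byte word of the same stack object; the base
//   // value, StackID and address space are preserved, only Offset changes.
//   MachinePointerInfo Second = Base.getWithOffset(4);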


//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags.  If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };
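
  // Example (illustrative): LLVM_MARK_AS_BITMASK_ENUM lets Flags values be
  // combined with the usual bitwise operators, e.g. a volatile load is
  // described by
  //
  //   MachineMemOperand::Flags F = MOLoad | MOVolatile;
  //
  // and individual flags are tested by masking, as the is*() accessors below
  // do (e.g. `F & MOVolatile`).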

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;

  /// Track the memory type of the access. An access size which is unknown or
  /// too large to be represented by LLT should use the invalid LLT.
  LLT MemoryType;

  Flags FlagVals;
  Align BaseAlign;
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
                    const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }

  const PseudoSourceValue *getPseudoValue() const {
    return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the memory type of the memory reference. This should only be relied
  /// on for GlobalISel G_* operation legalization.
  LLT getMemoryType() const { return MemoryType; }

  /// Return the size in bytes of the memory reference.
  uint64_t getSize() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
  }

  /// Return the size in bits of the memory reference.
  uint64_t getSizeInBits() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
  }

  LLT getType() const {
    return MemoryType;
  }

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  Align getAlign() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  Align getBaseAlign() const { return BaseAlign; }

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation.  (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const {
    return getMergedAtomicOrdering(getSuccessOrdering(), getFailureOrdering());
  }
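
  // Example (illustrative): for a cmpxchg-style operand with a Release
  // success ordering and an Acquire failure ordering, getMergedOrdering()
  // returns AcquireRelease, an ordering at least as strong as both (see
  // getMergedAtomicOrdering() in llvm/Support/AtomicOrdering.h, included
  // above).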

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const {
    return getSuccessOrdering() != AtomicOrdering::NotAtomic;
  }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
            getSuccessOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }
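
  // Illustrative consequences of the two predicates above: a plain
  // non-volatile load (NotAtomic) is isUnordered() but not isAtomic(); a
  // non-volatile `unordered` atomic access is both isAtomic() and
  // isUnordered(); any `monotonic`-or-stronger atomic, and any volatile
  // access, is not isUnordered().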

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
  /// greater alignment. This must only be used when the new alignment applies
  /// to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Reset the tracked memory type.
  void setType(LLT NewTy) {
    MemoryType = NewTy;
  }

  /// Profile - Gather unique data for the object.
  ///
  void Profile(FoldingSetNodeID &ID) const;

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlign() == RHS.getAlign() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};
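
// Example usage (illustrative sketch): backends typically obtain
// MachineMemOperand objects through MachineFunction::getMachineMemOperand
// (declared in llvm/CodeGen/MachineFunction.h) rather than constructing them
// directly. `MF` and `FI` below are assumed to be an existing MachineFunction
// and frame index.
//
//   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       PtrInfo, MachineMemOperand::MOLoad, LLT::scalar(32), Align(4));
//   assert(MMO->isLoad() && !MMO->isVolatile() && MMO->getSize() == 4);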

} // End llvm namespace

#endif