  1. //==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file describes an abstract interface used to get information about a
  10. // target machine's register file.  This information is used for a variety of
  11. // purposes, especially register allocation.
  12. //
  13. //===----------------------------------------------------------------------===//
  14.  
  15. #ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
  16. #define LLVM_CODEGEN_TARGETREGISTERINFO_H
  17.  
  18. #include "llvm/ADT/ArrayRef.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/ADT/StringRef.h"
  21. #include "llvm/ADT/iterator_range.h"
  22. #include "llvm/CodeGen/MachineBasicBlock.h"
  23. #include "llvm/IR/CallingConv.h"
  24. #include "llvm/MC/LaneBitmask.h"
  25. #include "llvm/MC/MCRegisterInfo.h"
  26. #include "llvm/Support/ErrorHandling.h"
  27. #include "llvm/Support/MachineValueType.h"
  28. #include "llvm/Support/MathExtras.h"
  29. #include "llvm/Support/Printable.h"
  30. #include <cassert>
  31. #include <cstdint>
  32.  
  33. namespace llvm {
  34.  
  35. class BitVector;
  36. class DIExpression;
  37. class LiveRegMatrix;
  38. class MachineFunction;
  39. class MachineInstr;
  40. class RegScavenger;
  41. class VirtRegMap;
  42. class LiveIntervals;
  43. class LiveInterval;
  44.  
  45. class TargetRegisterClass {
  46. public:
  47.   using iterator = const MCPhysReg *;
  48.   using const_iterator = const MCPhysReg *;
  49.   using sc_iterator = const TargetRegisterClass* const *;
  50.  
  51.   // Instance variables filled by tablegen, do not use!
  52.   const MCRegisterClass *MC;
  53.   const uint32_t *SubClassMask;
  54.   const uint16_t *SuperRegIndices;
  55.   const LaneBitmask LaneMask;
  56.   /// Classes with a higher priority value are assigned first by register
  57.   /// allocators using a greedy heuristic. The value is in the range [0,31].
  58.   const uint8_t AllocationPriority;
  59.  
  60.   // Change allocation priority heuristic used by greedy.
  61.   const bool GlobalPriority;
  62.  
  63.   /// Configurable target specific flags.
  64.   const uint8_t TSFlags;
  65.   /// Whether the class supports two (or more) disjunct subregister indices.
  66.   const bool HasDisjunctSubRegs;
  67.   /// Whether a combination of subregisters can cover every register in the
  68.   /// class. See also the CoveredBySubRegs description in Target.td.
  69.   const bool CoveredBySubRegs;
  70.   const sc_iterator SuperClasses;
  71.   ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
  72.  
  73.   /// Return the register class ID number.
  74.   unsigned getID() const { return MC->getID(); }
  75.  
  76.   /// begin/end - Return all of the registers in this class.
  77.   ///
  78.   iterator       begin() const { return MC->begin(); }
  79.   iterator         end() const { return MC->end(); }
  80.  
  81.   /// Return the number of registers in this class.
  82.   unsigned getNumRegs() const { return MC->getNumRegs(); }
  83.  
  84.   iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
  85.   getRegisters() const {
  86.     return make_range(MC->begin(), MC->end());
  87.   }
  88.  
  89.   /// Return the specified register in the class.
  90.   MCRegister getRegister(unsigned i) const {
  91.     return MC->getRegister(i);
  92.   }
  93.  
  94.   /// Return true if the specified register is included in this register class.
  95.   /// This does not include virtual registers.
  96.   bool contains(Register Reg) const {
  97.     /// FIXME: Historically this function has returned false when given vregs
  98.     ///        but it should probably only receive physical registers
  99.     if (!Reg.isPhysical())
  100.       return false;
  101.     return MC->contains(Reg.asMCReg());
  102.   }
  103.  
  104.   /// Return true if both registers are in this class.
  105.   bool contains(Register Reg1, Register Reg2) const {
  106.   /// FIXME: Historically this function has returned false when given vregs
  107.     ///        but it should probably only receive physical registers
  108.     if (!Reg1.isPhysical() || !Reg2.isPhysical())
  109.       return false;
  110.     return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
  111.   }
  112.  
  113.   /// Return the cost of copying a value between two registers in this class.
  114.   /// A negative number means the register class is very expensive
  115.   /// to copy e.g. status flag register classes.
  116.   int getCopyCost() const { return MC->getCopyCost(); }
  117.  
  118.   /// Return true if this register class may be used to create virtual
  119.   /// registers.
  120.   bool isAllocatable() const { return MC->isAllocatable(); }
  121.  
  122.   /// Return true if the specified TargetRegisterClass
  123.   /// is a proper sub-class of this TargetRegisterClass.
  124.   bool hasSubClass(const TargetRegisterClass *RC) const {
  125.     return RC != this && hasSubClassEq(RC);
  126.   }
  127.  
  128.   /// Returns true if RC is a sub-class of or equal to this class.
  129.   bool hasSubClassEq(const TargetRegisterClass *RC) const {
  130.     unsigned ID = RC->getID();
  131.     return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
  132.   }
  133.  
  134.   /// Return true if the specified TargetRegisterClass is a
  135.   /// proper super-class of this TargetRegisterClass.
  136.   bool hasSuperClass(const TargetRegisterClass *RC) const {
  137.     return RC->hasSubClass(this);
  138.   }
  139.  
  140.   /// Returns true if RC is a super-class of or equal to this class.
  141.   bool hasSuperClassEq(const TargetRegisterClass *RC) const {
  142.     return RC->hasSubClassEq(this);
  143.   }
  144.  
  145.   /// Returns a bit vector of subclasses, including this one.
  146.   /// The vector is indexed by class IDs.
  147.   ///
  148.   /// To use it, consider the returned array as a chunk of memory that
  149.   /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  150.   /// contains a bitset of the ID of the subclasses in big-endian style.
  151.  
  152.   /// I.e., the representation of the memory from left to right at the
  153.   /// bit level looks like:
  154.   /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
  155.   ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
  156.   /// Where the number represents the class ID and XXX bits that
  157.   /// should be ignored.
  158.   ///
  159.   /// See the implementation of hasSubClassEq for an example of how it
  160.   /// can be used.
  161.   const uint32_t *getSubClassMask() const {
  162.     return SubClassMask;
  163.   }
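
  // A usage sketch of the mask layout described above (illustrative only; the
  // TargetRegisterClass pointers A and B are assumed to come from the caller):
  //
  //   const uint32_t *Mask = A->getSubClassMask();
  //   unsigned ID = B->getID();
  //   bool BIsSubClassEqOfA = (Mask[ID / 32] >> (ID % 32)) & 1;
  //
  // which is exactly the test performed by hasSubClassEq() above.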
  164.  
  165.   /// Returns a 0-terminated list of sub-register indices that project some
  166.   /// super-register class into this register class. The list has an entry for
  167.   /// each Idx such that:
  168.   ///
  169.   ///   There exists SuperRC where:
  170.   ///     For all Reg in SuperRC:
  171.   ///       this->contains(Reg:Idx)
  172.   const uint16_t *getSuperRegIndices() const {
  173.     return SuperRegIndices;
  174.   }
  175.  
  176.   /// Returns a NULL-terminated list of super-classes.  The
  177.   /// classes are ordered by ID which is also a topological ordering from large
  178.   /// to small classes.  The list does NOT include the current class.
  179.   sc_iterator getSuperClasses() const {
  180.     return SuperClasses;
  181.   }
  182.  
  183.   /// Return true if this TargetRegisterClass is a subset
  184.   /// class of at least one other TargetRegisterClass.
  185.   bool isASubClass() const {
  186.     return SuperClasses[0] != nullptr;
  187.   }
  188.  
  189.   /// Returns the preferred order for allocating registers from this register
  190.   /// class in MF. The raw order comes directly from the .td file and may
  191.   /// include reserved registers that are not allocatable.
  192.   /// Register allocators should also make sure to allocate
  193.   /// callee-saved registers only after all the volatiles are used. The
  194.   /// RegisterClassInfo class provides filtered allocation orders with
  195.   /// callee-saved registers moved to the end.
  196.   ///
  197.   /// The MachineFunction argument can be used to tune the allocatable
  198.   /// registers based on the characteristics of the function, subtarget, or
  199.   /// other criteria.
  200.   ///
  201.   /// By default, this method returns all registers in the class.
  202.   ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
  203.     return OrderFunc ? OrderFunc(MF) : ArrayRef(begin(), getNumRegs());
  204.   }
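
  // A sketch of walking the raw order (illustrative only; in-tree code normally
  // goes through RegisterClassInfo::getOrder(), which already filters out
  // reserved registers and moves callee-saved registers to the end):
  //
  //   for (MCPhysReg PhysReg : RC->getRawAllocationOrder(MF))
  //     if (!MF.getRegInfo().isReserved(PhysReg))
  //       ; // PhysReg is an allocation candidate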
  205.  
  206.   /// Returns the combination of all lane masks of the registers in this class.
  207.   /// The lane masks of the registers are the combination of all lane masks
  208.   /// of their subregisters. Returns 1 if there are no subregisters.
  209.   LaneBitmask getLaneMask() const {
  210.     return LaneMask;
  211.   }
  212. };
  213.  
  214. /// Extra information, not in MCRegisterDesc, about registers.
  215. /// These are used by codegen, not by MC.
  216. struct TargetRegisterInfoDesc {
  217.   const uint8_t *CostPerUse; // Extra cost of instructions using register.
  218.   unsigned NumCosts; // Number of cost values associated with each register.
  219.   const bool
  220.       *InAllocatableClass; // Register belongs to an allocatable regclass.
  221. };
  222.  
  223. /// Each TargetRegisterClass has a per register weight, and weight
  224. /// limit which must be less than the limits of its pressure sets.
  225. struct RegClassWeight {
  226.   unsigned RegWeight;
  227.   unsigned WeightLimit;
  228. };
  229.  
  230. /// TargetRegisterInfo base class - We assume that the target defines a static
  231. /// array of TargetRegisterDesc objects that represent all of the machine
  232. /// registers that the target has.  As such, we simply have to track a pointer
  233. /// to this array so that we can turn register number into a register
  234. /// descriptor.
  235. ///
  236. class TargetRegisterInfo : public MCRegisterInfo {
  237. public:
  238.   using regclass_iterator = const TargetRegisterClass * const *;
  239.   using vt_iterator = const MVT::SimpleValueType *;
  240.   struct RegClassInfo {
  241.     unsigned RegSize, SpillSize, SpillAlignment;
  242.     vt_iterator VTList;
  243.   };
  244. private:
  245.   const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
  246.   const char *const *SubRegIndexNames;        // Names of subreg indexes.
  247.   // Pointer to array of lane masks, one per sub-reg index.
  248.   const LaneBitmask *SubRegIndexLaneMasks;
  249.  
  250.   regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
  251.   LaneBitmask CoveringLanes;
  252.   const RegClassInfo *const RCInfos;
  253.   unsigned HwMode;
  254.  
  255. protected:
  256.   TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
  257.                      regclass_iterator RCB,
  258.                      regclass_iterator RCE,
  259.                      const char *const *SRINames,
  260.                      const LaneBitmask *SRILaneMasks,
  261.                      LaneBitmask CoveringLanes,
  262.                      const RegClassInfo *const RCIs,
  263.                      unsigned Mode = 0);
  264.   virtual ~TargetRegisterInfo();
  265.  
  266. public:
  267.   // Register numbers can represent physical registers, virtual registers, and
  268.   // sometimes stack slots. The unsigned values are divided into these ranges:
  269.   //
  270.   //   0           Not a register, can be used as a sentinel.
  271.   //   [1;2^30)    Physical registers assigned by TableGen.
  272.   //   [2^30;2^31) Stack slots. (Rarely used.)
  273.   //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  274.   //
  275.   // Further sentinels can be allocated from the small negative integers.
  276.   // DenseMapInfo<unsigned> uses -1u and -2u.
  277.  
  278.   /// Return the size in bits of a register from class RC.
  279.   unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
  280.     return getRegClassInfo(RC).RegSize;
  281.   }
  282.  
  283.   /// Return the size in bytes of the stack slot allocated to hold a spilled
  284.   /// copy of a register from class RC.
  285.   unsigned getSpillSize(const TargetRegisterClass &RC) const {
  286.     return getRegClassInfo(RC).SpillSize / 8;
  287.   }
  288.  
  289.   /// Return the minimum required alignment in bytes for a spill slot for
  290.   /// a register of this class.
  291.   Align getSpillAlign(const TargetRegisterClass &RC) const {
  292.     return Align(getRegClassInfo(RC).SpillAlignment / 8);
  293.   }
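
  // A minimal sketch of querying these properties (illustrative only; RC is
  // assumed to be a register class provided by the target):
  //
  //   unsigned Bits  = TRI->getRegSizeInBits(*RC); // register width in bits
  //   unsigned Bytes = TRI->getSpillSize(*RC);     // spill slot size in bytes
  //   Align Alignment = TRI->getSpillAlign(*RC);   // minimum slot alignment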
  294.  
  295.   /// Return true if the given TargetRegisterClass has the ValueType T.
  296.   bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
  297.     for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
  298.       if (MVT(*I) == T)
  299.         return true;
  300.     return false;
  301.   }
  302.  
  303.   /// Return true if the given TargetRegisterClass is compatible with LLT T.
  304.   bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
  305.     for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
  306.       MVT VT(*I);
  307.       if (VT == MVT::Untyped)
  308.         return true;
  309.  
  310.       if (LLT(VT) == T)
  311.         return true;
  312.     }
  313.     return false;
  314.   }
  315.  
  316.   /// Loop over all of the value types that can be represented by values
  317.   /// in the given register class.
  318.   vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
  319.     return getRegClassInfo(RC).VTList;
  320.   }
  321.  
  322.   vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
  323.     vt_iterator I = legalclasstypes_begin(RC);
  324.     while (*I != MVT::Other)
  325.       ++I;
  326.     return I;
  327.   }
  328.  
  329.   /// Returns the Register Class of a physical register of the given type,
  330.   /// picking the most specific (smallest) sub register class of the right type
  331.   /// that contains this physreg.
  332.   const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
  333.                                                     MVT VT = MVT::Other) const;
  334.  
  335.   /// Returns the Register Class of a physical register of the given type,
  336.   /// picking the most specific (smallest) sub register class of the right type
  337.   /// that contains this physreg. If there is no register class compatible with
  338.   /// the given type, returns nullptr.
  339.   const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
  340.                                                        LLT Ty = LLT()) const;
  341.  
  342.   /// Return the maximal subclass of the given register class that is
  343.   /// allocatable or NULL.
  344.   const TargetRegisterClass *
  345.     getAllocatableClass(const TargetRegisterClass *RC) const;
  346.  
  347.   /// Returns a bitset indexed by register number indicating if a register is
  348.   /// allocatable or not. If a register class is specified, returns the subset
  349.   /// for the class.
  350.   BitVector getAllocatableSet(const MachineFunction &MF,
  351.                               const TargetRegisterClass *RC = nullptr) const;
  352.  
  353.   /// Get a list of cost values for all registers that correspond to the index
  354.   /// returned by RegisterCostTableIndex.
  355.   ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
  356.     unsigned Idx = getRegisterCostTableIndex(MF);
  357.     unsigned NumRegs = getNumRegs();
  358.     assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");
  359.  
  360.     return ArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
  361.   }
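
  // For example, the extra cost of one physical register can be read back as
  // (sketch; MF and Reg are assumed to be in scope, Reg being a physical
  // register number):
  //
  //   ArrayRef<uint8_t> Costs = TRI->getRegisterCosts(MF);
  //   uint8_t ExtraCost = Costs[Reg];  // the table is indexed by register number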
  362.  
  363.   /// Return true if the register belongs to an allocatable register class.
  364.   bool isInAllocatableClass(MCRegister RegNo) const {
  365.     return InfoDesc->InAllocatableClass[RegNo];
  366.   }
  367.  
  368.   /// Return the human-readable symbolic target-specific
  369.   /// name for the specified SubRegIndex.
  370.   const char *getSubRegIndexName(unsigned SubIdx) const {
  371.     assert(SubIdx && SubIdx < getNumSubRegIndices() &&
  372.            "This is not a subregister index");
  373.     return SubRegIndexNames[SubIdx-1];
  374.   }
  375.  
  376.   /// Return a bitmask representing the parts of a register that are covered by
  377.   /// SubIdx \see LaneBitmask.
  378.   ///
  379.   /// SubIdx == 0 is allowed, it has the lane mask ~0u.
  380.   LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
  381.     assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
  382.     return SubRegIndexLaneMasks[SubIdx];
  383.   }
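
  // Lane masks are typically combined for overlap tests, e.g. (sketch; SubA and
  // SubB are sub-register indices of the same register):
  //
  //   LaneBitmask A = TRI->getSubRegIndexLaneMask(SubA);
  //   LaneBitmask B = TRI->getSubRegIndexLaneMask(SubB);
  //   bool MayOverlap = (A & B).any();
  //
  // Note this only tests overlap; see getCoveringLanes() below for why it is
  // not a covering test.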
  384.  
  385.   /// Try to find one or more subregister indexes to cover \p LaneMask.
  386.   ///
  387.   /// If this is possible, returns true and appends the best matching set of
  388.   /// indexes to \p Indexes. If this is not possible, returns false.
  389.   bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
  390.                                 const TargetRegisterClass *RC,
  391.                                 LaneBitmask LaneMask,
  392.                                 SmallVectorImpl<unsigned> &Indexes) const;
  393.  
  394.   /// The lane masks returned by getSubRegIndexLaneMask() above can only be
  395.   /// used to determine if sub-registers overlap - they can't be used to
  396.   /// determine if a set of sub-registers completely cover another
  397.   /// sub-register.
  398.   ///
  399.   /// The X86 general purpose registers have two lanes corresponding to the
  400.   /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
  401.   /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
  402.   /// sub_32bit sub-register.
  403.   ///
  404.   /// On the other hand, the ARM NEON lanes fully cover their registers: The
  405.   /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
  406.   /// This is related to the CoveredBySubRegs property on register definitions.
  407.   ///
  408.   /// This function returns a bit mask of lanes that completely cover their
  409.   /// sub-registers. More precisely, given:
  410.   ///
  411.   ///   Covering = getCoveringLanes();
  412.   ///   MaskA = getSubRegIndexLaneMask(SubA);
  413.   ///   MaskB = getSubRegIndexLaneMask(SubB);
  414.   ///
  415.   /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
  416.   /// SubB.
  417.   LaneBitmask getCoveringLanes() const { return CoveringLanes; }
  418.  
  419.   /// Returns true if the two registers are equal or alias each other.
  420.   /// The registers may be virtual registers.
  421.   bool regsOverlap(Register RegA, Register RegB) const {
  422.     if (RegA == RegB)
  423.       return true;
  424.     if (RegA.isPhysical() && RegB.isPhysical())
  425.       return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg());
  426.     return false;
  427.   }
  428.  
  429.   /// Returns true if Reg contains RegUnit.
  430.   bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
  431.     for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
  432.       if (Register(*Units) == RegUnit)
  433.         return true;
  434.     return false;
  435.   }
  436.  
  437.   /// Returns the original SrcReg unless it is the target of a copy-like
  438.   /// operation, in which case we chain backwards through all such operations
  439.   /// to the ultimate source register.  If a physical register is encountered,
  440.   /// we stop the search.
  441.   virtual Register lookThruCopyLike(Register SrcReg,
  442.                                     const MachineRegisterInfo *MRI) const;
  443.  
  444.   /// Find the original SrcReg unless it is the target of a copy-like operation,
  445.   /// in which case we chain backwards through all such operations to the
  446.   /// ultimate source register. If a physical register is encountered, we stop
  447.   /// the search.
  448.   /// Return the original SrcReg if all the definitions in the chain only have
  449.   /// one user and not a physical register.
  450.   virtual Register
  451.   lookThruSingleUseCopyChain(Register SrcReg,
  452.                              const MachineRegisterInfo *MRI) const;
  453.  
  454.   /// Return a null-terminated list of all of the callee-saved registers on
  455.   /// this target. The registers should be in the order of desired callee-save
  456.   /// stack frame offset. The first register is closest to the incoming stack
  457.   /// pointer if stack grows down, and vice versa.
  458.   /// Notice: This function does not take into account disabled CSRs.
  459.   ///         In most cases you will want to use instead the function
  460.   ///         getCalleeSavedRegs that is implemented in MachineRegisterInfo.
  461.   virtual const MCPhysReg*
  462.   getCalleeSavedRegs(const MachineFunction *MF) const = 0;
  463.  
  464.   /// Return a mask of call-preserved registers for the given calling convention
  465.   /// on the current function. The mask should include all call-preserved
  466.   /// aliases. This is used by the register allocator to determine which
  467.   /// registers can be live across a call.
  468.   ///
  469.   /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
  470.   /// A set bit indicates that all bits of the corresponding register are
  471.   /// preserved across the function call.  The bit mask is expected to be
  472.   /// sub-register complete, i.e. if A is preserved, so are all its
  473.   /// sub-registers.
  474.   ///
  475.   /// Bits are numbered from the LSB, so the bit for physical register Reg can
  476.   /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
  477.   ///
  478.   /// A NULL pointer means that no register mask will be used, and call
  479.   /// instructions should use implicit-def operands to indicate call clobbered
  480.   /// registers.
  481.   ///
  482.   virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
  483.                                                CallingConv::ID) const {
  484.     // The default mask clobbers everything.  All targets should override.
  485.     return nullptr;
  486.   }
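
  // A sketch of querying the mask for a single register, following the bit
  // layout documented above (MF, CC and Reg are assumed to be in scope):
  //
  //   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CC);
  //   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);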
  487.  
  488.   /// Return a register mask for the registers preserved by the unwinder,
  489.   /// or nullptr if no custom mask is needed.
  490.   virtual const uint32_t *
  491.   getCustomEHPadPreservedMask(const MachineFunction &MF) const {
  492.     return nullptr;
  493.   }
  494.  
  495.   /// Return a register mask that clobbers everything.
  496.   virtual const uint32_t *getNoPreservedMask() const {
  497.     llvm_unreachable("target does not provide no preserved mask");
  498.   }
  499.  
  500.   /// Return a list of all of the registers which are clobbered "inside" a call
  501.   /// to the given function. For example, these might be needed for PLT
  502.   /// sequences of long-branch veneers.
  503.   virtual ArrayRef<MCPhysReg>
  504.   getIntraCallClobberedRegs(const MachineFunction *MF) const {
  505.     return {};
  506.   }
  507.  
  508.   /// Return true if all bits that are set in mask \p mask0 are also set in
  509.   /// \p mask1.
  510.   bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
  511.  
  512.   /// Return all the call-preserved register masks defined for this target.
  513.   virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
  514.   virtual ArrayRef<const char *> getRegMaskNames() const = 0;
  515.  
  516.   /// Returns a bitset indexed by physical register number indicating if a
  517.   /// register is a special register that has particular uses and should be
  518.   /// considered unavailable at all times, e.g. stack pointer, return address.
  519.   /// A reserved register:
  520.   /// - is not allocatable
  521.   /// - is considered always live
  522.   /// - is ignored by liveness tracking
  523.   /// It is often necessary to reserve the super registers of a reserved
  524.   /// register as well, to avoid them getting allocated indirectly. You may use
  525.   /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
  526.   virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
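
  // A sketch of a typical target override using the helpers mentioned above
  // (illustrative only; the class and register names are placeholders, not
  // taken from any in-tree target):
  //
  //   BitVector MyRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  //     BitVector Reserved(getNumRegs());
  //     markSuperRegs(Reserved, MyTarget::SP); // stack pointer
  //     markSuperRegs(Reserved, MyTarget::RA); // return address
  //     assert(checkAllSuperRegsMarked(Reserved));
  //     return Reserved;
  //   }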
  527.  
  528.   /// Returns either a string explaining why the given register is reserved for
  529.   /// this function, or an empty optional if no explanation has been written.
  530.   /// The absence of an explanation does not mean that the register is not
  531.   /// reserved (meaning, you should check that PhysReg is in fact reserved
  532.   /// before calling this).
  533.   virtual std::optional<std::string>
  534.   explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
  535.     return {};
  536.   }
  537.  
  538.   /// Returns false if we can't guarantee that Physreg, specified as an IR asm
  539.   /// clobber constraint, will be preserved across the statement.
  540.   virtual bool isAsmClobberable(const MachineFunction &MF,
  541.                                 MCRegister PhysReg) const {
  542.     return true;
  543.   }
  544.  
  545.   /// Returns true if PhysReg cannot be written to in inline asm statements.
  546.   virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
  547.                                       unsigned PhysReg) const {
  548.     return false;
  549.   }
  550.  
  551.   /// Returns true if PhysReg is unallocatable and constant throughout the
  552.   /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
  553.   virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }
  554.  
  555.   /// Returns true if the register class is considered divergent.
  556.   virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
  557.     return false;
  558.   }
  559.  
  560.   /// Physical registers that may be modified within a function but are
  561.   /// guaranteed to be restored before any uses. This is useful for targets that
  562.   /// have call sequences where a GOT register may be updated by the caller
  563.   /// prior to a call and is guaranteed to be restored (also by the caller)
  564.   /// after the call.
  565.   virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
  566.                                         const MachineFunction &MF) const {
  567.     return false;
  568.   }
  569.  
  570.   /// This is a wrapper around getCallPreservedMask().
  571.   /// Return true if the register is preserved after the call.
  572.   virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
  573.                                     const MachineFunction &MF) const;
  574.  
  575.   /// Returns true if PhysReg can be used as an argument to a function.
  576.   virtual bool isArgumentRegister(const MachineFunction &MF,
  577.                                   MCRegister PhysReg) const {
  578.     return false;
  579.   }
  580.  
  581.   /// Returns true if PhysReg is a fixed register.
  582.   virtual bool isFixedRegister(const MachineFunction &MF,
  583.                                MCRegister PhysReg) const {
  584.     return false;
  585.   }
  586.  
  587.   /// Returns true if PhysReg is a general purpose register.
  588.   virtual bool isGeneralPurposeRegister(const MachineFunction &MF,
  589.                                         MCRegister PhysReg) const {
  590.     return false;
  591.   }
  592.  
  593.   /// Prior to adding the live-out mask to a stackmap or patchpoint
  594.   /// instruction, provide the target the opportunity to adjust it (mainly to
  595.   /// remove pseudo-registers that should be ignored).
  596.   virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
  597.  
  598.   /// Return a super-register of the specified register
  599.   /// Reg so its sub-register of index SubIdx is Reg.
  600.   MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
  601.                                  const TargetRegisterClass *RC) const {
  602.     return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
  603.   }
  604.  
  605.   /// Return a subclass of the specified register
  606.   /// class A so that each register in it has a sub-register of the
  607.   /// specified sub-register index which is in the specified register class B.
  608.   ///
  609.   /// TableGen will synthesize missing A sub-classes.
  610.   virtual const TargetRegisterClass *
  611.   getMatchingSuperRegClass(const TargetRegisterClass *A,
  612.                            const TargetRegisterClass *B, unsigned Idx) const;
  613.  
  614.   // For a copy-like instruction that defines a register of class DefRC with
  615.   // subreg index DefSubReg, reading from another source with class SrcRC and
  616.   // subregister SrcSubReg, return true if this is a preferable copy
  617.   // instruction or an earlier use should be used.
  618.   virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
  619.                                     unsigned DefSubReg,
  620.                                     const TargetRegisterClass *SrcRC,
  621.                                     unsigned SrcSubReg) const;
  622.  
  623.   /// Returns the largest legal sub-class of RC that
  624.   /// supports the sub-register index Idx.
  625.   /// If no such sub-class exists, return NULL.
  626.   /// If all registers in RC already have an Idx sub-register, return RC.
  627.   ///
  628.   /// TableGen generates a version of this function that is good enough in most
  629.   /// cases.  Targets can override if they have constraints that TableGen
  630.   /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
  631.   /// supported by the full GR32 register class in 64-bit mode, but only by the
  632.   /// GR32_ABCD register class in 32-bit mode.
  633.   ///
  634.   /// TableGen will synthesize missing RC sub-classes.
  635.   virtual const TargetRegisterClass *
  636.   getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
  637.     assert(Idx == 0 && "Target has no sub-registers");
  638.     return RC;
  639.   }
  640.  
  641.   /// Return a register class that can be used for a subregister copy from/into
  642.   /// \p SuperRC at \p SubRegIdx.
  643.   virtual const TargetRegisterClass *
  644.   getSubRegisterClass(const TargetRegisterClass *SuperRC,
  645.                       unsigned SubRegIdx) const {
  646.     return nullptr;
  647.   }
  648.  
  649.   /// Return the subregister index you get from composing
  650.   /// two subregister indices.
  651.   ///
  652.   /// The special null sub-register index composes as the identity.
  653.   ///
  654.   /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  655.   /// returns c. Note that composeSubRegIndices does not tell you about illegal
  656.   /// compositions. If R does not have a subreg a, or R:a does not have a subreg
  657.   /// b, composeSubRegIndices doesn't tell you.
  658.   ///
  659.   /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
  660.   /// ssub_0:S0 - ssub_3:S3 subregs.
  661.   /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
  662.   unsigned composeSubRegIndices(unsigned a, unsigned b) const {
  663.     if (!a) return b;
  664.     if (!b) return a;
  665.     return composeSubRegIndicesImpl(a, b);
  666.   }
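
  // In code, the ARM example above reads (sketch; the sub-register index
  // enumerators are assumed to live in the target-generated ARM namespace):
  //
  //   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  //   // Idx == ARM::ssub_2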
  667.  
  668.   /// Transforms a LaneMask computed for one subregister to the lanemask that
  669.   /// would have been computed when composing the subsubregisters with IdxA
  670.   /// first. @sa composeSubRegIndices()
  671.   LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
  672.                                          LaneBitmask Mask) const {
  673.     if (!IdxA)
  674.       return Mask;
  675.     return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
  676.   }
  677.  
  678.   /// Transform a lanemask given for a virtual register to the corresponding
  679.   /// lanemask before using subregister with index \p IdxA.
  680.   /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
  681.   /// valid lane mask (no invalid bits set), the following holds:
  682.   /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
  683.   /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
  684.   /// => X1 == Mask
  685.   LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
  686.                                                 LaneBitmask LaneMask) const {
  687.     if (!IdxA)
  688.       return LaneMask;
  689.     return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
  690.   }
  691.  
  692.   /// Debugging helper: dump register in human readable form to dbgs() stream.
  693.   static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
  694.                       const TargetRegisterInfo *TRI = nullptr);
  695.  
  696.   /// Return target defined base register class for a physical register.
  697.   /// This is the register class with the lowest BaseClassOrder containing the
  698.   /// register.
  699.   /// Will be nullptr if the register is not in any base register class.
  700.   virtual const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const {
  701.     return nullptr;
  702.   }
  703.  
  704. protected:
  705.   /// Overridden by TableGen in targets that have sub-registers.
  706.   virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
  707.     llvm_unreachable("Target has no sub-registers");
  708.   }
  709.  
  710.   /// Overridden by TableGen in targets that have sub-registers.
  711.   virtual LaneBitmask
  712.   composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
  713.     llvm_unreachable("Target has no sub-registers");
  714.   }
  715.  
  716.   virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
  717.                                                             LaneBitmask) const {
  718.     llvm_unreachable("Target has no sub-registers");
  719.   }
  720.  
  721.   /// Return the register cost table index. This implementation is sufficient
  722.   /// for most architectures and can be overridden by targets in case there are
  723.   /// multiple cost values associated with each register.
  724.   virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
  725.     return 0;
  726.   }
  727.  
  728. public:
  729.   /// Find a common super-register class if it exists.
  730.   ///
  731.   /// Find a register class, SuperRC and two sub-register indices, PreA and
  732.   /// PreB, such that:
  733.   ///
  734.   ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
  735.   ///
  736.   ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
  737.   ///
  738.   ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
  739.   ///
  740.   /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
  741.   /// requirements, and there is no register class with a smaller spill size
  742.   /// that satisfies the requirements.
  743.   ///
  744.   /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
  745.   ///
  746.   /// Either of the PreA and PreB sub-register indices may be returned as 0. In
  747.   /// that case, the returned register class will be a sub-class of the
  748.   /// corresponding argument register class.
  749.   ///
  750.   /// The function returns NULL if no register class can be found.
  751.   const TargetRegisterClass*
  752.   getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
  753.                          const TargetRegisterClass *RCB, unsigned SubB,
  754.                          unsigned &PreA, unsigned &PreB) const;
  755.  
  756.   //===--------------------------------------------------------------------===//
  757.   // Register Class Information
  758.   //
  759. protected:
  760.   const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
  761.     return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
  762.   }
  763.  
  764. public:
  765.   /// Register class iterators
  766.   regclass_iterator regclass_begin() const { return RegClassBegin; }
  767.   regclass_iterator regclass_end() const { return RegClassEnd; }
  768.   iterator_range<regclass_iterator> regclasses() const {
  769.     return make_range(regclass_begin(), regclass_end());
  770.   }
  771.  
  772.   unsigned getNumRegClasses() const {
  773.     return (unsigned)(regclass_end()-regclass_begin());
  774.   }
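
  // For example, every register class can be visited through these iterators
  // (sketch; TRI is assumed to be a fully constructed TargetRegisterInfo):
  //
  //   for (const TargetRegisterClass *RC : TRI->regclasses())
  //     dbgs() << TRI->getRegClassName(RC) << " has " << RC->getNumRegs()
  //            << " registers\n";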
  775.  
  776.   /// Returns the register class associated with the enumeration value.
  777.   /// See class MCOperandInfo.
  778.   const TargetRegisterClass *getRegClass(unsigned i) const {
  779.     assert(i < getNumRegClasses() && "Register Class ID out of range");
  780.     return RegClassBegin[i];
  781.   }
  782.  
  783.   /// Returns the name of the register class.
  784.   const char *getRegClassName(const TargetRegisterClass *Class) const {
  785.     return MCRegisterInfo::getRegClassName(Class->MC);
  786.   }
  787.  
  788.   /// Find the largest common subclass of A and B.
  789.   /// Return NULL if there is no common subclass.
  790.   const TargetRegisterClass *
  791.   getCommonSubClass(const TargetRegisterClass *A,
  792.                     const TargetRegisterClass *B) const;
  793.  
  794.   /// Returns a TargetRegisterClass used for pointer values.
  795.   /// If a target supports multiple different pointer register classes,
  796.   /// kind specifies which one is indicated.
  797.   virtual const TargetRegisterClass *
  798.   getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
  799.     llvm_unreachable("Target didn't implement getPointerRegClass!");
  800.   }
  801.  
  802.   /// Returns a legal register class to copy a register in the specified class
  803.   /// to or from. If it is possible to copy the register directly without using
  804.   /// a cross register class copy, return the specified RC. Returns NULL if it
  805.   /// is not possible to copy between two registers of the specified class.
  806.   virtual const TargetRegisterClass *
  807.   getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  808.     return RC;
  809.   }
  810.  
  811.   /// Returns the largest super class of RC that is legal to use in the current
  812.   /// sub-target and has the same spill size.
  813.   /// The returned register class can be used to create virtual registers which
  814.   /// means that all its registers can be copied and spilled.
  815.   virtual const TargetRegisterClass *
  816.   getLargestLegalSuperClass(const TargetRegisterClass *RC,
  817.                             const MachineFunction &) const {
  818.     /// The default implementation is very conservative and doesn't allow the
  819.     /// register allocator to inflate register classes.
  820.     return RC;
  821.   }
  822.  
  823.   /// Return the register pressure "high water mark" for the specific register
  824.   /// class. The scheduler is in high register pressure mode (for the specific
  825.   /// register class) if it goes over the limit.
  826.   ///
  827.   /// Note: this is the old register pressure model that relies on a manually
  828.   /// specified representative register class per value type.
  829.   virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
  830.                                        MachineFunction &MF) const {
  831.     return 0;
  832.   }
  833.  
  834.   /// Return a heuristic for the machine scheduler to compare the profitability
  835.   /// of increasing one register pressure set versus another.  The scheduler
  836.   /// will prefer increasing the register pressure of the set which returns
  837.   /// the largest value for this function.
  838.   virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
  839.                                           unsigned PSetID) const {
  840.     return PSetID;
  841.   }
  842.  
  843.   /// Get the weight in units of pressure for this register class.
  844.   virtual const RegClassWeight &getRegClassWeight(
  845.     const TargetRegisterClass *RC) const = 0;
  846.  
  847.   /// Returns size in bits of a phys/virtual/generic register.
  848.   unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;
  849.  
  850.   /// Get the weight in units of pressure for this register unit.
  851.   virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
  852.  
  853.   /// Get the number of dimensions of register pressure.
  854.   virtual unsigned getNumRegPressureSets() const = 0;
  855.  
  856.   /// Get the name of this register unit pressure set.
  857.   virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
  858.  
  859.   /// Get the register unit pressure limit for this dimension.
  860.   /// This limit must be adjusted dynamically for reserved registers.
  861.   virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
  862.                                           unsigned Idx) const = 0;
  863.  
  864.   /// Get the dimensions of register pressure impacted by this register class.
  865.   /// Returns a -1 terminated array of pressure set IDs.
  866.   virtual const int *getRegClassPressureSets(
  867.     const TargetRegisterClass *RC) const = 0;
  868.  
  869.   /// Get the dimensions of register pressure impacted by this register unit.
  870.   /// Returns a -1 terminated array of pressure set IDs.
  871.   virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
  872.  
  873.   /// Get a list of 'hint' registers that the register allocator should try
  874.   /// first when allocating a physical register for the virtual register
  875.   /// VirtReg. These registers are effectively moved to the front of the
  876.   /// allocation order. If true is returned, regalloc will try to only use
  877.   /// hints to the greatest extent possible even if it means spilling.
  878.   ///
  879.   /// The Order argument is the allocation order for VirtReg's register class
  880.   /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  881.   /// come from Order, and they must not be reserved.
  882.   ///
  883.   /// The default implementation of this function will only add target
  884.   /// independent register allocation hints. Targets that override this
  885.   /// function should typically call this default implementation as well and
  886.   /// expect to see generic copy hints added.
  887.   virtual bool
  888.   getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
  889.                         SmallVectorImpl<MCPhysReg> &Hints,
  890.                         const MachineFunction &MF,
  891.                         const VirtRegMap *VRM = nullptr,
  892.                         const LiveRegMatrix *Matrix = nullptr) const;
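
  // Target overrides usually add their own hints first and then defer to this
  // default implementation for the generic copy hints, e.g. (sketch only; the
  // MyRegisterInfo class is a placeholder):
  //
  //   bool MyRegisterInfo::getRegAllocationHints(
  //       Register VirtReg, ArrayRef<MCPhysReg> Order,
  //       SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
  //       const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  //     // ... push target-specific candidates from Order into Hints ...
  //     return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
  //                                                      MF, VRM, Matrix);
  //   }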
  893.  
  894.   /// A callback to allow the target a chance to update register allocation hints
  895.   /// when a register is "changed" (e.g. coalesced) to another register.
  896.   /// e.g. On ARM, some virtual registers should target register pairs,
  897.   /// if one of pair is coalesced to another register, the allocation hint of
  898.   /// the other half of the pair should be changed to point to the new register.
  899.   virtual void updateRegAllocHint(Register Reg, Register NewReg,
  900.                                   MachineFunction &MF) const {
  901.     // Do nothing.
  902.   }
  903.  
  904.   /// Allow the target to reverse allocation order of local live ranges. This
  905.   /// will generally allocate shorter local live ranges first. For targets with
  906.   /// many registers, this could reduce regalloc compile time by a large
  907.   /// factor. It is disabled by default for three reasons:
  908.   /// (1) Top-down allocation is simpler and easier to debug for targets that
  909.   /// don't benefit from reversing the order.
  910.   /// (2) Bottom-up allocation could result in poor eviction decisions on some
  911.   /// targets affecting the performance of compiled code.
  912.   /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
  913.   virtual bool reverseLocalAssignment() const { return false; }
  914.  
  915.   /// Allow the target to override the cost of using a callee-saved register for
  916.   /// the first time. Default value of 0 means we will use a callee-saved
  917.   /// register if it is available.
  918.   virtual unsigned getCSRFirstUseCost() const { return 0; }
  919.  
  920.   /// Returns true if the target requires (and can make use of) the register
  921.   /// scavenger.
  922.   virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
  923.     return false;
  924.   }
  925.  
  926.   /// Returns true if the target wants to use frame pointer based accesses to
  927.   /// spill to the scavenger emergency spill slot.
  928.   virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
  929.     return true;
  930.   }
  931.  
  932.   /// Returns true if the target requires post PEI scavenging of registers for
  933.   /// materializing frame index constants.
  934.   virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
  935.     return false;
  936.   }
  937.  
  938.   /// Returns true if the target requires using the RegScavenger directly for
  939.   /// frame elimination despite using requiresFrameIndexScavenging.
  940.   virtual bool requiresFrameIndexReplacementScavenging(
  941.       const MachineFunction &MF) const {
  942.     return false;
  943.   }
  944.  
  945.   /// Returns true if the target wants the LocalStackAllocation pass to be run
  946.   /// and virtual base registers used for more efficient stack access.
  947.   virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  948.     return false;
  949.   }
  950.  
  951.   /// Return true if target has reserved a spill slot in the stack frame of
  952.   /// the given function for the specified register. e.g. On x86, if the frame
  953.   /// register is required, the first fixed stack object is reserved as its
  954.   /// spill slot. This tells PEI not to create a new stack frame
  955.   /// object for the given register. It should be called only after
  956.   /// determineCalleeSaves().
  957.   virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
  958.                                     int &FrameIdx) const {
  959.     return false;
  960.   }
  961.  
  962.   /// Returns true if the live-ins should be tracked after register allocation.
  963.   virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  964.     return true;
  965.   }
  966.  
  967.   /// True if the stack can be realigned for the target.
  968.   virtual bool canRealignStack(const MachineFunction &MF) const;
  969.  
  970.   /// True if storage within the function requires the stack pointer to be
  971.   /// aligned more than the normal calling convention calls for.
  972.   virtual bool shouldRealignStack(const MachineFunction &MF) const;
  973.  
  974.   /// True if stack realignment is required and still possible.
  975.   bool hasStackRealignment(const MachineFunction &MF) const {
  976.     return shouldRealignStack(MF) && canRealignStack(MF);
  977.   }
  978.  
  979.   /// Get the offset from the referenced frame index in the instruction,
  980.   /// if there is one.
  981.   virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
  982.                                            int Idx) const {
  983.     return 0;
  984.   }
  985.  
  986.   /// Returns true if the instruction's frame index reference would be better
  987.   /// served by a base register other than FP or SP.
  988.   /// Used by LocalStackFrameAllocation to determine which frame index
  989.   /// references it should create new base registers for.
  990.   virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  991.     return false;
  992.   }
  993.  
  994.   /// Insert defining instruction(s) for a pointer to FrameIdx before
  995.   /// insertion point I. Return materialized frame pointer.
  996.   virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
  997.                                                 int FrameIdx,
  998.                                                 int64_t Offset) const {
  999.     llvm_unreachable("materializeFrameBaseRegister does not exist on this "
  1000.                      "target");
  1001.   }
  1002.  
  1003.   /// Resolve a frame index operand of an instruction
  1004.   /// to reference the indicated base register plus offset instead.
  1005.   virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
  1006.                                  int64_t Offset) const {
  1007.     llvm_unreachable("resolveFrameIndex does not exist on this target");
  1008.   }
  1009.  
  1010.   /// Determine whether a given base register plus offset immediate is
  1011.   /// encodable to resolve a frame index.
  1012.   virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
  1013.                                   int64_t Offset) const {
  1014.     llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  1015.   }
  1016.  
  1017.   /// Gets the DWARF expression opcodes for \p Offset.
  1018.   virtual void getOffsetOpcodes(const StackOffset &Offset,
  1019.                                 SmallVectorImpl<uint64_t> &Ops) const;
  1020.  
  1021.   /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
  1022.   DIExpression *
  1023.   prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
  1024.                           const StackOffset &Offset) const;
  1025.  
  1026.   /// Spill the register so it can be used by the register scavenger.
  1027.   /// Return true if the register was spilled, false otherwise.
  1028.   /// If this function does not spill the register, the scavenger
  1029.   /// will instead spill it to the emergency spill slot.
  1030.   virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
  1031.                                      MachineBasicBlock::iterator I,
  1032.                                      MachineBasicBlock::iterator &UseMI,
  1033.                                      const TargetRegisterClass *RC,
  1034.                                      Register Reg) const {
  1035.     return false;
  1036.   }
  1037.  
  1038.   /// Process frame indices in reverse block order. This changes the behavior of
  1039.   /// the RegScavenger passed to eliminateFrameIndex. If this is true targets
  1040.   /// should scavengeRegisterBackwards in eliminateFrameIndex. New targets
  1041.   /// should prefer reverse scavenging behavior.
  1042.   virtual bool supportsBackwardScavenger() const { return false; }
  1043.  
  1044.   /// This method must be overridden to eliminate abstract frame indices from
  1045.   /// instructions which may use them. The instruction referenced by the
  1046.   /// iterator contains an MO_FrameIndex operand which must be eliminated by
  1047.   /// this method. This method may modify or replace the specified instruction,
  1048.   /// as long as it keeps the iterator pointing at the finished product.
  1049.   /// SPAdj is the SP adjustment due to call frame setup instruction.
  1050.   /// FIOperandNum is the FI operand number.
  1051.   /// Returns true if the current instruction was removed and the iterator
  1052.   /// is no longer valid.
  1053.   virtual bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
  1054.                                    int SPAdj, unsigned FIOperandNum,
  1055.                                    RegScavenger *RS = nullptr) const = 0;
  1056.  
  1057.   /// Return the assembly name for \p Reg.
  1058.   virtual StringRef getRegAsmName(MCRegister Reg) const {
  1059.     // FIXME: We are assuming that the assembly name is equal to the TableGen
  1060.     // name converted to lower case
  1061.     //
  1062.     // The TableGen name is the name of the definition for this register in the
  1063.     // target's tablegen files.  For example, the TableGen name of
  1064.     // def EAX : Register <...>; is "EAX"
  1065.     return StringRef(getName(Reg));
  1066.   }
  1067.  
  1068.   //===--------------------------------------------------------------------===//
  1069.   /// Subtarget Hooks
  1070.  
  1071.   /// SrcRC and DstRC will be morphed into NewRC if this returns true.
  1072.   virtual bool shouldCoalesce(MachineInstr *MI,
  1073.                               const TargetRegisterClass *SrcRC,
  1074.                               unsigned SubReg,
  1075.                               const TargetRegisterClass *DstRC,
  1076.                               unsigned DstSubReg,
  1077.                               const TargetRegisterClass *NewRC,
  1078.                               LiveIntervals &LIS) const
  1079.   { return true; }
  1080.  
  1081.   /// Region split has a high compile time cost especially for large live ranges.
  1082.   /// This method is used to decide whether or not \p VirtReg should
  1083.   /// go through this expensive splitting heuristic.
  1084.   virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
  1085.                                            const LiveInterval &VirtReg) const;
  1086.  
  1087.   /// Last chance recoloring has a high compile time cost especially for
  1088.   /// targets with a lot of registers.
  1089.   /// This method is used to decide whether or not \p VirtReg should
  1090.   /// go through this expensive heuristic.
  1091.   /// When this target hook returns false, there is a high
  1092.   /// chance that the register allocation will fail altogether (usually with
  1093.   /// "ran out of registers").
  1094.   /// That said, this error usually points to another problem in the
  1095.   /// optimization pipeline.
  1096.   virtual bool
  1097.   shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
  1098.                                           const LiveInterval &VirtReg) const {
  1099.     return true;
  1100.   }
  1101.  
  1102.   /// Deferred spilling delays the spill insertion of a virtual register
  1103.   /// after every other allocation. By deferring the spilling, it is
  1104.   /// sometimes possible to eliminate that spilling altogether because
  1105.   /// something else could have been eliminated, thus leaving some space
  1106.   /// for the virtual register.
  1107.   /// However, this comes with a compile time impact because it adds one
  1108.   /// more stage to the greedy register allocator.
  1109.   /// This method is used to decide whether \p VirtReg should use the deferred
  1110.   /// spilling stage instead of being spilled right away.
  1111.   virtual bool
  1112.   shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
  1113.                                       const LiveInterval &VirtReg) const {
  1114.     return false;
  1115.   }
  1116.  
  1117.   /// When prioritizing live ranges in register allocation, if this hook returns
  1118.   /// true then the AllocationPriority of the register class will be treated as
  1119.   /// more important than whether the range is local to a basic block or global.
  1120.   virtual bool
  1121.   regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const {
  1122.     return false;
  1123.   }
  1124.  
  1125.   //===--------------------------------------------------------------------===//
  1126.   /// Debug information queries.
  1127.  
  1128.   /// getFrameRegister - This method should return the register used as a base
  1129.   /// for values allocated in the current stack frame.
  1130.   virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
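  // A minimal sketch of a typical target override (the target name and
  // register enumerators are illustrative, not part of this interface):
  //
  //   Register MyTargetRegisterInfo::getFrameRegister(
  //       const MachineFunction &MF) const {
  //     const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  //     return TFI->hasFP(MF) ? MyTarget::FP : MyTarget::SP;
  //   }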
  1131.  
  1132.   /// Mark a register and all its aliases as reserved in the given set.
  1133.   void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;
  1134.  
  1135.   /// Returns true if for every register in the set all super registers are part
  1136.   /// of the set as well.
  1137.   bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
  1138.       ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
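  // A minimal usage sketch, as typically seen in a target's getReservedRegs()
  // override (the register enumerators are illustrative):
  //
  //   BitVector Reserved(getNumRegs());
  //   markSuperRegs(Reserved, MyTarget::SP);  // reserve SP and its aliases
  //   markSuperRegs(Reserved, MyTarget::FP);
  //   assert(checkAllSuperRegsMarked(Reserved) &&
  //          "reserved registers must also have their super-regs reserved");
  //   return Reserved;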
  1139.  
  1140.   virtual const TargetRegisterClass *
  1141.   getConstrainedRegClassForOperand(const MachineOperand &MO,
  1142.                                    const MachineRegisterInfo &MRI) const {
  1143.     return nullptr;
  1144.   }
  1145.  
  1146.   /// Returns the physical register number of sub-register "Index"
  1147.   /// for physical register RegNo. Return zero if the sub-register does not
  1148.   /// exist.
  1149.   inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
  1150.     return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
  1151.   }
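  // For example, on an X86-like target one would expect
  //   getSubReg(X86::EAX, X86::sub_16bit)
  // to return X86::AX, and getSubReg(X86::AX, X86::sub_32bit) to return 0
  // because AX has no 32-bit sub-register.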
  1152.  
  1153.   /// Some targets have non-allocatable registers that aren't technically part
  1154.   /// of the explicit callee-saved register list, but should be handled as such
  1155.   /// in certain cases.
  1156.   virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
  1157.     return false;
  1158.   }
  1159. };
  1160.  
  1161. //===----------------------------------------------------------------------===//
  1162. //                           SuperRegClassIterator
  1163. //===----------------------------------------------------------------------===//
  1164. //
  1165. // Iterate over the possible super-registers for a given register class. The
  1166. // iterator will visit a list of pairs (Idx, Mask) corresponding to the
  1167. // possible classes of super-registers.
  1168. //
  1169. // Each bit mask will have at least one set bit, and each set bit in Mask
  1170. // corresponds to a SuperRC such that:
  1171. //
  1172. //   For all Reg in SuperRC: Reg:Idx is in RC.
  1173. //
  1174. // The iterator can include (0, RC->getSubClassMask()) as the first entry which
  1175. // also satisfies the above requirement, assuming Reg:0 == Reg.
  1176. //
  1177. class SuperRegClassIterator {
  1178.   const unsigned RCMaskWords;
  1179.   unsigned SubReg = 0;
  1180.   const uint16_t *Idx;
  1181.   const uint32_t *Mask;
  1182.  
  1183. public:
  1184.   /// Create a SuperRegClassIterator that visits all the super-register classes
  1185.   /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  1186.   SuperRegClassIterator(const TargetRegisterClass *RC,
  1187.                         const TargetRegisterInfo *TRI,
  1188.                         bool IncludeSelf = false)
  1189.     : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
  1190.       Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
  1191.     if (!IncludeSelf)
  1192.       ++*this;
  1193.   }
  1194.  
  1195.   /// Returns true if this iterator is still pointing at a valid entry.
  1196.   bool isValid() const { return Idx; }
  1197.  
  1198.   /// Returns the current sub-register index.
  1199.   unsigned getSubReg() const { return SubReg; }
  1200.  
  1201.   /// Returns the bit mask of register classes that getSubReg() projects into
  1202.   /// RC.
  1203.   /// See TargetRegisterClass::getSubClassMask() for how to use it.
  1204.   const uint32_t *getMask() const { return Mask; }
  1205.  
  1206.   /// Advance iterator to the next entry.
  1207.   void operator++() {
  1208.     assert(isValid() && "Cannot move iterator past end.");
  1209.     Mask += RCMaskWords;
  1210.     SubReg = *Idx++;
  1211.     if (!SubReg)
  1212.       Idx = nullptr;
  1213.   }
  1214. };
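// A minimal usage sketch (illustrative only; RC and TRI are assumed to be a
// register class and a TargetRegisterInfo already available in the caller):
//
//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
//     unsigned Idx = SRI.getSubReg();        // current sub-register index
//     const uint32_t *Mask = SRI.getMask();  // classes usable as super-regs
//     // Each set bit in Mask names a class SuperRC such that, for every
//     // Reg in SuperRC, Reg:Idx is in RC.  The mask can be decoded with the
//     // BitMaskClassIterator defined below.
//     (void)Idx; (void)Mask;
//   }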
  1215.  
  1216. //===----------------------------------------------------------------------===//
  1217. //                           BitMaskClassIterator
  1218. //===----------------------------------------------------------------------===//
  1219. /// This class encapsulates the logic to iterate over the bitmasks returned by
  1220. /// the various RegClass-related APIs.
  1221. /// E.g., this class can be used to iterate over the subclasses provided by
  1222. /// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
  1223. class BitMaskClassIterator {
  1224.   /// Total number of register classes.
  1225.   const unsigned NumRegClasses;
  1226.   /// Base index of CurrentChunk.
  1227.   /// In other words, the number of bits we have read to get to the
  1228.   /// beginning of that chunk.
  1229.   unsigned Base = 0;
  1230.   /// Adjusted base index of CurrentChunk.
  1231.   /// Base index + how many bits we have read within CurrentChunk.
  1232.   unsigned Idx = 0;
  1233.   /// Current register class ID.
  1234.   unsigned ID = 0;
  1235.   /// Mask we are iterating over.
  1236.   const uint32_t *Mask;
  1237.   /// Current chunk of the Mask we are traversing.
  1238.   uint32_t CurrentChunk;
  1239.  
  1240.   /// Move ID to the next set bit.
  1241.   void moveToNextID() {
  1242.     // If the current chunk of memory is empty, move to the next one,
  1243.     // while making sure we do not go past the number of register
  1244.     // classes.
  1245.     while (!CurrentChunk) {
  1246.       // Move to the next chunk.
  1247.       Base += 32;
  1248.       if (Base >= NumRegClasses) {
  1249.         ID = NumRegClasses;
  1250.         return;
  1251.       }
  1252.       CurrentChunk = *++Mask;
  1253.       Idx = Base;
  1254.     }
  1255.     // Otherwise look for the first bit set from the right
  1256.     // (representation of the class ID is big endian).
  1257.     // See getSubClassMask for more details on the representation.
  1258.     unsigned Offset = countTrailingZeros(CurrentChunk);
  1259.     // Add the Offset to the adjusted base number of this chunk: Idx.
  1260.     // This is the ID of the register class.
  1261.     ID = Idx + Offset;
  1262.  
  1263.     // Consume the zeros, if any, and the bit we just read
  1264.     // so that we are at the right spot for the next call.
  1265.     // Do not shift by Offset + 1, because Offset may be 31 and a
  1266.     // shift by 32 would be undefined behavior. In that case we could
  1267.     // instead have set the chunk to 0, but that would have required
  1268.     // an extra if statement.
  1269.     moveNBits(Offset);
  1270.     moveNBits(1);
  1271.   }
  1272.  
  1273.   /// Move \p NumBits bits forward in CurrentChunk.
  1274.   void moveNBits(unsigned NumBits) {
  1275.     assert(NumBits < 32 && "Undefined behavior spotted!");
  1276.     // Consume the bit we read for the next call.
  1277.     CurrentChunk >>= NumBits;
  1278.     // Adjust the base for the chunk.
  1279.     Idx += NumBits;
  1280.   }
  1281.  
  1282. public:
  1283.   /// Create a BitMaskClassIterator that visits all the register classes
  1284.   /// represented by \p Mask.
  1285.   ///
  1286.   /// \pre \p Mask != nullptr
  1287.   BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
  1288.       : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
  1289.     // Move to the first ID.
  1290.     moveToNextID();
  1291.   }
  1292.  
  1293.   /// Returns true if this iterator is still pointing at a valid entry.
  1294.   bool isValid() const { return getID() != NumRegClasses; }
  1295.  
  1296.   /// Returns the current register class ID.
  1297.   unsigned getID() const { return ID; }
  1298.  
  1299.   /// Advance iterator to the next entry.
  1300.   void operator++() {
  1301.     assert(isValid() && "Cannot move iterator past end.");
  1302.     moveToNextID();
  1303.   }
  1304. };
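// A minimal usage sketch (illustrative only; RC and TRI are assumed to be a
// register class and a TargetRegisterInfo already available in the caller):
//
//   for (BitMaskClassIterator It(RC->getSubClassMask(), *TRI); It.isValid();
//        ++It) {
//     const TargetRegisterClass *SubRC = TRI->getRegClass(It.getID());
//     // SubRC is one of RC's sub-classes (the mask also includes RC itself).
//     (void)SubRC;
//   }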
  1305.  
  1306. // This is useful when building IndexedMaps keyed on virtual registers
  1307. struct VirtReg2IndexFunctor {
  1308.   using argument_type = Register;
  1309.   unsigned operator()(Register Reg) const {
  1310.     return Register::virtReg2Index(Reg);
  1311.   }
  1312. };
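// A minimal usage sketch (illustrative only; IndexedMap comes from
// llvm/ADT/IndexedMap.h and SomeVReg is assumed to hold a virtual register):
//
//   IndexedMap<unsigned, VirtReg2IndexFunctor> VRegFlags;
//   VRegFlags.grow(SomeVReg);   // make room for this register's index
//   VRegFlags[SomeVReg] = 1;    // keyed directly on the Register value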
  1313.  
  1314. /// Prints virtual and physical registers with or without a TRI instance.
  1315. ///
  1316. /// The format is:
  1317. ///   %noreg          - NoRegister
  1318. ///   %5              - a virtual register.
  1319. ///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
  1320. ///   %eax            - a physical register
  1321. ///   %physreg17      - a physical register when no TRI instance given.
  1322. ///
  1323. /// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
  1324. Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
  1325.                    unsigned SubIdx = 0,
  1326.                    const MachineRegisterInfo *MRI = nullptr);
  1327.  
  1328. /// Create Printable object to print register units on a \ref raw_ostream.
  1329. ///
  1330. /// Register units are named after their root registers:
  1331. ///
  1332. ///   al      - Single root.
  1333. ///   fp0~st7 - Dual roots.
  1334. ///
  1335. /// Usage: OS << printRegUnit(Unit, TRI) << '\n';
  1336. Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
  1337.  
  1338. /// Create Printable object to print virtual registers and physical
  1339. /// registers on a \ref raw_ostream.
  1340. Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
  1341.  
  1342. /// Create Printable object to print register classes or register banks
  1343. /// on a \ref raw_ostream.
  1344. Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
  1345.                               const TargetRegisterInfo *TRI);
  1346.  
  1347. } // end namespace llvm
  1348.  
  1349. #endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
  1350.