//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstrs.
/// It uses target hooks to lower the ABI, but aside from that, the code
/// generated by the pass is generic. This is the default translator used for
/// GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>

namespace llvm {

class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;

// Technically the pass should run on a hypothetical MachineModule,
// since it should translate Globals into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what it would cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI;

  /// This class contains the mapping between Values and vreg-related data.
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<const Value *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;

      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;

      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const {
      return ValToVRegs.find(&V) != ValToVRegs.end();
    }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");

      // We use placement new with our fast allocator since we never try to
      // free the vectors until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
             "Type already exists");

      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }
    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT*> ValToVRegs;
    DenseMap<const Type *, OffsetListT*> TypeToOffsets;
  };

  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;

  // N.b. it's not completely obvious that this will be sufficient for every
  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
  // our lives).
  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;

  // One BasicBlock can be translated to multiple MachineBasicBlocks.  For such
  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
  // a mapping from the edges arriving at the BasicBlock to the corresponding
  // created MachineBasicBlocks. Some BasicBlocks that get translated to a
  // single MachineBasicBlock may also end up in this Map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;

  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;

  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile time constants,
  ///   produce an immediate in the right operand and do not touch
  ///   ValToReg. Actually we will go with a virtual register for each
  ///   constant because it may be expensive to actually materialize the
  ///   constant. Moreover, if the constant spans several instructions,
  ///   CSE may not catch them.
  ///   => Update ValToVReg and remember that we saw a constant in Constants.
  ///   We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such operands
  ///       as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);
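
  // For a rough illustration of the scheme above: an IR instruction such as
  //   %sum = add i32 %a, %b
  // is handled by looking up (or creating) the vregs for %a and %b in VMap,
  // emitting something like
  //   %vsum:_(s32) = G_ADD %va, %vb
  // at CurBuilder's insertion point, and recording %sum -> %vsum in VMap.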

  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);
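
  // Sketch: translating the constant i32 42 into %reg would typically emit
  //   %reg:_(s32) = G_CONSTANT i32 42
  // through EntryBuilder, so the materialization lands in the entry block and
  // can be shared by every user of the constant.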

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
  /// emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);
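
  // Sketch: a call such as
  //   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %len, i1 false)
  // is expected to become a single generic instruction along the lines of
  //   G_MEMCPY %vdst(p0), %vsrc(p0), %vlen(s64), 0
  // (with the appropriate memory operands attached), leaving the expansion or
  // libcall decision to later passes.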

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
  /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
  /// Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);
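
  // Sketch: for an invoke whose unwind edge leads to a catchswitch with two
  // handlers, the IR names a single unwind destination, but the machine CFG
  // needs an edge to each handler's catchpad block (the catchswitch block
  // itself emits no code), so UnwindDests would receive both handler MBBs,
  // each carrying its share of the incoming edge probability.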

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();

  /// Translate \p U into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p U into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
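
  // Sketch: a branch on a condition tree such as
  //   %c = and i1 %a, %b
  //   br i1 %c, label %then, label %else
  // can be lowered as two chained CaseBlocks: test %a and fall through to a
  // block that tests %b, with both false edges going to %else. Whether that
  // beats materializing the 'and' is decided by shouldEmitAsBranches.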

  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
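
  // Sketch: for a dense switch, say cases 0..3 of %x, the header block emits
  // the range check (emitJumpTableHeader) and the jump table block then emits
  // roughly
  //   %jt:_(p0) = G_JUMP_TABLE %jump-table.0
  //   G_BRJT %jt(p0), %jump-table.0, %idx(s64)
  // to branch to the per-case blocks.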

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the BitTest header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
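
  // Sketch: for a cluster of sparse cases that fits in a machine word, the
  // header block subtracts the cluster's low bound from the switch value, and
  // each bit-test block then computes roughly
  //   (1 << AdjustedValue) & CaseMask
  // branching to the case destination when the result is non-zero.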

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
  // End switch lowering section.

  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }

  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }

  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }

  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }

  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }

  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);

  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }

  /// @}

  // Builder for machine instructions, a la IRBuilder.
  // I.e., compared to the regular MIBuilder, this one also inserts the
  // instruction in the current block, it can create blocks, etc., basically a
  // kind of IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;

  // The MachineFunction currently being translated.
  MachineFunction *MF;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC;

  CodeGenOpt::Level OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  AAResults *AA;
  AssumptionCache *AC;
  const TargetLibraryInfo *LibInfo;
  FunctionLoweringInfo FuncInfo;

  // True unless the Target Machine specifies no optimizations or the
  // function has the optnone attribute.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;

  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    virtual ~GISelSwitchLowering() = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;

  // * Insert all the code needed to materialize the constants
  // at the proper place. E.g., Entry block or dominator block
  // of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block. E.g. emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had its
  /// tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call to
  /// __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);

  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single virtual register. Aggregates get flattened. If such VRegs
  /// do not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);
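
  // Sketch: for a value of type {i32, i64}, getOrCreateVRegs would return two
  // registers (one s32, one s64) with member offsets of 0 and 64 bits recorded
  // in VMap, while a plain i32 value gets a single-element list that callers
  // may treat as one register via getOrCreateVReg below.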

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }

  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p AI.
  /// If no such frame index exists, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment for
  /// the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
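
  // Typical use (sketch): a target's pass configuration creates this pass at
  // the start of its GlobalISel pipeline, e.g.
  //   bool MyTargetPassConfig::addIRTranslator() {
  //     addPass(new IRTranslator(getOptLevel()));
  //     return false;
  //   }
  // where MyTargetPassConfig stands in for the target's TargetPassConfig
  // subclass.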

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
