  1. //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. /// \file
  10. /// This file exposes an interface to building/using memory SSA to
  11. /// walk memory instructions using a use/def graph.
  12. ///
  13. /// The MemorySSA class builds an SSA form that links together memory access
  14. /// instructions such as loads, stores, atomics, and calls. Additionally, it
  15. /// does a trivial form of "heap versioning": every time the memory state
  16. /// changes in the program, we generate a new heap version. It generates
  17. /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
  18. ///
  19. /// As a trivial example,
  20. /// define i32 @main() #0 {
  21. /// entry:
  22. ///   %call = call noalias i8* @_Znwm(i64 4) #2
  23. ///   %0 = bitcast i8* %call to i32*
  24. ///   %call1 = call noalias i8* @_Znwm(i64 4) #2
  25. ///   %1 = bitcast i8* %call1 to i32*
  26. ///   store i32 5, i32* %0, align 4
  27. ///   store i32 7, i32* %1, align 4
  28. ///   %2 = load i32* %0, align 4
  29. ///   %3 = load i32* %1, align 4
  30. ///   %add = add nsw i32 %2, %3
  31. ///   ret i32 %add
  32. /// }
  33. ///
  34. /// Will become
  35. /// define i32 @main() #0 {
  36. /// entry:
  37. ///   ; 1 = MemoryDef(0)
  38. ///   %call = call noalias i8* @_Znwm(i64 4) #3
  39. ///   %2 = bitcast i8* %call to i32*
  40. ///   ; 2 = MemoryDef(1)
  41. ///   %call1 = call noalias i8* @_Znwm(i64 4) #3
  42. ///   %4 = bitcast i8* %call1 to i32*
  43. ///   ; 3 = MemoryDef(2)
  44. ///   store i32 5, i32* %2, align 4
  45. ///   ; 4 = MemoryDef(3)
  46. ///   store i32 7, i32* %4, align 4
  47. ///   ; MemoryUse(3)
  48. ///   %7 = load i32* %2, align 4
  49. ///   ; MemoryUse(4)
  50. ///   %8 = load i32* %4, align 4
  51. ///   %add = add nsw i32 %7, %8
  52. ///   ret i32 %add
  53. /// }
  54. ///
  55. /// Given this form, all the stores that could ever affect the load at %8 can
  56. /// be found by using the MemoryUse associated with it, and walking from use to
  57. /// def until you hit the top of the function.
  58. ///
  59. /// Each def also has a list of users associated with it, so you can walk from
  60. /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
  61. /// but not the RHS of MemoryDefs. You can see this above at %7, which would
  62. /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
  63. /// store, all the MemoryUses on its use lists are may-aliases of that store
  64. /// (but the MemoryDefs on its use list may not be).
  65. ///
  66. /// MemoryDefs are not disambiguated because it would require multiple reaching
  67. /// definitions, which would require multiple phis, and multiple memoryaccesses
  68. /// per instruction.
  69. ///
  70. /// In addition to the def/use graph described above, MemoryDefs also contain
  71. /// an "optimized" definition use.  The "optimized" use points to some def
  72. /// reachable through the memory def chain.  The optimized def *may* (but is
  73. /// not required to) alias the original MemoryDef, but no def *closer* to the
  74. /// source def may alias it.  As the name implies, the purpose of the optimized
  75. /// use is to allow caching of clobber searches for memory defs.  The optimized
  76. /// def may be nullptr, in which case clients must walk the defining access
  77. /// chain.
  78. ///
  79. /// When iterating the uses of a MemoryDef, both defining uses and optimized
  80. /// uses will be encountered.  If only one type is needed, the client must
  81. /// filter the use walk.
  82. //
  83. //===----------------------------------------------------------------------===//
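//
// A minimal usage sketch (illustrative only, not part of the upstream header):
// given a MemorySSA result `MSSA` for the current function and a load
// instruction `LoadI` (both placeholders supplied by the caller), the graph
// described above can be queried roughly as follows.
//
//   if (MemoryUseOrDef *MU = MSSA.getMemoryAccess(LoadI)) {
//     // Nearest reaching definition on the plain def/use graph.
//     MemoryAccess *DefiningAccess = MU->getDefiningAccess();
//     // Ask the walker for the nearest dominating clobber instead; it may
//     // skip defs that provably do not alias the load's location.
//     MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MU);
//     if (MSSA.isLiveOnEntryDef(Clobber)) {
//       // No access in this function clobbers the loaded location.
//     }
//   }
//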
  84.  
  85. #ifndef LLVM_ANALYSIS_MEMORYSSA_H
  86. #define LLVM_ANALYSIS_MEMORYSSA_H
  87.  
  88. #include "llvm/ADT/DenseMap.h"
  89. #include "llvm/ADT/SmallPtrSet.h"
  90. #include "llvm/ADT/SmallVector.h"
  91. #include "llvm/ADT/ilist_node.h"
  92. #include "llvm/ADT/iterator_range.h"
  93. #include "llvm/Analysis/AliasAnalysis.h"
  94. #include "llvm/Analysis/MemoryLocation.h"
  95. #include "llvm/Analysis/PHITransAddr.h"
  96. #include "llvm/IR/DerivedUser.h"
  97. #include "llvm/IR/Dominators.h"
  98. #include "llvm/IR/Type.h"
  99. #include "llvm/IR/User.h"
  100. #include "llvm/Pass.h"
  101. #include <algorithm>
  102. #include <cassert>
  103. #include <cstddef>
  104. #include <iterator>
  105. #include <memory>
  106. #include <utility>
  107.  
  108. namespace llvm {
  109.  
  110. template <class GraphType> struct GraphTraits;
  111. class BasicBlock;
  112. class Function;
  113. class Instruction;
  114. class LLVMContext;
  115. class MemoryAccess;
  116. class MemorySSAWalker;
  117. class Module;
  118. class Use;
  119. class Value;
  120. class raw_ostream;
  121.  
  122. namespace MSSAHelpers {
  123.  
  124. struct AllAccessTag {};
  125. struct DefsOnlyTag {};
  126.  
  127. } // end namespace MSSAHelpers
  128.  
  129. enum : unsigned {
  130.   // Used to signify what the default invalid ID is for MemoryAccess's
  131.   // getID()
  132.   INVALID_MEMORYACCESS_ID = -1U
  133. };
  134.  
  135. template <class T> class memoryaccess_def_iterator_base;
  136. using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
  137. using const_memoryaccess_def_iterator =
  138.     memoryaccess_def_iterator_base<const MemoryAccess>;
  139.  
  140. // The base for all memory accesses. All memory accesses in a block are
  141. // linked together using an intrusive list.
  142. class MemoryAccess
  143.     : public DerivedUser,
  144.       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
  145.       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
  146. public:
  147.   using AllAccessType =
  148.       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  149.   using DefsOnlyType =
  150.       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
  151.  
  152.   MemoryAccess(const MemoryAccess &) = delete;
  153.   MemoryAccess &operator=(const MemoryAccess &) = delete;
  154.  
  155.   void *operator new(size_t) = delete;
  156.  
  157.   // Methods for support type inquiry through isa, cast, and
  158.   // dyn_cast
  159.   static bool classof(const Value *V) {
  160.     unsigned ID = V->getValueID();
  161.     return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  162.   }
  163.  
  164.   BasicBlock *getBlock() const { return Block; }
  165.  
  166.   void print(raw_ostream &OS) const;
  167.   void dump() const;
  168.  
  169.   /// The user iterators for a memory access
  170.   using iterator = user_iterator;
  171.   using const_iterator = const_user_iterator;
  172.  
  173.   /// This iterator walks over all of the defs in a given
  174.   /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  175.   /// MemoryUse/MemoryDef, this walks the defining access.
  176.   memoryaccess_def_iterator defs_begin();
  177.   const_memoryaccess_def_iterator defs_begin() const;
  178.   memoryaccess_def_iterator defs_end();
  179.   const_memoryaccess_def_iterator defs_end() const;
  180.  
  181.   /// Get the iterators for the all access list and the defs only list.
  182.   /// We default to the all access list.
  183.   AllAccessType::self_iterator getIterator() {
  184.     return this->AllAccessType::getIterator();
  185.   }
  186.   AllAccessType::const_self_iterator getIterator() const {
  187.     return this->AllAccessType::getIterator();
  188.   }
  189.   AllAccessType::reverse_self_iterator getReverseIterator() {
  190.     return this->AllAccessType::getReverseIterator();
  191.   }
  192.   AllAccessType::const_reverse_self_iterator getReverseIterator() const {
  193.     return this->AllAccessType::getReverseIterator();
  194.   }
  195.   DefsOnlyType::self_iterator getDefsIterator() {
  196.     return this->DefsOnlyType::getIterator();
  197.   }
  198.   DefsOnlyType::const_self_iterator getDefsIterator() const {
  199.     return this->DefsOnlyType::getIterator();
  200.   }
  201.   DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
  202.     return this->DefsOnlyType::getReverseIterator();
  203.   }
  204.   DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
  205.     return this->DefsOnlyType::getReverseIterator();
  206.   }
  207.  
  208. protected:
  209.   friend class MemoryDef;
  210.   friend class MemoryPhi;
  211.   friend class MemorySSA;
  212.   friend class MemoryUse;
  213.   friend class MemoryUseOrDef;
  214.  
  215.   /// Used by MemorySSA to change the block of a MemoryAccess when it is
  216.   /// moved.
  217.   void setBlock(BasicBlock *BB) { Block = BB; }
  218.  
  219.   /// Used for debugging and tracking things about MemoryAccesses.
  220.   /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  221.   inline unsigned getID() const;
  222.  
  223.   MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
  224.                BasicBlock *BB, unsigned NumOperands)
  225.       : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
  226.         Block(BB) {}
  227.  
  228.   // Use deleteValue() to delete a generic MemoryAccess.
  229.   ~MemoryAccess() = default;
  230.  
  231. private:
  232.   BasicBlock *Block;
  233. };
  234.  
  235. template <>
  236. struct ilist_alloc_traits<MemoryAccess> {
  237.   static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
  238. };
  239.  
  240. inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  241.   MA.print(OS);
  242.   return OS;
  243. }
  244.  
  245. /// Class that has the common methods + fields of memory uses/defs. It's
  246. /// a little awkward to have, but there are many cases where we want either a
  247. /// use or def, and there are many cases where uses are needed (defs aren't
  248. /// acceptable), and vice-versa.
  249. ///
  250. /// This class should never be instantiated directly; make a MemoryUse or
  251. /// MemoryDef instead.
  252. class MemoryUseOrDef : public MemoryAccess {
  253. public:
  254.   void *operator new(size_t) = delete;
  255.  
  256.   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
  257.  
  258.   /// Get the instruction that this MemoryUseOrDef represents.
  259.   Instruction *getMemoryInst() const { return MemoryInstruction; }
  260.  
  261.   /// Get the access that produces the memory state used by this Use.
  262.   MemoryAccess *getDefiningAccess() const { return getOperand(0); }
  263.  
  264.   static bool classof(const Value *MA) {
  265.     return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  266.   }
  267.  
  268.   /// Do we have an optimized use?
  269.   inline bool isOptimized() const;
  270.   /// Return the MemoryAccess associated with the optimized use, or nullptr.
  271.   inline MemoryAccess *getOptimized() const;
  272.   /// Sets the optimized use for a MemoryDef.
  273.   inline void setOptimized(MemoryAccess *);
  274.  
  275.   /// Reset the ID of what this MemoryUse was optimized to, causing it to
  276.   /// be rewalked by the walker if necessary.
  277.   /// This really should only be called by tests.
  278.   inline void resetOptimized();
  279.  
  280. protected:
  281.   friend class MemorySSA;
  282.   friend class MemorySSAUpdater;
  283.  
  284.   MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
  285.                  DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
  286.                  unsigned NumOperands)
  287.       : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
  288.         MemoryInstruction(MI) {
  289.     setDefiningAccess(DMA);
  290.   }
  291.  
  292.   // Use deleteValue() to delete a generic MemoryUseOrDef.
  293.   ~MemoryUseOrDef() = default;
  294.  
  295.   void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
  296.     if (!Optimized) {
  297.       setOperand(0, DMA);
  298.       return;
  299.     }
  300.     setOptimized(DMA);
  301.   }
  302.  
  303. private:
  304.   Instruction *MemoryInstruction;
  305. };
  306.  
  307. /// Represents read-only accesses to memory
  308. ///
  309. /// In particular, the set of Instructions that will be represented by
  310. /// MemoryUse's is exactly the set of Instructions for which
  311. /// AliasAnalysis::getModRefInfo returns "Ref".
  312. class MemoryUse final : public MemoryUseOrDef {
  313. public:
  314.   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
  315.  
  316.   MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
  317.       : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
  318.                        /*NumOperands=*/1) {}
  319.  
  320.   // allocate space for exactly one operand
  321.   void *operator new(size_t S) { return User::operator new(S, 1); }
  322.   void operator delete(void *Ptr) { User::operator delete(Ptr); }
  323.  
  324.   static bool classof(const Value *MA) {
  325.     return MA->getValueID() == MemoryUseVal;
  326.   }
  327.  
  328.   void print(raw_ostream &OS) const;
  329.  
  330.   void setOptimized(MemoryAccess *DMA) {
  331.     OptimizedID = DMA->getID();
  332.     setOperand(0, DMA);
  333.   }
  334.  
  335.   /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
  336.   /// uses will usually be optimized, but this is not guaranteed (e.g. due to
  337.   /// invalidation and optimization limits).
  338.   bool isOptimized() const {
  339.     return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  340.   }
  341.  
  342.   MemoryAccess *getOptimized() const {
  343.     return getDefiningAccess();
  344.   }
  345.  
  346.   void resetOptimized() {
  347.     OptimizedID = INVALID_MEMORYACCESS_ID;
  348.   }
  349.  
  350. protected:
  351.   friend class MemorySSA;
  352.  
  353. private:
  354.   static void deleteMe(DerivedUser *Self);
  355.  
  356.   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
  357. };
  358.  
  359. template <>
  360. struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
  361. DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
  362.  
  363. /// Represents a read-write access to memory, whether it is a must-alias,
  364. /// or a may-alias.
  365. ///
  366. /// In particular, the set of Instructions that will be represented by
  367. /// MemoryDef's is exactly the set of Instructions for which
  368. /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
  369. /// Note that, in order to provide def-def chains, all defs also have a use
  370. /// associated with them. This use points to the nearest reaching
  371. /// MemoryDef/MemoryPhi.
  372. class MemoryDef final : public MemoryUseOrDef {
  373. public:
  374.   friend class MemorySSA;
  375.  
  376.   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
  377.  
  378.   MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
  379.             unsigned Ver)
  380.       : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
  381.                        /*NumOperands=*/2),
  382.         ID(Ver) {}
  383.  
  384.   // allocate space for exactly two operands
  385.   void *operator new(size_t S) { return User::operator new(S, 2); }
  386.   void operator delete(void *Ptr) { User::operator delete(Ptr); }
  387.  
  388.   static bool classof(const Value *MA) {
  389.     return MA->getValueID() == MemoryDefVal;
  390.   }
  391.  
  392.   void setOptimized(MemoryAccess *MA) {
  393.     setOperand(1, MA);
  394.     OptimizedID = MA->getID();
  395.   }
  396.  
  397.   MemoryAccess *getOptimized() const {
  398.     return cast_or_null<MemoryAccess>(getOperand(1));
  399.   }
  400.  
  401.   bool isOptimized() const {
  402.     return getOptimized() && OptimizedID == getOptimized()->getID();
  403.   }
  404.  
  405.   void resetOptimized() {
  406.     OptimizedID = INVALID_MEMORYACCESS_ID;
  407.     setOperand(1, nullptr);
  408.   }
  409.  
  410.   void print(raw_ostream &OS) const;
  411.  
  412.   unsigned getID() const { return ID; }
  413.  
  414. private:
  415.   static void deleteMe(DerivedUser *Self);
  416.  
  417.   const unsigned ID;
  418.   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
  419. };
  420.  
  421. template <>
  422. struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
  423. DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
  424.  
  425. template <>
  426. struct OperandTraits<MemoryUseOrDef> {
  427.   static Use *op_begin(MemoryUseOrDef *MUD) {
  428.     if (auto *MU = dyn_cast<MemoryUse>(MUD))
  429.       return OperandTraits<MemoryUse>::op_begin(MU);
  430.     return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  431.   }
  432.  
  433.   static Use *op_end(MemoryUseOrDef *MUD) {
  434.     if (auto *MU = dyn_cast<MemoryUse>(MUD))
  435.       return OperandTraits<MemoryUse>::op_end(MU);
  436.     return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  437.   }
  438.  
  439.   static unsigned operands(const MemoryUseOrDef *MUD) {
  440.     if (const auto *MU = dyn_cast<MemoryUse>(MUD))
  441.       return OperandTraits<MemoryUse>::operands(MU);
  442.     return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  443.   }
  444. };
  445. DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
  446.  
  447. /// Represents phi nodes for memory accesses.
  448. ///
  449. /// These have the same semantics as regular phi nodes, with the exception that
  450. /// only one phi will ever exist in a given basic block.
  451. /// Guaranteeing one phi per block means guaranteeing there is only ever one
  452. /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
  453. /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
  454. /// a MemoryPhi's operands.
  455. /// That is, given
  456. /// if (a) {
  457. ///   store %a
  458. ///   store %b
  459. /// }
  460. /// it *must* be transformed into
  461. /// if (a) {
  462. ///    1 = MemoryDef(liveOnEntry)
  463. ///    store %a
  464. ///    2 = MemoryDef(1)
  465. ///    store %b
  466. /// }
  467. /// and *not*
  468. /// if (a) {
  469. ///    1 = MemoryDef(liveOnEntry)
  470. ///    store %a
  471. ///    2 = MemoryDef(liveOnEntry)
  472. ///    store %b
  473. /// }
  474. /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
  475. /// end of the branch, and if there are not two phi nodes, one will be
  476. /// disconnected completely from the SSA graph below that point.
  477. /// Because MemoryUse's do not generate new definitions, they do not have this
  478. /// issue.
  479. class MemoryPhi final : public MemoryAccess {
  480.   // allocate space for exactly zero operands
  481.   void *operator new(size_t S) { return User::operator new(S); }
  482.  
  483. public:
  484.   void operator delete(void *Ptr) { User::operator delete(Ptr); }
  485.  
  486.   /// Provide fast operand accessors
  487.   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
  488.  
  489.   MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
  490.       : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
  491.         ReservedSpace(NumPreds) {
  492.     allocHungoffUses(ReservedSpace);
  493.   }
  494.  
  495.   // Block iterator interface. This provides access to the list of incoming
  496.   // basic blocks, which parallels the list of incoming values.
  497.   using block_iterator = BasicBlock **;
  498.   using const_block_iterator = BasicBlock *const *;
  499.  
  500.   block_iterator block_begin() {
  501.     return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  502.   }
  503.  
  504.   const_block_iterator block_begin() const {
  505.     return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  506.   }
  507.  
  508.   block_iterator block_end() { return block_begin() + getNumOperands(); }
  509.  
  510.   const_block_iterator block_end() const {
  511.     return block_begin() + getNumOperands();
  512.   }
  513.  
  514.   iterator_range<block_iterator> blocks() {
  515.     return make_range(block_begin(), block_end());
  516.   }
  517.  
  518.   iterator_range<const_block_iterator> blocks() const {
  519.     return make_range(block_begin(), block_end());
  520.   }
  521.  
  522.   op_range incoming_values() { return operands(); }
  523.  
  524.   const_op_range incoming_values() const { return operands(); }
  525.  
  526.   /// Return the number of incoming edges
  527.   unsigned getNumIncomingValues() const { return getNumOperands(); }
  528.  
  529.   /// Return incoming value number x
  530.   MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  531.   void setIncomingValue(unsigned I, MemoryAccess *V) {
  532.     assert(V && "PHI node got a null value!");
  533.     setOperand(I, V);
  534.   }
  535.  
  536.   static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  537.   static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
  538.  
  539.   /// Return incoming basic block number \p I.
  540.   BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
  541.  
  542.   /// Return incoming basic block corresponding
  543.   /// to an operand of the PHI.
  544.   BasicBlock *getIncomingBlock(const Use &U) const {
  545.     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
  546.     return getIncomingBlock(unsigned(&U - op_begin()));
  547.   }
  548.  
  549.   /// Return incoming basic block corresponding
  550.   /// to value use iterator.
  551.   BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
  552.     return getIncomingBlock(I.getUse());
  553.   }
  554.  
  555.   void setIncomingBlock(unsigned I, BasicBlock *BB) {
  556.     assert(BB && "PHI node got a null basic block!");
  557.     block_begin()[I] = BB;
  558.   }
  559.  
  560.   /// Add an incoming value to the end of the PHI list
  561.   void addIncoming(MemoryAccess *V, BasicBlock *BB) {
  562.     if (getNumOperands() == ReservedSpace)
  563.       growOperands(); // Get more space!
  564.     // Initialize some new operands.
  565.     setNumHungOffUseOperands(getNumOperands() + 1);
  566.     setIncomingValue(getNumOperands() - 1, V);
  567.     setIncomingBlock(getNumOperands() - 1, BB);
  568.   }
  569.  
  570.   /// Return the first index of the specified basic
  571.   /// block in the value list for this PHI.  Returns -1 if no instance.
  572.   int getBasicBlockIndex(const BasicBlock *BB) const {
  573.     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
  574.       if (block_begin()[I] == BB)
  575.         return I;
  576.     return -1;
  577.   }
  578.  
  579.   MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
  580.     int Idx = getBasicBlockIndex(BB);
  581.     assert(Idx >= 0 && "Invalid basic block argument!");
  582.     return getIncomingValue(Idx);
  583.   }
  584.  
  585.   // After deleting incoming position I, the order of the incoming entries may change.
  586.   void unorderedDeleteIncoming(unsigned I) {
  587.     unsigned E = getNumOperands();
  588.     assert(I < E && "Cannot remove out of bounds Phi entry.");
  589.     // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
  590.     // itself should be deleted.
  591.     assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
  592.                      "at least 2 values.");
  593.     setIncomingValue(I, getIncomingValue(E - 1));
  594.     setIncomingBlock(I, block_begin()[E - 1]);
  595.     setOperand(E - 1, nullptr);
  596.     block_begin()[E - 1] = nullptr;
  597.     setNumHungOffUseOperands(getNumOperands() - 1);
  598.   }
  599.  
  600.   // After deleting entries that satisfy Pred, remaining entries may have
  601.   // changed order.
  602.   template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
  603.     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
  604.       if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
  605.         unorderedDeleteIncoming(I);
  606.         E = getNumOperands();
  607.         --I;
  608.       }
  609.     assert(getNumOperands() >= 1 &&
  610.            "Cannot remove all incoming blocks in a MemoryPhi.");
  611.   }
  612.  
  613.   // After deleting incoming block BB, the order of the incoming blocks may change.
  614.   void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
  615.     unorderedDeleteIncomingIf(
  616.         [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  617.   }
  618.  
  619.   // After deleting incoming memory access MA, the order of the incoming
  620.   // accesses may change.
  621.   void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
  622.     unorderedDeleteIncomingIf(
  623.         [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  624.   }
  625.  
  626.   static bool classof(const Value *V) {
  627.     return V->getValueID() == MemoryPhiVal;
  628.   }
  629.  
  630.   void print(raw_ostream &OS) const;
  631.  
  632.   unsigned getID() const { return ID; }
  633.  
  634. protected:
  635.   friend class MemorySSA;
  636.  
  637.   /// This is more complicated than the generic
  638.   /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  639.   /// values and pointers to the incoming blocks, all in one allocation.
  640.   void allocHungoffUses(unsigned N) {
  641.     User::allocHungoffUses(N, /* IsPhi */ true);
  642.   }
  643.  
  644. private:
  645.   // For debugging only
  646.   const unsigned ID;
  647.   unsigned ReservedSpace;
  648.  
  649.   /// This grows the operand list in response to a push_back style of
  650.   /// operation.  This grows the number of ops by 1.5 times.
  651.   void growOperands() {
  652.     unsigned E = getNumOperands();
  653.     // 2 op PHI nodes are VERY common, so reserve at least enough for that.
  654.     ReservedSpace = std::max(E + E / 2, 2u);
  655.     growHungoffUses(ReservedSpace, /* IsPhi */ true);
  656.   }
  657.  
  658.   static void deleteMe(DerivedUser *Self);
  659. };
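// An illustrative sketch (not part of the upstream header): visiting the
// incoming (value, block) pairs of a MemoryPhi `Phi`, where `Phi` is a
// placeholder supplied by the caller.
//
//   for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
//     MemoryAccess *IncomingMA = Phi->getIncomingValue(I);
//     BasicBlock *Pred = Phi->getIncomingBlock(I);
//     // ... inspect the definition IncomingMA reaching along edge Pred ...
//   }
//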
  660.  
  661. inline unsigned MemoryAccess::getID() const {
  662.   assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
  663.          "only memory defs and phis have ids");
  664.   if (const auto *MD = dyn_cast<MemoryDef>(this))
  665.     return MD->getID();
  666.   return cast<MemoryPhi>(this)->getID();
  667. }
  668.  
  669. inline bool MemoryUseOrDef::isOptimized() const {
  670.   if (const auto *MD = dyn_cast<MemoryDef>(this))
  671.     return MD->isOptimized();
  672.   return cast<MemoryUse>(this)->isOptimized();
  673. }
  674.  
  675. inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  676.   if (const auto *MD = dyn_cast<MemoryDef>(this))
  677.     return MD->getOptimized();
  678.   return cast<MemoryUse>(this)->getOptimized();
  679. }
  680.  
  681. inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  682.   if (auto *MD = dyn_cast<MemoryDef>(this))
  683.     MD->setOptimized(MA);
  684.   else
  685.     cast<MemoryUse>(this)->setOptimized(MA);
  686. }
  687.  
  688. inline void MemoryUseOrDef::resetOptimized() {
  689.   if (auto *MD = dyn_cast<MemoryDef>(this))
  690.     MD->resetOptimized();
  691.   else
  692.     cast<MemoryUse>(this)->resetOptimized();
  693. }
  694.  
  695. template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
  696. DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
  697.  
  698. /// Encapsulates MemorySSA, including all data associated with memory
  699. /// accesses.
  700. class MemorySSA {
  701. public:
  702.   MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  703.  
  704.   // MemorySSA must remain where it's constructed; Walkers it creates store
  705.   // pointers to it.
  706.   MemorySSA(MemorySSA &&) = delete;
  707.  
  708.   ~MemorySSA();
  709.  
  710.   MemorySSAWalker *getWalker();
  711.   MemorySSAWalker *getSkipSelfWalker();
  712.  
  713.   /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  714.   /// access associated with it. If passed a basic block, this gets the memory
  715.   /// phi node that exists for that block, if there is one. Otherwise, this
  716.   /// will get a MemoryUseOrDef.
  717.   MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
  718.     return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  719.   }
  720.  
  721.   MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
  722.     return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  723.   }
  724.  
  725.   DominatorTree &getDomTree() const { return *DT; }
  726.  
  727.   void dump() const;
  728.   void print(raw_ostream &) const;
  729.  
  730.   /// Return true if \p MA represents the live on entry value
  731.   ///
  732.   /// Loads and stores from pointer arguments and other global values may be
  733.   /// defined by memory operations that do not occur in the current function, so
  734.   /// they may be live on entry to the function. MemorySSA represents such
  735.   /// memory state by the live on entry definition, which is guaranteed to occur
  736.   /// before any other memory access in the function.
  737.   inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
  738.     return MA == LiveOnEntryDef.get();
  739.   }
  740.  
  741.   inline MemoryAccess *getLiveOnEntryDef() const {
  742.     return LiveOnEntryDef.get();
  743.   }
  744.  
  745.   // Sadly, iplists, by default, own and delete pointers added to the
  746.   // list. It's not currently possible to have two iplists for the same type,
  747.   // where one owns the pointers, and one does not. This is because the traits
  748.   // are per-type, not per-tag.  If this ever changes, we should make the
  749.   // DefList an iplist.
  750.   using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  751.   using DefsList =
  752.       simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
  753.  
  754.   /// Return the list of MemoryAccess's for a given basic block.
  755.   ///
  756.   /// This list is not modifiable by the user.
  757.   const AccessList *getBlockAccesses(const BasicBlock *BB) const {
  758.     return getWritableBlockAccesses(BB);
  759.   }
  760.  
  761.   /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  762.   /// block.
  763.   ///
  764.   /// This list is not modifiable by the user.
  765.   const DefsList *getBlockDefs(const BasicBlock *BB) const {
  766.     return getWritableBlockDefs(BB);
  767.   }
  768.  
  769.   /// Given two memory accesses in the same basic block, determine
  770.   /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  771.   bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
  772.  
  773.   /// Given two memory accesses in potentially different blocks,
  774.   /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  775.   bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
  776.  
  777.   /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  778.   /// dominates Use \p B.
  779.   bool dominates(const MemoryAccess *A, const Use &B) const;
  780.  
  781.   enum class VerificationLevel { Fast, Full };
  782.   /// Verify that MemorySSA is self-consistent (i.e. definitions dominate
  783.   /// all uses and uses appear in the right places).  This is used by unit tests.
  784.   void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;
  785.  
  786.   /// Used in various insertion functions to specify whether we are talking
  787.   /// about the beginning or end of a block.
  788.   enum InsertionPlace { Beginning, End, BeforeTerminator };
  789.  
  790.   /// By default, uses are *not* optimized during MemorySSA construction.
  791.   /// Calling this method will attempt to optimize all MemoryUses, if this has
  792.   /// not happened yet for this MemorySSA instance. This should be done if you
  793.   /// plan to query the clobbering access for most uses, or if you walk the
  794.   /// def-use chain of uses.
  795.   void ensureOptimizedUses();
  796.  
  797.   AliasAnalysis &getAA() { return *AA; }
  798.  
  799. protected:
  800.   // Used by Memory SSA dumpers and wrapper pass
  801.   friend class MemorySSAPrinterLegacyPass;
  802.   friend class MemorySSAUpdater;
  803.  
  804.   void verifyOrderingDominationAndDefUses(
  805.       Function &F, VerificationLevel = VerificationLevel::Fast) const;
  806.   void verifyDominationNumbers(const Function &F) const;
  807.   void verifyPrevDefInPhis(Function &F) const;
  808.  
  809.   // This is used by the use optimizer and updater.
  810.   AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
  811.     auto It = PerBlockAccesses.find(BB);
  812.     return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  813.   }
  814.  
  815.   // This is used by the use optimizer and updater.
  816.   DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
  817.     auto It = PerBlockDefs.find(BB);
  818.     return It == PerBlockDefs.end() ? nullptr : It->second.get();
  819.   }
  820.  
  821.   // These are used by the updater to perform various internal MemorySSA
  822.   // machinations.  They do not always leave the IR in a correct state, and
  823.   // rely on the updater to fix up what they break, so they are not public.
  824.  
  825.   void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  826.   void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
  827.  
  828.   // Rename the dominator tree branch rooted at BB.
  829.   void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
  830.                   SmallPtrSetImpl<BasicBlock *> &Visited) {
  831.     renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  832.   }
  833.  
  834.   void removeFromLookups(MemoryAccess *);
  835.   void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  836.   void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
  837.                                InsertionPlace);
  838.   void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
  839.                              AccessList::iterator);
  840.   MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
  841.                                       const MemoryUseOrDef *Template = nullptr,
  842.                                       bool CreationMustSucceed = true);
  843.  
  844. private:
  845.   class ClobberWalkerBase;
  846.   class CachingWalker;
  847.   class SkipSelfWalker;
  848.   class OptimizeUses;
  849.  
  850.   CachingWalker *getWalkerImpl();
  851.   void buildMemorySSA(BatchAAResults &BAA);
  852.  
  853.   void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  854.   void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
  855.  
  856.   using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  857.   using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
  858.  
  859.   void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  860.   MemoryPhi *createMemoryPhi(BasicBlock *BB);
  861.   template <typename AliasAnalysisType>
  862.   MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
  863.                                   const MemoryUseOrDef *Template = nullptr);
  864.   void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  865.   MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  866.   void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  867.   void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
  868.                   SmallPtrSetImpl<BasicBlock *> &Visited,
  869.                   bool SkipVisited = false, bool RenameAllUses = false);
  870.   AccessList *getOrCreateAccessList(const BasicBlock *);
  871.   DefsList *getOrCreateDefsList(const BasicBlock *);
  872.   void renumberBlock(const BasicBlock *) const;
  873.   AliasAnalysis *AA = nullptr;
  874.   DominatorTree *DT;
  875.   Function &F;
  876.  
  877.   // Memory SSA mappings
  878.   DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
  879.  
  880.   // These two mappings contain the main block to access/def mappings for
  881.   // MemorySSA. The list contained in PerBlockAccesses really owns all the
  882.   // MemoryAccesses.
  883.   // Both maps maintain the invariant that if a block is found in them, the
  884.   // corresponding list is not empty, and if a block is not found in them, the
  885.   // corresponding list is empty.
  886.   AccessMap PerBlockAccesses;
  887.   DefsMap PerBlockDefs;
  888.   std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
  889.  
  890.   // Domination mappings
  891.   // Note that the numbering is local to a block, even though the map is
  892.   // global.
  893.   mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  894.   mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
  895.  
  896.   // Memory SSA building info
  897.   std::unique_ptr<ClobberWalkerBase> WalkerBase;
  898.   std::unique_ptr<CachingWalker> Walker;
  899.   std::unique_ptr<SkipSelfWalker> SkipWalker;
  900.   unsigned NextID = 0;
  901.   bool IsOptimized = false;
  902. };
  903.  
  904. /// Enables verification of MemorySSA.
  905. ///
  906. /// The checks which this flag enables are expensive and disabled by default
  907. /// unless `EXPENSIVE_CHECKS` is defined.  The flag `-verify-memoryssa` can be
  908. /// used to selectively enable the verification without re-compilation.
  909. extern bool VerifyMemorySSA;
  910.  
  911. // Internal MemorySSA utils, for use by MemorySSA classes and walkers
  912. class MemorySSAUtil {
  913. protected:
  914.   friend class GVNHoist;
  915.   friend class MemorySSAWalker;
  916.  
  917.   // This function should not be used by new passes.
  918.   static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
  919.                                   AliasAnalysis &AA);
  920. };
  921.  
  922. // This pass does eager building and then printing of MemorySSA. It is used by
  923. // the tests to be able to build, dump, and verify Memory SSA.
  924. class MemorySSAPrinterLegacyPass : public FunctionPass {
  925. public:
  926.   MemorySSAPrinterLegacyPass();
  927.  
  928.   bool runOnFunction(Function &) override;
  929.   void getAnalysisUsage(AnalysisUsage &AU) const override;
  930.  
  931.   static char ID;
  932. };
  933.  
  934. /// An analysis that produces \c MemorySSA for a function.
  935. ///
  936. class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  937.   friend AnalysisInfoMixin<MemorySSAAnalysis>;
  938.  
  939.   static AnalysisKey Key;
  940.  
  941. public:
  942.   // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  943.   // pointers after construction.  Use a wrapper class instead of plain
  944.   // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  945.   struct Result {
  946.     Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
  947.  
  948.     MemorySSA &getMSSA() { return *MSSA.get(); }
  949.  
  950.     std::unique_ptr<MemorySSA> MSSA;
  951.  
  952.     bool invalidate(Function &F, const PreservedAnalyses &PA,
  953.                     FunctionAnalysisManager::Invalidator &Inv);
  954.   };
  955.  
  956.   Result run(Function &F, FunctionAnalysisManager &AM);
  957. };
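// A minimal sketch (not part of the upstream header) of obtaining the result
// inside a new-pass-manager function pass, where `F` and `AM` are the usual
// run() parameters:
//
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MSSA.ensureOptimizedUses(); // optional: optimize all MemoryUses up front
//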
  958.  
  959. /// Printer pass for \c MemorySSA.
  960. class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  961.   raw_ostream &OS;
  962.  
  963. public:
  964.   explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
  965.  
  966.   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  967. };
  968.  
  969. /// Printer pass for \c MemorySSA via the walker.
  970. class MemorySSAWalkerPrinterPass
  971.     : public PassInfoMixin<MemorySSAWalkerPrinterPass> {
  972.   raw_ostream &OS;
  973.  
  974. public:
  975.   explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {}
  976.  
  977.   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  978. };
  979.  
  980. /// Verifier pass for \c MemorySSA.
  981. struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  982.   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  983. };
  984.  
  985. /// Legacy analysis pass which computes \c MemorySSA.
  986. class MemorySSAWrapperPass : public FunctionPass {
  987. public:
  988.   MemorySSAWrapperPass();
  989.  
  990.   static char ID;
  991.  
  992.   bool runOnFunction(Function &) override;
  993.   void releaseMemory() override;
  994.   MemorySSA &getMSSA() { return *MSSA; }
  995.   const MemorySSA &getMSSA() const { return *MSSA; }
  996.  
  997.   void getAnalysisUsage(AnalysisUsage &AU) const override;
  998.  
  999.   void verifyAnalysis() const override;
  1000.   void print(raw_ostream &OS, const Module *M = nullptr) const override;
  1001.  
  1002. private:
  1003.   std::unique_ptr<MemorySSA> MSSA;
  1004. };
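// A brief sketch (not part of the upstream header) of consuming this legacy
// analysis from another legacy FunctionPass:
//
//   void getAnalysisUsage(AnalysisUsage &AU) const override {
//     AU.addRequired<MemorySSAWrapperPass>();
//     AU.setPreservesAll();
//   }
//   bool runOnFunction(Function &F) override {
//     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//     // ... query MSSA here ...
//     return false;
//   }
//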
  1005.  
  1006. /// This is the generic walker interface for walkers of MemorySSA.
  1007. /// Walkers are used to be able to further disambiguate the def-use chains
  1008. /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
  1009. /// you.
  1010. /// In particular, while the def-use chains provide basic information, and are
  1011. /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
  1012. /// MemoryUse as AliasAnalysis considers it, a user may want better or other
  1013. /// information. For example, they may want to use SCEV info to further
  1014. /// disambiguate memory accesses, or they may want the nearest dominating
  1015. /// may-aliasing MemoryDef for a call or a store. This API enables a
  1016. /// standardized interface to getting and using that info.
  1017. class MemorySSAWalker {
  1018. public:
  1019.   MemorySSAWalker(MemorySSA *);
  1020.   virtual ~MemorySSAWalker() = default;
  1021.  
  1022.   using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
  1023.  
  1024.   /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  1025.   /// will give you the nearest dominating MemoryAccess that Mod's the location
  1026.   /// the instruction accesses (by skipping any def which AA can prove does not
  1027.   /// alias the location(s) accessed by the instruction given).
  1028.   ///
  1029.   /// Note that this will return a single access, and it must dominate the
  1030.   /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  1031.   /// this will return the MemoryPhi, not the operand. This means that
  1032.   /// given:
  1033.   /// if (a) {
  1034.   ///   1 = MemoryDef(liveOnEntry)
  1035.   ///   store %a
  1036.   /// } else {
  1037.   ///   2 = MemoryDef(liveOnEntry)
  1038.   ///   store %b
  1039.   /// }
  1040.   /// 3 = MemoryPhi(2, 1)
  1041.   /// MemoryUse(3)
  1042.   /// load %a
  1043.   ///
  1044.   /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  1045.   /// in the if (a) branch.
  1046.   MemoryAccess *getClobberingMemoryAccess(const Instruction *I,
  1047.                                           BatchAAResults &AA) {
  1048.     MemoryAccess *MA = MSSA->getMemoryAccess(I);
  1049.     assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
  1050.     return getClobberingMemoryAccess(MA, AA);
  1051.   }
  1052.  
  1053.   /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  1054.   /// but takes a MemoryAccess instead of an Instruction.
  1055.   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
  1056.                                                   BatchAAResults &AA) = 0;
  1057.  
  1058.   /// Given a potentially clobbering memory access and a new location,
  1059.   /// calling this will give you the nearest dominating clobbering MemoryAccess
  1060.   /// (by skipping non-aliasing def links).
  1061.   ///
  1062.   /// This version of the function is mainly used to disambiguate phi translated
  1063.   /// pointers, where the value of a pointer may have changed from the initial
  1064.   /// memory access. Note that this expects to be handed either a MemoryUse,
  1065.   /// or an already potentially clobbering access. Unlike the above API, if
  1066.   /// given a MemoryDef that clobbers the pointer as the starting access, it
  1067.   /// will return that MemoryDef, whereas the above would return the clobber
  1068.   /// starting from the use side of the memory def.
  1069.   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
  1070.                                                   const MemoryLocation &,
  1071.                                                   BatchAAResults &AA) = 0;
  1072.  
  1073.   MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
  1074.     BatchAAResults BAA(MSSA->getAA());
  1075.     return getClobberingMemoryAccess(I, BAA);
  1076.   }
  1077.  
  1078.   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) {
  1079.     BatchAAResults BAA(MSSA->getAA());
  1080.     return getClobberingMemoryAccess(MA, BAA);
  1081.   }
  1082.  
  1083.   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
  1084.                                           const MemoryLocation &Loc) {
  1085.     BatchAAResults BAA(MSSA->getAA());
  1086.     return getClobberingMemoryAccess(MA, Loc, BAA);
  1087.   }
  1088.  
  1089.   /// Given a memory access, invalidate anything this walker knows about
  1090.   /// that access.
  1091.   /// This API is used by walkers that store information to perform basic cache
  1092.   /// invalidation.  This will be called by MemorySSA at appropriate times for
  1093.   /// the walker it uses or returns.
  1094.   virtual void invalidateInfo(MemoryAccess *) {}
  1095.  
  1096. protected:
  1097.   friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
  1098.                           // constructor.
  1099.   MemorySSA *MSSA;
  1100. };
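// An illustrative sketch (not part of the upstream header): asking a walker
// for the clobber of a specific memory location rather than of an
// instruction. `Walker`, `StartMA`, and `Loc` are placeholders supplied by
// the caller; `StartMA` should be a MemoryUse or an already potentially
// clobbering access, as described above.
//
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(StartMA, Loc);
//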
  1101.  
  1102. /// A MemorySSAWalker that does no alias queries, or anything else. It
  1103. /// simply returns the links as they were constructed by the builder.
  1104. class DoNothingMemorySSAWalker final : public MemorySSAWalker {
  1105. public:
  1106.   // Keep the overrides below from hiding the Instruction overload of
  1107.   // getClobberingMemoryAccess.
  1108.   using MemorySSAWalker::getClobberingMemoryAccess;
  1109.  
  1110.   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
  1111.                                           BatchAAResults &) override;
  1112.   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
  1113.                                           const MemoryLocation &,
  1114.                                           BatchAAResults &) override;
  1115. };
  1116.  
  1117. using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
  1118. using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
  1119.  
  1120. /// Iterator base class used to implement const and non-const iterators
  1121. /// over the defining accesses of a MemoryAccess.
  1122. template <class T>
  1123. class memoryaccess_def_iterator_base
  1124.     : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
  1125.                                   std::forward_iterator_tag, T, ptrdiff_t, T *,
  1126.                                   T *> {
  1127.   using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
  1128.  
  1129. public:
  1130.   memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  1131.   memoryaccess_def_iterator_base() = default;
  1132.  
  1133.   bool operator==(const memoryaccess_def_iterator_base &Other) const {
  1134.     return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  1135.   }
  1136.  
  1137.   // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the
  1138.   // block from the operand in constant time (in a PHINode, the use list has
  1139.   // both, so it's just subtraction). We provide it as part of the
  1140.   // iterator to avoid callers having to linear walk to get the block.
  1141.   // If the operation becomes constant time on MemoryPHI's, this bit of
  1142.   // abstraction breaking should be removed.
  1143.   BasicBlock *getPhiArgBlock() const {
  1144.     MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
  1145.     assert(MP && "Tried to get phi arg block when not iterating over a PHI");
  1146.     return MP->getIncomingBlock(ArgNo);
  1147.   }
  1148.  
  1149.   typename std::iterator_traits<BaseT>::pointer operator*() const {
  1150.     assert(Access && "Tried to access past the end of our iterator");
  1151.     // Go to the first argument for phis, and the defining access for everything
  1152.     // else.
  1153.     if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
  1154.       return MP->getIncomingValue(ArgNo);
  1155.     return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  1156.   }
  1157.  
  1158.   using BaseT::operator++;
  1159.   memoryaccess_def_iterator_base &operator++() {
  1160.     assert(Access && "Hit end of iterator");
  1161.     if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
  1162.       if (++ArgNo >= MP->getNumIncomingValues()) {
  1163.         ArgNo = 0;
  1164.         Access = nullptr;
  1165.       }
  1166.     } else {
  1167.       Access = nullptr;
  1168.     }
  1169.     return *this;
  1170.   }
  1171.  
  1172. private:
  1173.   T *Access = nullptr;
  1174.   unsigned ArgNo = 0;
  1175. };
  1176.  
  1177. inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  1178.   return memoryaccess_def_iterator(this);
  1179. }
  1180.  
  1181. inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  1182.   return const_memoryaccess_def_iterator(this);
  1183. }
  1184.  
  1185. inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  1186.   return memoryaccess_def_iterator();
  1187. }
  1188.  
  1189. inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  1190.   return const_memoryaccess_def_iterator();
  1191. }
  1192.  
  1193. /// GraphTraits for a MemoryAccess, which walks defs in the normal case,
  1194. /// and uses in the inverse case.
  1195. template <> struct GraphTraits<MemoryAccess *> {
  1196.   using NodeRef = MemoryAccess *;
  1197.   using ChildIteratorType = memoryaccess_def_iterator;
  1198.  
  1199.   static NodeRef getEntryNode(NodeRef N) { return N; }
  1200.   static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  1201.   static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
  1202. };
  1203.  
  1204. template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  1205.   using NodeRef = MemoryAccess *;
  1206.   using ChildIteratorType = MemoryAccess::iterator;
  1207.  
  1208.   static NodeRef getEntryNode(NodeRef N) { return N; }
  1209.   static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  1210.   static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
  1211. };
  1212.  
  1213. /// Provide an iterator that walks defs, giving both the memory access,
  1214. /// and the current pointer location, updating the pointer location as it
  1215. /// changes due to phi node translation.
  1216. ///
  1217. /// This iterator, while somewhat specialized, is what most clients actually
  1218. /// want when walking upwards through MemorySSA def chains. It takes a pair of
  1219. /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
  1220. /// memory location through phi nodes for the user.
  1221. class upward_defs_iterator
  1222.     : public iterator_facade_base<upward_defs_iterator,
  1223.                                   std::forward_iterator_tag,
  1224.                                   const MemoryAccessPair> {
  1225.   using BaseT = upward_defs_iterator::iterator_facade_base;
  1226.  
  1227. public:
  1228.   upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
  1229.       : DefIterator(Info.first), Location(Info.second),
  1230.         OriginalAccess(Info.first), DT(DT) {
  1231.     CurrentPair.first = nullptr;
  1232.  
  1233.     WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
  1234.     fillInCurrentPair();
  1235.   }
  1236.  
  1237.   upward_defs_iterator() { CurrentPair.first = nullptr; }
  1238.  
  1239.   bool operator==(const upward_defs_iterator &Other) const {
  1240.     return DefIterator == Other.DefIterator;
  1241.   }
  1242.  
  1243.   typename std::iterator_traits<BaseT>::reference operator*() const {
  1244.     assert(DefIterator != OriginalAccess->defs_end() &&
  1245.            "Tried to access past the end of our iterator");
  1246.     return CurrentPair;
  1247.   }
  1248.  
  1249.   using BaseT::operator++;
  1250.   upward_defs_iterator &operator++() {
  1251.     assert(DefIterator != OriginalAccess->defs_end() &&
  1252.            "Tried to access past the end of the iterator");
  1253.     ++DefIterator;
  1254.     if (DefIterator != OriginalAccess->defs_end())
  1255.       fillInCurrentPair();
  1256.     return *this;
  1257.   }
  1258.  
  1259.   BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
  1260.  
  1261. private:
  1262.   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
  1263.   /// loop. In particular, this guarantees that it only references a single
  1264.   /// MemoryLocation during execution of the containing function.
  1265.   bool IsGuaranteedLoopInvariant(const Value *Ptr) const;
  1266.  
  1267.   void fillInCurrentPair() {
  1268.     CurrentPair.first = *DefIterator;
  1269.     CurrentPair.second = Location;
  1270.     if (WalkingPhi && Location.Ptr) {
  1271.       PHITransAddr Translator(
  1272.           const_cast<Value *>(Location.Ptr),
  1273.           OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
  1274.  
  1275.       if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
  1276.                                         DefIterator.getPhiArgBlock(), DT, true))
  1277.         if (Translator.getAddr() != CurrentPair.second.Ptr)
  1278.           CurrentPair.second =
  1279.               CurrentPair.second.getWithNewPtr(Translator.getAddr());
  1280.  
  1281.       // Mark size as unknown, if the location is not guaranteed to be
  1282.       // loop-invariant for any possible loop in the function. Setting the size
  1283.       // to unknown guarantees that any memory accesses that access locations
  1284.       // after the pointer are considered as clobbers, which is important to
  1285.       // catch loop carried dependences.
  1286.       if (!IsGuaranteedLoopInvariant(CurrentPair.second.Ptr))
  1287.         CurrentPair.second = CurrentPair.second.getWithNewSize(
  1288.             LocationSize::beforeOrAfterPointer());
  1289.     }
  1290.   }
  1291.  
  1292.   MemoryAccessPair CurrentPair;
  1293.   memoryaccess_def_iterator DefIterator;
  1294.   MemoryLocation Location;
  1295.   MemoryAccess *OriginalAccess = nullptr;
  1296.   DominatorTree *DT = nullptr;
  1297.   bool WalkingPhi = false;
  1298. };
  1299.  
  1300. inline upward_defs_iterator
  1301. upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT) {
  1302.   return upward_defs_iterator(Pair, &DT);
  1303. }
  1304.  
  1305. inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
  1306.  
  1307. inline iterator_range<upward_defs_iterator>
  1308. upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
  1309.   return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
  1310. }
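// A minimal usage sketch (not part of the upstream header): walking upward
// from a starting access while tracking the (possibly phi-translated) memory
// location. `StartMA`, `Loc`, and `DT` are placeholders supplied by the
// caller.
//
//   for (const MemoryAccessPair &Pair : upward_defs({StartMA, Loc}, DT)) {
//     MemoryAccess *MA = Pair.first;
//     const MemoryLocation &TranslatedLoc = Pair.second;
//     // ... examine MA against TranslatedLoc ...
//   }
//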
  1311.  
  1312. /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
  1313. /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
  1314. /// comparing against a null def_chain_iterator, this will compare equal only
  1315. /// after walking said Phi/liveOnEntry.
  1316. ///
  1317. /// The UseOptimizedChain flag specifies whether to walk the clobbering
  1318. /// access chain, or all the accesses.
  1319. ///
  1320. /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
  1321. /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
  1322. /// a phi node.  The optimized chain walks the clobbering access of a store.
  1323. /// So if you are just trying to find, given a store, what the next
  1324. /// thing that would clobber the same memory is, you want the optimized chain.
  1325. template <class T, bool UseOptimizedChain = false>
  1326. struct def_chain_iterator
  1327.     : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
  1328.                                   std::forward_iterator_tag, MemoryAccess *> {
  1329.   def_chain_iterator() : MA(nullptr) {}
  1330.   def_chain_iterator(T MA) : MA(MA) {}
  1331.  
  1332.   T operator*() const { return MA; }
  1333.  
  1334.   def_chain_iterator &operator++() {
  1335.     // N.B. liveOnEntry has a null defining access.
  1336.     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
  1337.       if (UseOptimizedChain && MUD->isOptimized())
  1338.         MA = MUD->getOptimized();
  1339.       else
  1340.         MA = MUD->getDefiningAccess();
  1341.     } else {
  1342.       MA = nullptr;
  1343.     }
  1344.  
  1345.     return *this;
  1346.   }
  1347.  
  1348.   bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
  1349.  
  1350. private:
  1351.   T MA;
  1352. };
  1353.  
  1354. template <class T>
  1355. inline iterator_range<def_chain_iterator<T>>
  1356. def_chain(T MA, MemoryAccess *UpTo = nullptr) {
  1357. #ifdef EXPENSIVE_CHECKS
  1358.   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
  1359.          "UpTo isn't in the def chain!");
  1360. #endif
  1361.   return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
  1362. }
  1363.  
  1364. template <class T>
  1365. inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  1366.   return make_range(def_chain_iterator<T, true>(MA),
  1367.                     def_chain_iterator<T, true>(nullptr));
  1368. }
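// A short illustrative sketch (not part of the upstream header): walking the
// def chain above a starting access until a MemoryPhi or liveOnEntry ends the
// walk. `StartMA` is a placeholder MemoryAccess* supplied by the caller.
//
//   for (MemoryAccess *MA : def_chain(StartMA)) {
//     if (auto *MD = dyn_cast<MemoryDef>(MA)) {
//       // ... process each reaching MemoryDef in turn ...
//     }
//   }
//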
  1369.  
  1370. } // end namespace llvm
  1371.  
  1372. #endif // LLVM_ANALYSIS_MEMORYSSA_H
  1373.