// Snapshot of LLVM's InstCombiner.h exported from the QNX 8 LLVM/Clang
// compiler-suite Subversion repository web viewer.
//===- InstCombiner.h - InstCombine implementation --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides the interface for the instcombine pass implementation.
/// The interface is used for generic transformations in this folder and
/// target specific combinations in the targets.
/// The visitor implementation is in \c InstCombinerImpl in
/// \c InstCombineInternal.h.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>
#include <utility>

// DEBUG_TYPE must be defined before including InstructionWorklist.h because
// that header's templates expand LLVM_DEBUG, which references DEBUG_TYPE.
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
  32.  
  33. namespace llvm {
  34.  
  35. class AAResults;
  36. class AssumptionCache;
  37. class ProfileSummaryInfo;
  38. class TargetLibraryInfo;
  39. class TargetTransformInfo;
  40.  
  41. /// The core instruction combiner logic.
  42. ///
  43. /// This class provides both the logic to recursively visit instructions and
  44. /// combine them.
  45. class LLVM_LIBRARY_VISIBILITY InstCombiner {
  46.   /// Only used to call target specific intrinsic combining.
  47.   /// It must **NOT** be used for any other purpose, as InstCombine is a
  48.   /// target-independent canonicalization transform.
  49.   TargetTransformInfo &TTI;
  50.  
  51. public:
  52.   /// Maximum size of array considered when transforming.
  53.   uint64_t MaxArraySizeForCombine = 0;
  54.  
  55.   /// An IRBuilder that automatically inserts new instructions into the
  56.   /// worklist.
  57.   using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  58.   BuilderTy &Builder;
  59.  
  60. protected:
  61.   /// A worklist of the instructions that need to be simplified.
  62.   InstructionWorklist &Worklist;
  63.  
  64.   // Mode in which we are running the combiner.
  65.   const bool MinimizeSize;
  66.  
  67.   AAResults *AA;
  68.  
  69.   // Required analyses.
  70.   AssumptionCache &AC;
  71.   TargetLibraryInfo &TLI;
  72.   DominatorTree &DT;
  73.   const DataLayout &DL;
  74.   const SimplifyQuery SQ;
  75.   OptimizationRemarkEmitter &ORE;
  76.   BlockFrequencyInfo *BFI;
  77.   ProfileSummaryInfo *PSI;
  78.  
  79.   // Optional analyses. When non-null, these can both be used to do better
  80.   // combining and will be updated to reflect any changes.
  81.   LoopInfo *LI;
  82.  
  83.   bool MadeIRChange = false;
  84.  
  85. public:
  86.   InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder,
  87.                bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
  88.                TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
  89.                DominatorTree &DT, OptimizationRemarkEmitter &ORE,
  90.                BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
  91.                const DataLayout &DL, LoopInfo *LI)
  92.       : TTI(TTI), Builder(Builder), Worklist(Worklist),
  93.         MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
  94.         SQ(DL, &TLI, &DT, &AC), ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}
  95.  
  96.   virtual ~InstCombiner() = default;
  97.  
  98.   /// Return the source operand of a potentially bitcasted value while
  99.   /// optionally checking if it has one use. If there is no bitcast or the one
  100.   /// use check is not met, return the input value itself.
  101.   static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
  102.     if (auto *BitCast = dyn_cast<BitCastInst>(V))
  103.       if (!OneUseOnly || BitCast->hasOneUse())
  104.         return BitCast->getOperand(0);
  105.  
  106.     // V is not a bitcast or V has more than one use and OneUseOnly is true.
  107.     return V;
  108.   }
  109.  
  110.   /// Assign a complexity or rank value to LLVM Values. This is used to reduce
  111.   /// the amount of pattern matching needed for compares and commutative
  112.   /// instructions. For example, if we have:
  113.   ///   icmp ugt X, Constant
  114.   /// or
  115.   ///   xor (add X, Constant), cast Z
  116.   ///
  117.   /// We do not have to consider the commuted variants of these patterns because
  118.   /// canonicalization based on complexity guarantees the above ordering.
  119.   ///
  120.   /// This routine maps IR values to various complexity ranks:
  121.   ///   0 -> undef
  122.   ///   1 -> Constants
  123.   ///   2 -> Other non-instructions
  124.   ///   3 -> Arguments
  125.   ///   4 -> Cast and (f)neg/not instructions
  126.   ///   5 -> Other instructions
  127.   static unsigned getComplexity(Value *V) {
  128.     if (isa<Instruction>(V)) {
  129.       if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
  130.           match(V, m_Not(PatternMatch::m_Value())) ||
  131.           match(V, m_FNeg(PatternMatch::m_Value())))
  132.         return 4;
  133.       return 5;
  134.     }
  135.     if (isa<Argument>(V))
  136.       return 3;
  137.     return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
  138.   }
  139.  
  140.   /// Predicate canonicalization reduces the number of patterns that need to be
  141.   /// matched by other transforms. For example, we may swap the operands of a
  142.   /// conditional branch or select to create a compare with a canonical
  143.   /// (inverted) predicate which is then more likely to be matched with other
  144.   /// values.
  145.   static bool isCanonicalPredicate(CmpInst::Predicate Pred) {
  146.     switch (Pred) {
  147.     case CmpInst::ICMP_NE:
  148.     case CmpInst::ICMP_ULE:
  149.     case CmpInst::ICMP_SLE:
  150.     case CmpInst::ICMP_UGE:
  151.     case CmpInst::ICMP_SGE:
  152.     // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
  153.     case CmpInst::FCMP_ONE:
  154.     case CmpInst::FCMP_OLE:
  155.     case CmpInst::FCMP_OGE:
  156.       return false;
  157.     default:
  158.       return true;
  159.     }
  160.   }
  161.  
  162.   /// Given an exploded icmp instruction, return true if the comparison only
  163.   /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
  164.   /// the result of the comparison is true when the input value is signed.
  165.   static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
  166.                              bool &TrueIfSigned) {
  167.     switch (Pred) {
  168.     case ICmpInst::ICMP_SLT: // True if LHS s< 0
  169.       TrueIfSigned = true;
  170.       return RHS.isZero();
  171.     case ICmpInst::ICMP_SLE: // True if LHS s<= -1
  172.       TrueIfSigned = true;
  173.       return RHS.isAllOnes();
  174.     case ICmpInst::ICMP_SGT: // True if LHS s> -1
  175.       TrueIfSigned = false;
  176.       return RHS.isAllOnes();
  177.     case ICmpInst::ICMP_SGE: // True if LHS s>= 0
  178.       TrueIfSigned = false;
  179.       return RHS.isZero();
  180.     case ICmpInst::ICMP_UGT:
  181.       // True if LHS u> RHS and RHS == sign-bit-mask - 1
  182.       TrueIfSigned = true;
  183.       return RHS.isMaxSignedValue();
  184.     case ICmpInst::ICMP_UGE:
  185.       // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
  186.       TrueIfSigned = true;
  187.       return RHS.isMinSignedValue();
  188.     case ICmpInst::ICMP_ULT:
  189.       // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
  190.       TrueIfSigned = false;
  191.       return RHS.isMinSignedValue();
  192.     case ICmpInst::ICMP_ULE:
  193.       // True if LHS u<= RHS and RHS == sign-bit-mask - 1
  194.       TrueIfSigned = false;
  195.       return RHS.isMaxSignedValue();
  196.     default:
  197.       return false;
  198.     }
  199.   }
  200.  
  201.   /// Add one to a Constant
  202.   static Constant *AddOne(Constant *C) {
  203.     return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
  204.   }
  205.  
  206.   /// Subtract one from a Constant
  207.   static Constant *SubOne(Constant *C) {
  208.     return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
  209.   }
  210.  
  211.   std::optional<std::pair<
  212.       CmpInst::Predicate,
  213.       Constant *>> static getFlippedStrictnessPredicateAndConstant(CmpInst::
  214.                                                                        Predicate
  215.                                                                            Pred,
  216.                                                                    Constant *C);
  217.  
  218.   static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
  219.     // a ? b : false and a ? true : b are the canonical form of logical and/or.
  220.     // This includes !a ? b : false and !a ? true : b. Absorbing the not into
  221.     // the select by swapping operands would break recognition of this pattern
  222.     // in other analyses, so don't do that.
  223.     return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
  224.                                                  PatternMatch::m_Value())) ||
  225.            match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
  226.                                                 PatternMatch::m_Value()));
  227.   }
  228.  
  229.   /// Return true if the specified value is free to invert (apply ~ to).
  230.   /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
  231.   /// is true, work under the assumption that the caller intends to remove all
  232.   /// uses of V and only keep uses of ~V.
  233.   ///
  234.   /// See also: canFreelyInvertAllUsersOf()
  235.   static bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
  236.     // ~(~(X)) -> X.
  237.     if (match(V, m_Not(PatternMatch::m_Value())))
  238.       return true;
  239.  
  240.     // Constants can be considered to be not'ed values.
  241.     if (match(V, PatternMatch::m_AnyIntegralConstant()))
  242.       return true;
  243.  
  244.     // Compares can be inverted if all of their uses are being modified to use
  245.     // the ~V.
  246.     if (isa<CmpInst>(V))
  247.       return WillInvertAllUses;
  248.  
  249.     // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
  250.     // `(-1 - Constant) - A` if we are willing to invert all of the uses.
  251.     if (match(V, m_Add(PatternMatch::m_Value(), PatternMatch::m_ImmConstant())))
  252.       return WillInvertAllUses;
  253.  
  254.     // If `V` is of the form `Constant - A` then `-1 - V` can be folded into
  255.     // `A + (-1 - Constant)` if we are willing to invert all of the uses.
  256.     if (match(V, m_Sub(PatternMatch::m_ImmConstant(), PatternMatch::m_Value())))
  257.       return WillInvertAllUses;
  258.  
  259.     // Selects with invertible operands are freely invertible
  260.     if (match(V,
  261.               m_Select(PatternMatch::m_Value(), m_Not(PatternMatch::m_Value()),
  262.                        m_Not(PatternMatch::m_Value()))))
  263.       return WillInvertAllUses;
  264.  
  265.     // Min/max may be in the form of intrinsics, so handle those identically
  266.     // to select patterns.
  267.     if (match(V, m_MaxOrMin(m_Not(PatternMatch::m_Value()),
  268.                             m_Not(PatternMatch::m_Value()))))
  269.       return WillInvertAllUses;
  270.  
  271.     return false;
  272.   }
  273.  
  274.   /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
  275.   /// InstCombine's freelyInvertAllUsersOf() must be kept in sync with this fn.
  276.   /// NOTE: for Instructions only!
  277.   ///
  278.   /// See also: isFreeToInvert()
  279.   static bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser) {
  280.     // Look at every user of V.
  281.     for (Use &U : V->uses()) {
  282.       if (U.getUser() == IgnoredUser)
  283.         continue; // Don't consider this user.
  284.  
  285.       auto *I = cast<Instruction>(U.getUser());
  286.       switch (I->getOpcode()) {
  287.       case Instruction::Select:
  288.         if (U.getOperandNo() != 0) // Only if the value is used as select cond.
  289.           return false;
  290.         if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
  291.           return false;
  292.         break;
  293.       case Instruction::Br:
  294.         assert(U.getOperandNo() == 0 && "Must be branching on that value.");
  295.         break; // Free to invert by swapping true/false values/destinations.
  296.       case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring
  297.                              // it.
  298.         if (!match(I, m_Not(PatternMatch::m_Value())))
  299.           return false; // Not a 'not'.
  300.         break;
  301.       default:
  302.         return false; // Don't know, likely not freely invertible.
  303.       }
  304.       // So far all users were free to invert...
  305.     }
  306.     return true; // Can freely invert all users!
  307.   }
  308.  
  309.   /// Some binary operators require special handling to avoid poison and
  310.   /// undefined behavior. If a constant vector has undef elements, replace those
  311.   /// undefs with identity constants if possible because those are always safe
  312.   /// to execute. If no identity constant exists, replace undef with some other
  313.   /// safe constant.
  314.   static Constant *
  315.   getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
  316.                                 bool IsRHSConstant) {
  317.     auto *InVTy = cast<FixedVectorType>(In->getType());
  318.  
  319.     Type *EltTy = InVTy->getElementType();
  320.     auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
  321.     if (!SafeC) {
  322.       // TODO: Should this be available as a constant utility function? It is
  323.       // similar to getBinOpAbsorber().
  324.       if (IsRHSConstant) {
  325.         switch (Opcode) {
  326.         case Instruction::SRem: // X % 1 = 0
  327.         case Instruction::URem: // X %u 1 = 0
  328.           SafeC = ConstantInt::get(EltTy, 1);
  329.           break;
  330.         case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
  331.           SafeC = ConstantFP::get(EltTy, 1.0);
  332.           break;
  333.         default:
  334.           llvm_unreachable(
  335.               "Only rem opcodes have no identity constant for RHS");
  336.         }
  337.       } else {
  338.         switch (Opcode) {
  339.         case Instruction::Shl:  // 0 << X = 0
  340.         case Instruction::LShr: // 0 >>u X = 0
  341.         case Instruction::AShr: // 0 >> X = 0
  342.         case Instruction::SDiv: // 0 / X = 0
  343.         case Instruction::UDiv: // 0 /u X = 0
  344.         case Instruction::SRem: // 0 % X = 0
  345.         case Instruction::URem: // 0 %u X = 0
  346.         case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
  347.         case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
  348.         case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
  349.         case Instruction::FRem: // 0.0 % X = 0
  350.           SafeC = Constant::getNullValue(EltTy);
  351.           break;
  352.         default:
  353.           llvm_unreachable("Expected to find identity constant for opcode");
  354.         }
  355.       }
  356.     }
  357.     assert(SafeC && "Must have safe constant for binop");
  358.     unsigned NumElts = InVTy->getNumElements();
  359.     SmallVector<Constant *, 16> Out(NumElts);
  360.     for (unsigned i = 0; i != NumElts; ++i) {
  361.       Constant *C = In->getAggregateElement(i);
  362.       Out[i] = isa<UndefValue>(C) ? SafeC : C;
  363.     }
  364.     return ConstantVector::get(Out);
  365.   }
  366.  
  367.   void addToWorklist(Instruction *I) { Worklist.push(I); }
  368.  
  369.   AssumptionCache &getAssumptionCache() const { return AC; }
  370.   TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
  371.   DominatorTree &getDominatorTree() const { return DT; }
  372.   const DataLayout &getDataLayout() const { return DL; }
  373.   const SimplifyQuery &getSimplifyQuery() const { return SQ; }
  374.   OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
  375.     return ORE;
  376.   }
  377.   BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
  378.   ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }
  379.   LoopInfo *getLoopInfo() const { return LI; }
  380.  
  381.   // Call target specific combiners
  382.   std::optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
  383.   std::optional<Value *>
  384.   targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
  385.                                          KnownBits &Known,
  386.                                          bool &KnownBitsComputed);
  387.   std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
  388.       IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
  389.       APInt &UndefElts2, APInt &UndefElts3,
  390.       std::function<void(Instruction *, unsigned, APInt, APInt &)>
  391.           SimplifyAndSetOp);
  392.  
  393.   /// Inserts an instruction \p New before instruction \p Old
  394.   ///
  395.   /// Also adds the new instruction to the worklist and returns \p New so that
  396.   /// it is suitable for use as the return from the visitation patterns.
  397.   Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
  398.     assert(New && !New->getParent() &&
  399.            "New instruction already inserted into a basic block!");
  400.     BasicBlock *BB = Old.getParent();
  401.     New->insertInto(BB, Old.getIterator()); // Insert inst
  402.     Worklist.add(New);
  403.     return New;
  404.   }
  405.  
  406.   /// Same as InsertNewInstBefore, but also sets the debug loc.
  407.   Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
  408.     New->setDebugLoc(Old.getDebugLoc());
  409.     return InsertNewInstBefore(New, Old);
  410.   }
  411.  
  412.   /// A combiner-aware RAUW-like routine.
  413.   ///
  414.   /// This method is to be used when an instruction is found to be dead,
  415.   /// replaceable with another preexisting expression. Here we add all uses of
  416.   /// I to the worklist, replace all uses of I with the new value, then return
  417.   /// I, so that the inst combiner will know that I was modified.
  418.   Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
  419.     // If there are no uses to replace, then we return nullptr to indicate that
  420.     // no changes were made to the program.
  421.     if (I.use_empty()) return nullptr;
  422.  
  423.     Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.
  424.  
  425.     // If we are replacing the instruction with itself, this must be in a
  426.     // segment of unreachable code, so just clobber the instruction.
  427.     if (&I == V)
  428.       V = PoisonValue::get(I.getType());
  429.  
  430.     LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
  431.                       << "    with " << *V << '\n');
  432.  
  433.     // If V is a new unnamed instruction, take the name from the old one.
  434.     if (V->use_empty() && isa<Instruction>(V) && !V->hasName() && I.hasName())
  435.       V->takeName(&I);
  436.  
  437.     I.replaceAllUsesWith(V);
  438.     return &I;
  439.   }
  440.  
  441.   /// Replace operand of instruction and add old operand to the worklist.
  442.   Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
  443.     Worklist.addValue(I.getOperand(OpNum));
  444.     I.setOperand(OpNum, V);
  445.     return &I;
  446.   }
  447.  
  448.   /// Replace use and add the previously used value to the worklist.
  449.   void replaceUse(Use &U, Value *NewValue) {
  450.     Worklist.addValue(U);
  451.     U = NewValue;
  452.   }
  453.  
  454.   /// Combiner aware instruction erasure.
  455.   ///
  456.   /// When dealing with an instruction that has side effects or produces a void
  457.   /// value, we can't rely on DCE to delete the instruction. Instead, visit
  458.   /// methods should return the value returned by this function.
  459.   virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;
  460.  
  461.   void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
  462.                         const Instruction *CxtI) const {
  463.     llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  464.   }
  465.  
  466.   KnownBits computeKnownBits(const Value *V, unsigned Depth,
  467.                              const Instruction *CxtI) const {
  468.     return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  469.   }
  470.  
  471.   bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
  472.                               unsigned Depth = 0,
  473.                               const Instruction *CxtI = nullptr) {
  474.     return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  475.   }
  476.  
  477.   bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
  478.                          const Instruction *CxtI = nullptr) const {
  479.     return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  480.   }
  481.  
  482.   unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
  483.                               const Instruction *CxtI = nullptr) const {
  484.     return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  485.   }
  486.  
  487.   unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
  488.                                      const Instruction *CxtI = nullptr) const {
  489.     return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
  490.   }
  491.  
  492.   OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
  493.                                                const Value *RHS,
  494.                                                const Instruction *CxtI) const {
  495.     return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  496.   }
  497.  
  498.   OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
  499.                                              const Instruction *CxtI) const {
  500.     return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  501.   }
  502.  
  503.   OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
  504.                                                const Value *RHS,
  505.                                                const Instruction *CxtI) const {
  506.     return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  507.   }
  508.  
  509.   OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
  510.                                              const Instruction *CxtI) const {
  511.     return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  512.   }
  513.  
  514.   OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
  515.                                                const Value *RHS,
  516.                                                const Instruction *CxtI) const {
  517.     return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  518.   }
  519.  
  520.   OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
  521.                                              const Instruction *CxtI) const {
  522.     return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  523.   }
  524.  
  525.   virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
  526.                                     const APInt &DemandedMask, KnownBits &Known,
  527.                                     unsigned Depth = 0) = 0;
  528.   virtual Value *
  529.   SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
  530.                              unsigned Depth = 0,
  531.                              bool AllowMultipleUsers = false) = 0;
  532. };
  533.  
  534. } // namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H