//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>
#include <optional>

namespace llvm {

class Operator;
class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class OptimizationRemarkEmitter;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
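///
/// A minimal usage sketch (illustrative; \p V and \p DL come from the
/// caller's context, using the value-returning overload declared below):
/// \code
///   KnownBits Known = computeKnownBits(V, DL);
///   if (Known.countMinTrailingZeros() >= 2)
///     ; // V is provably a multiple of 4
/// \endcode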
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the demanded elements in the
/// vector.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Compute known bits from the range metadata.
/// \p Ranges the !range metadata to consult
/// \p Known the resulting known-zero/known-one bits
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Return true if LHS and RHS have no common bits set.
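///
/// A minimal sketch of the classic use (illustrative; \p LHS, \p RHS and
/// \p DL come from the caller): when no bit position can carry, an add is
/// equivalent to a bitwise or, which is how InstCombine canonicalizes it.
/// \code
///   if (haveNoCommonBitsSet(LHS, RHS, DL))
///     ; // fold:  add LHS, RHS  ->  or LHS, RHS
/// \endcode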
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                         const DataLayout &DL, AssumptionCache *AC = nullptr,
                         const Instruction *CxtI = nullptr,
                         const DominatorTree *DT = nullptr,
                         bool UseInstrInfo = true);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

/// Return true if the given instruction is only used in zero-equality
/// comparisons (i.e. icmp eq/ne against zero).
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
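///
/// A minimal sketch (illustrative; \p V, \p DL, \p CxtI and the analyses
/// come from the caller's context):
/// \code
///   if (isKnownNonZero(V, DL, /*Depth=*/0, &AC, CxtI, &DT))
///     ; // e.g. fold  icmp eq V, null  ->  false
/// \endcode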
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                    AssumptionCache *AC = nullptr,
                    const Instruction *CxtI = nullptr,
                    const DominatorTree *DT = nullptr,
                    bool UseInstrInfo = true);

/// Return true if the two given values are negations of each other.
/// Currently it can recognize the following value pairs:
///   1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
///   2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const DataLayout &DL,
                        unsigned Depth = 0, AssumptionCache *AC = nullptr,
                        const Instruction *CxtI = nullptr,
                        const DominatorTree *DT = nullptr,
                        bool UseInstrInfo = true);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL,
                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
                       const Instruction *CxtI = nullptr,
                       const DominatorTree *DT = nullptr,
                       bool UseInstrInfo = true);

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
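///
/// A minimal sketch (illustrative; \p Op is assumed to be an i64 value being
/// considered for narrowing): if the top 33 bits are all copies of the sign
/// bit, truncating to i32 and sign-extending back is lossless.
/// \code
///   if (ComputeNumSignBits(Op, DL) > 32)
///     ; // Op == sext(trunc Op to i32) to i64
/// \endcode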
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

/// Get the upper bound on bit size for this Value \p Op as a signed integer,
/// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                     unsigned Depth = 0);

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///      NaN --> true/false (depending on the NaN's sign bit)
///       +0 --> true
///       -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                         Instruction *InsertBefore = nullptr);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
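///
/// A minimal sketch (illustrative; \p Ptr and \p DL come from the caller):
/// \code
///   // For  %q = getelementptr i8, ptr %p, i64 8  this yields the base %p
///   // and Offset == 8; if no constant offset can be accumulated, the
///   // (possibly stripped) pointer is returned with Offset == 0.
///   int64_t Offset = 0;
///   Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
/// \endcode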
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful, \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and stores the string in Str; if
/// unsuccessful, it returns false. By default the result does not include the
/// trailing null character. If TrimAtNul is set to false, the returned string
/// includes the trailing null character as well as any other characters that
/// come after it.
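///
/// A minimal sketch (illustrative; \p V is assumed to point at a constant
/// global such as  @s = constant [4 x i8] c"abc\00"):
/// \code
///   StringRef Str;
///   if (getConstantStringInfo(V, Str))
///     ; // Str == "abc", so e.g. strlen(V) folds to Str.size() == 3
/// \endcode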
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that is considered the
/// same as the returned pointer by aliasing rules. You CAN'T use it to
/// replace one value with another. If \p MustPreserveNullness is true, the
/// call must preserve the nullness of the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning counts as
/// capturing. The arguments are not marked as returned either, because that
/// would make the intrinsic useless. If \p MustPreserveNullness is true, the
/// intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
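///
/// A minimal sketch (illustrative; \p V comes from the caller):
/// \code
///   // Walk e.g.  %p = getelementptr i8, ptr %buf, i64 4  back to %buf and
///   // check whether it addresses stack memory.
///   if (auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(V)))
///     ; // V is derived from the alloca AI
/// \endcode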
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object,
/// and thus it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer. If not suppressed,
/// dereferenceability and alignment must be proven separately. Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
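///
/// A minimal hoisting sketch (illustrative; \p I and the hypothetical
/// destination InsertPt come from the caller, and loads may still need a
/// separate memory-legality check):
/// \code
///   if (isSafeToSpeculativelyExecute(I, InsertPt, &AC, &DT))
///     I->moveBefore(InsertPt);
/// \endcode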
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (e.g., as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (e.g., as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behavior. Memory-dependent
///   instructions generally cannot be reordered with respect to other
///   memory-dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};
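
// A minimal sketch of how the overflow queries below are typically used
// (illustrative; LHS, RHS, DL, AC, CxtI, and DT come from the caller):
//
//   if (computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT) ==
//       OverflowResult::NeverOverflows)
//     ; // safe to mark the corresponding `add` as `nuw`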

OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT,
                                           bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT);

/// Returns true if the arithmetic part of \p WO's result is used only along
/// the paths control-dependent on the computation not overflowing, \p WO
/// being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOps.
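///
/// A minimal sketch (illustrative; I is a caller-supplied Instruction):
/// \code
///   for (const Use &U : I->operands())
///     if (propagatesPoison(U))
///       ; // poison in U would flow into I's result
/// \endcode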
bool propagatesPoison(const Use &PoisonOp);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallSet<const Value *, 16> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison-producing flags and
/// metadata on the instruction are considered. This can be used to see if the
/// instruction could still introduce undef or poison even without the
/// poison-generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc, SExt, ZExt) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, i32 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
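/// A minimal usage sketch (illustrative; \p V is assumed to be a select
/// instruction):
/// \code
///   Value *LHS, *RHS;
///   SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
///   if (SPR.Flavor == SPF_SMIN)
///     ; // V computes smin(LHS, RHS)
/// \endcode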
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant. In math terms, it can
///   be a free variable. We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms. This
///   results in some odd recurrence structures. Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code. (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
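///
/// A minimal sketch (illustrative; \p P is a PHINode in a loop header):
/// \code
///   BinaryOperator *BO;
///   Value *Start, *Step;
///   if (matchSimpleRecurrence(P, BO, Start, Step) &&
///       BO->getOpcode() == Instruction::Add)
///     ; // P is an add recurrence: P_n = P_(n-1) + Step, with P_0 = Start
/// \endcode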
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);

/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return std::nullopt if
/// no implication can be made. A & B must be i1 (boolean) values or a vector
/// of such values. Note that the truth table for implication is the same as
/// <=u on i1 values (but not <=s!). The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
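///
/// A minimal sketch (illustrative; A and B are caller-supplied i1 Values,
/// e.g. A = icmp ult %x, 8 and B = icmp ult %x, 16):
/// \code
///   if (isImpliedCondition(A, B, DL) == true)
///     ; // whenever A is true, B is true as well
/// \endcode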
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS,
                                       CmpInst::Predicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                            const Value *LHS, const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);

/// If Ptr2 is provably equal to Ptr1 plus a constant offset, return that
/// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
/// this case the offset would be -8 (assuming 4-byte ints).
std::optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                       const DataLayout &DL);
} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H