//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===--------------------------------------------------------------------===//
/// \file
/// This contains common combine transformations that may be used in a combine
/// pass, or by the target elsewhere.
/// Targets can pick individual opcode transformations from the helper or use
/// tryCombine which invokes all transformations. All of the transformations
/// return true if the MachineInstr changed and false otherwise.
///
//===--------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include <functional>

namespace llvm {

class GISelChangeObserver;
class APFloat;
class APInt;
class GPtrAdd;
class GStore;
class GZExtLoad;
class MachineIRBuilder;
class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
struct LegalityQuery;
class RegisterBank;
class RegisterBankInfo;
class TargetLowering;
class TargetRegisterInfo;

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

struct IndexedLoadStoreMatchInfo {
  Register Addr;
  Register Base;
  Register Offset;
  bool IsPre;
};

struct PtrAddChain {
  int64_t Imm;
  Register Base;
  const RegisterBank *Bank;
};

struct RegisterImmPair {
  Register Reg;
  int64_t Imm;
};

struct ShiftOfShiftedLogic {
  MachineInstr *Logic;
  MachineInstr *Shift2;
  Register LogicNonShiftReg;
  uint64_t ValSum;
};

using BuildFnTy = std::function<void(MachineIRBuilder &)>;

struct MergeTruncStoresInfo {
  SmallVector<GStore *> FoundStores;
  GStore *LowestIdxStore = nullptr;
  Register WideSrcVal;
  bool NeedBSwap = false;
  bool NeedRotate = false;
};

using OperandBuildSteps =
    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
struct InstructionBuildSteps {
  unsigned Opcode = 0;          /// The opcode for the produced instruction.
  OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
  InstructionBuildSteps() = default;
  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
      : Opcode(Opcode), OperandFns(OperandFns) {}
};

struct InstructionStepsMatchInfo {
  /// Describes instructions to be built during a combine.
  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
  InstructionStepsMatchInfo() = default;
  InstructionStepsMatchInfo(
      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
      : InstrsToBuild(InstrsToBuild) {}
};

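/// Helper class providing the individual GlobalISel combine implementations.
///
/// A minimal usage sketch (illustrative only; the surrounding combiner
/// boilerplate and the variable names below are assumptions, not part of this
/// header):
///
/// \code
///   // Inside a target combiner's match/apply driver:
///   CombinerHelper Helper(Observer, B, /*IsPreLegalize=*/true, KB, MDT, LInfo);
///   if (Helper.tryCombineExtendingLoads(MI))
///     return true;                  // MI was rewritten; report the change.
///   return Helper.tryCombine(MI);   // Or defer to the catch-all driver.
/// \endcode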
class CombinerHelper {
protected:
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;
  bool IsPreLegalize;
  const LegalizerInfo *LI;
  const RegisterBankInfo *RBI;
  const TargetRegisterInfo *TRI;

public:
  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
                 bool IsPreLegalize,
                 GISelKnownBits *KB = nullptr,
                 MachineDominatorTree *MDT = nullptr,
                 const LegalizerInfo *LI = nullptr);

  GISelKnownBits *getKnownBits() const {
    return KB;
  }

  MachineIRBuilder &getBuilder() const {
    return Builder;
  }

  const TargetLowering &getTargetLowering() const;

  /// \returns true if the combiner is running pre-legalization.
  bool isPreLegalize() const;

  /// \returns true if \p Query is legal on the target.
  bool isLegal(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p
  /// Query is legal on the target.
  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p Ty
  /// is a legal integer constant type on the target.
  bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;

  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of
  /// the changes.
  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                      Register ToReg) const;

  /// Replace a single register operand with a new register and inform the
  /// observer of the changes.
  void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
                        Register ToReg) const;

  /// Replace the opcode in instruction with a new opcode and inform the
  /// observer of the changes.
  void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;

  /// Get the register bank of \p Reg.
  /// If Reg has not been assigned a register, a register class,
  /// or a register bank, then this returns nullptr.
  ///
  /// \pre Reg.isValid()
  const RegisterBank *getRegBank(Register Reg) const;

  /// Set the register bank of \p Reg.
  /// Does nothing if the RegBank is null.
  /// This is the counterpart to getRegBank.
  void setRegBank(Register Reg, const RegisterBank *RegBank);

  /// If \p MI is COPY, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineCopy(MachineInstr &MI);
  bool matchCombineCopy(MachineInstr &MI);
  void applyCombineCopy(MachineInstr &MI);

  /// Returns true if \p DefMI precedes \p UseMI or they are the same
  /// instruction. Both must be in the same basic block.
  bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// Returns true if \p DefMI dominates \p UseMI. By definition an
  /// instruction dominates itself.
  ///
  /// If we haven't been provided with a MachineDominatorTree during
  /// construction, this function returns a conservative result that tracks
  /// just a single basic block.
  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// If \p MI is an extend that consumes the result of a load, try to combine
  /// it. Returns true if MI changed.
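  ///
  /// For example (an illustrative sketch; register names are placeholders):
  ///   %v:_(s8) = G_LOAD %ptr(p0) :: (load (s8))
  ///   %e:_(s32) = G_ZEXT %v(s8)
  ///  -->
  ///   %e:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))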
  bool tryCombineExtendingLoads(MachineInstr &MI);
  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
  void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);

  /// Match (and (load x), mask) -> zextload x
  bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
  /// legal and the surrounding code makes it useful.
  ///
  /// For example (pre-indexed):
  ///
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $val = G_LOAD $addr
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// or (post-indexed):
  ///
  ///     G_STORE $val, $base
  ///     [...]
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $addr = G_INDEXED_STORE $val, $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  bool tryCombineIndexedLoadStore(MachineInstr &MI);
  bool matchCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);
  void applyCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);

  bool matchSextTruncSextLoad(MachineInstr &MI);
  void applySextTruncSextLoad(MachineInstr &MI);

  /// Match sext_inreg(load p), imm -> sextload p
  bool matchSextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  void applySextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
  /// when their source operands are identical.
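  ///
  /// For example (an illustrative sketch; register names are placeholders):
  ///   %d:_(s32) = G_SDIV %a, %b
  ///   %r:_(s32) = G_SREM %a, %b
  ///  -->
  ///   %d:_(s32), %r:_(s32) = G_SDIVREM %a, %b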
  bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);
  void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);

  /// If a brcond's true block is not the fallthrough, make it so by inverting
  /// the condition and swapping operands.
  bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);
  void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);

  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
  /// Returns true if MI changed.
  /// Right now, we support:
  /// - concat_vector(undef, undef) => undef
  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
  ///   build_vector(A, B, C, D)
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool tryCombineConcatVectors(MachineInstr &MI);
  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
  /// can be flattened into a build_vector.
  /// In the first case \p IsUndef will be true.
  /// In the second case \p Ops will contain the operands needed
  /// to produce the flattened build_vector.
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a flattened build_vector with \p Ops or an
  /// implicit_def if IsUndef is true.
  void applyCombineConcatVectors(MachineInstr &MI, bool IsUndef,
                                 const ArrayRef<Register> Ops);

  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
  /// Returns true if MI changed.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool tryCombineShuffleVector(MachineInstr &MI);
  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
  /// concat_vectors.
  /// \p Ops will contain the operands needed to produce the flattened
  /// concat_vectors.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool matchCombineShuffleVector(MachineInstr &MI,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a concat_vectors with \p Ops.
  void applyCombineShuffleVector(MachineInstr &MI,
                                 const ArrayRef<Register> Ops);

  /// Optimize memcpy intrinsics et al, e.g. constant len calls.
  /// \p MaxLen if non-zero specifies the max length of a mem libcall to
  /// inline.
  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);

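  /// Fold (ptr_add (ptr_add X, C1), C2) -> (ptr_add X, C1 + C2).
  ///
  /// For example (an illustrative sketch; register names are placeholders):
  ///   %t:_(p0) = G_PTR_ADD %base, %c8(s64)   ; %c8  = G_CONSTANT i64 8
  ///   %a:_(p0) = G_PTR_ADD %t, %c16(s64)     ; %c16 = G_CONSTANT i64 16
  ///  -->
  ///   %a:_(p0) = G_PTR_ADD %base, %c24(s64)  ; %c24 = G_CONSTANT i64 24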
  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
  void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);

  /// Fold (shift (shift base, x), y) -> (shift base, (x+y))
  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
  void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);

  /// If we have a shift-by-constant of a bitwise logic op that itself has a
  /// shift-by-constant operand with identical opcode, we may be able to
  /// convert that into 2 independent shifts followed by the logic op.
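  ///
  /// For example (an illustrative sketch, using shl and G_AND):
  ///   (shl (and (shl %x, C1), %y), C2)
  ///  -->
  ///   (and (shl %x, C1 + C2), (shl %y, C2))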
  bool matchShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);
  void applyShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);

  /// Transform a multiply by a power-of-2 value to a left shift.
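  /// For example (illustrative): (G_MUL %x:_(s32), 8) -> (G_SHL %x:_(s32), 3).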
  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
  void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);

  // Transform a G_SHL with an extended source into a narrower shift if
  // possible.
  bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
  void applyCombineShlOfExtend(MachineInstr &MI,
                               const RegisterImmPair &MatchData);

  /// Fold away a merge of an unmerge of the corresponding values.
  bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo);

  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
  /// type. This will not produce a shift smaller than \p TargetShiftSize.
  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
                                  unsigned &ShiftVal);
  void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
  bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);

  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
  bool
  matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);
  void
  applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);

  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
  bool matchCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);
  void applyCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);

  /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
  bool
  matchCombineUnmergeUndef(MachineInstr &MI,
                           std::function<void(MachineIRBuilder &)> &MatchInfo);

  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
  void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);

  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
  void applyCombineUnmergeZExtToZExt(MachineInstr &MI);

  /// Transform fp_instr(cst) to constant result of the fp operation.
  bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
                                       std::optional<APFloat> &Cst);
  void applyCombineConstantFoldFpUnary(MachineInstr &MI,
                                       std::optional<APFloat> &Cst);

  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
  void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);

  /// Transform PtrToInt(IntToPtr(x)) to x.
  void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);

  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
  bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);
  void applyCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);

  // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
  void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);

  /// Transform anyext(trunc(x)) to x.
  bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
  void applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);

  /// Transform zext(trunc(x)) to x.
  bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg);

  /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
  bool matchCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  void applyCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Transform fabs(fabs(x)) to fabs(x).
  void applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);

  /// Transform fabs(fneg(x)) to fabs(x).
  bool matchCombineFAbsOfFNeg(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
  bool matchCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);
  void applyCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);

  /// Transform trunc (shl x, K) to shl (trunc x), K
  /// if K < VT.getScalarSizeInBits().
  ///
  /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
  /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
  /// MidVT is obtained by finding a legal type between the trunc's src and dst
  /// types.
  bool matchCombineTruncOfShift(MachineInstr &MI,
                                std::pair<MachineInstr *, LLT> &MatchInfo);
  void applyCombineTruncOfShift(MachineInstr &MI,
                                std::pair<MachineInstr *, LLT> &MatchInfo);

  /// Transform G_MUL(x, -1) to G_SUB(0, x)
  void applyCombineMulByNegativeOne(MachineInstr &MI);

  /// Return true if any explicit use operand on \p MI is defined by a
  /// G_IMPLICIT_DEF.
  bool matchAnyExplicitUseIsUndef(MachineInstr &MI);

  /// Return true if all register explicit use operands on \p MI are defined by
  /// a G_IMPLICIT_DEF.
  bool matchAllExplicitUsesAreUndef(MachineInstr &MI);

  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
  bool matchUndefShuffleVectorMask(MachineInstr &MI);

  /// Return true if a G_STORE instruction \p MI is storing an undef value.
  bool matchUndefStore(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
  bool matchUndefSelectCmp(MachineInstr &MI);

  /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
  bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
  /// true, \p OpIdx will store the operand index of the known selected value.
  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);

  /// Replace an instruction with a G_FCONSTANT with value \p C.
  bool replaceInstWithFConstant(MachineInstr &MI, double C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  bool replaceInstWithConstant(MachineInstr &MI, int64_t C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  bool replaceInstWithConstant(MachineInstr &MI, APInt C);

  /// Replace an instruction with a G_IMPLICIT_DEF.
  bool replaceInstWithUndef(MachineInstr &MI);

  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
  bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);

  /// Delete \p MI and replace all of its uses with \p Replacement.
  bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);

  /// Return true if \p MOP1 and \p MOP2 are register operands that are
  /// defined by equivalent instructions.
  bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);

  /// Return true if \p MOP is defined by a G_CONSTANT with a value equal to
  /// \p C.
  bool matchConstantOp(const MachineOperand &MOP, int64_t C);

  /// Optimize (cond ? x : x) -> x
  bool matchSelectSameVal(MachineInstr &MI);

  /// Optimize (x op x) -> x
  bool matchBinOpSameVal(MachineInstr &MI);

  /// Check if operand \p OpIdx is zero.
  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is undef.
  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is known to be a power of 2.
  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);

  /// Erase \p MI
  bool eraseInst(MachineInstr &MI);

  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
  bool matchSimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);
  void applySimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);

  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
  bool
  matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
                                       InstructionStepsMatchInfo &MatchInfo);

  /// Replace \p MI with a series of instructions described in \p MatchInfo.
  void applyBuildInstructionSteps(MachineInstr &MI,
                                  InstructionStepsMatchInfo &MatchInfo);

  /// Match ashr (shl x, C), C -> sext_inreg x, (BitWidth - C)
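  ///
  /// For example, with 32-bit registers (illustrative):
  ///   (G_ASHR (G_SHL %x, 24), 24) -> (G_SEXT_INREG %x, 8)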
  bool matchAshrShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo);
  void applyAshShlToSextInreg(MachineInstr &MI,
                              std::tuple<Register, int64_t> &MatchInfo);

  /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
  bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \return true if \p MI is a G_AND instruction whose operands are x and y
  /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
  ///
  /// \param [in] MI - The G_AND instruction.
  /// \param [out] Replacement - A register the G_AND should be replaced with on
  /// success.
  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_OR instruction whose operands are x and y
  /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
  /// value.)
  ///
  /// \param [in] MI - The G_OR instruction.
  /// \param [out] Replacement - A register the G_OR should be replaced with on
  /// success.
  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
  bool matchRedundantSExtInReg(MachineInstr &MI);

  /// Combine inverting a result of a compare into the opposite cond code.
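  /// For example (an illustrative sketch):
  ///   (G_XOR (G_ICMP intpred(eq), %a, %b), 1) -> (G_ICMP intpred(ne), %a, %b)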
  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
  void applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);

  /// Fold (xor (and x, y), y) -> (and (not x), y)
  ///{
  bool matchXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  void applyXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  ///}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI);
  void applyPtrAddZero(MachineInstr &MI);

  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
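  ///
  /// For example (an illustrative sketch; assumes %p is known to be a power of
  /// two):
  ///   %r:_(s32) = G_UREM %x, %p
  ///  -->
  ///   %m:_(s32) = G_ADD %p, -1
  ///   %r:_(s32) = G_AND %x, %m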
  void applySimplifyURemByPow2(MachineInstr &MI);

  /// Push a binary operator through a select on constants.
  ///
  /// binop (select cond, K0, K1), K2 ->
  ///   select cond, (binop K0, K2), (binop K1, K2)
  bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
  bool applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);

  bool matchCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  void applyCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  /// Match expression trees of the form
  ///
  /// \code
  ///  sN *a = ...
  ///  sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
  /// \endcode
  ///
  /// And check if the tree can be replaced with an M-bit load + possibly a
  /// bswap.
  bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchTruncStoreMerge(MachineInstr &MI, MergeTruncStoresInfo &MatchInfo);
  void applyTruncStoreMerge(MachineInstr &MI, MergeTruncStoresInfo &MatchInfo);

  bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);
  void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);

  bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);
  void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);

  bool matchExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
  void applyExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);

  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// By default, it erases the instruction \p MI from the function.
  void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo);
  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// This variant does not erase \p MI after calling the build function.
  void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo);
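  /// Match a G_FSHL or G_FSHR whose two data operands are the same register
  /// and turn it into a rotate. For example (an illustrative sketch):
  ///   %r:_(s32) = G_FSHL %x, %x, %amt -> %r:_(s32) = G_ROTL %x, %amt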
  bool matchFunnelShiftToRotate(MachineInstr &MI);
  void applyFunnelShiftToRotate(MachineInstr &MI);
  bool matchRotateOutOfRange(MachineInstr &MI);
  void applyRotateOutOfRange(MachineInstr &MI);

  /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
  /// or false constant based off of KnownBits information.
  bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);

  /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
  /// KnownBits information.
  bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
  bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI,
                                         BuildFnTy &MatchInfo);
  /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
  bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
  bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match: shr (and x, n), k -> ubfx x, pos, width
  bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  // Helpers for reassociation:
  bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS,
                                    BuildFnTy &MatchInfo);
  bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS,
                                          MachineInstr *RHS,
                                          BuildFnTy &MatchInfo);
  bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS,
                                    MachineInstr *RHS, BuildFnTy &MatchInfo);
  /// Reassociate pointer calculations with G_ADD involved, to allow better
  /// addressing mode usage.
  bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Do constant folding when opportunities are exposed after MIR building.
  bool matchConstantFold(MachineInstr &MI, APInt &MatchInfo);

  /// \returns true if it is possible to narrow the width of a scalar binop
  /// feeding a G_AND instruction \p MI.
  bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Given a G_UDIV \p MI expressing a divide by constant, return an
  /// expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
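  ///
  /// For example, an unsigned 32-bit divide by 10 (illustrative; the constant
  /// and shift follow the usual magic-number derivation):
  ///   %q:_(s32) = G_UDIV %x, 10
  ///  -->
  ///   %h:_(s32) = G_UMULH %x, 3435973837   ; 0xCCCCCCCD
  ///   %q:_(s32) = G_LSHR %h, 3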
  MachineInstr *buildUDivUsingMul(MachineInstr &MI);
  /// Combine G_UDIV by constant into a multiply by magic constant.
  bool matchUDivByConst(MachineInstr &MI);
  void applyUDivByConst(MachineInstr &MI);

  /// Given a G_SDIV \p MI expressing a signed divide by constant, return an
  /// expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
  MachineInstr *buildSDivUsingMul(MachineInstr &MI);
  bool matchSDivByConst(MachineInstr &MI);
  void applySDivByConst(MachineInstr &MI);

  // G_UMULH x, (1 << c) -> x >> (bitwidth - c)
  bool matchUMulHToLShr(MachineInstr &MI);
  void applyUMulHToLShr(MachineInstr &MI);

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
  bool tryCombine(MachineInstr &MI);

  /// Emit loads and stores that perform the given memcpy.
  /// Assumes \p MI is a G_MEMCPY_INLINE
  /// TODO: implement dynamically sized inline memcpy,
  ///       and rename: s/bool tryEmit/void emit/
  bool tryEmitMemcpyInline(MachineInstr &MI);

  /// Match:
  ///   (G_UMULO x, 2) -> (G_UADDO x, x)
  ///   (G_SMULO x, 2) -> (G_SADDO x, x)
  bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  ///   (G_*MULO x, 0) -> 0 + no carry out
  bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  ///   (G_*ADDO x, 0) -> x + no carry out
  bool matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  ///   (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
  ///   (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
  bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fadd x, fneg(y)) -> (fsub x, y)
  ///           (fadd fneg(x), y) -> (fsub y, x)
  ///           (fsub x, fneg(y)) -> (fadd x, y)
  ///           (fmul fneg(x), fneg(y)) -> (fmul x, y)
  ///           (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  ///           (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
  ///           (fma fneg(x), fneg(y), z) -> (fma x, y, z)
  bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo);
  void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo);

  bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
                           bool &HasFMAD, bool &Aggressive,
                           bool CanReassociate = false);

  /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
  ///           (fadd (fmul x, y), z) -> (fmad x, y, z)
  bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  ///           (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
  bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo);

  /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  ///          (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
  bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI,
                                          BuildFnTy &MatchInfo);

  // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
  //            -> (fma x, y, (fma (fpext u), (fpext v), z))
  //           (fadd (fmad x, y, (fpext (fmul u, v))), z)
  //            -> (fmad x, y, (fmad (fpext u), (fpext v), z))
  bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI,
                                                      BuildFnTy &MatchInfo);

  /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
  ///           (fsub (fmul x, y), z) -> (fmad x, y, -z)
  bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  ///           (fsub (fneg (fmul x, y)), z) -> (fmad (fneg x), y, (fneg z))
  bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI,
                                           BuildFnTy &MatchInfo);

  /// Transform (fsub (fpext (fmul x, y)), z)
  ///           -> (fma (fpext x), (fpext y), (fneg z))
  ///           (fsub (fpext (fmul x, y)), z)
  ///           -> (fmad (fpext x), (fpext y), (fneg z))
  bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo);

  /// Transform (fsub (fpext (fneg (fmul x, y))), z)
  ///           -> (fneg (fma (fpext x), (fpext y), z))
  ///           (fsub (fpext (fneg (fmul x, y))), z)
  ///           -> (fneg (fmad (fpext x), (fpext y), z))
  bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI,
                                                BuildFnTy &MatchInfo);

  /// Fold boolean selects to logical operations.
  bool matchSelectToLogical(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info);

  /// Transform G_ADD(x, G_SUB(y, x)) to y.
  /// Transform G_ADD(G_SUB(y, x), x) to y.
  bool matchAddSubSameReg(MachineInstr &MI, Register &Src);

  bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo);
  bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo);
  bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo);

  /// Transform:
  ///   (x + y) - y -> x
  ///   (x + y) - x -> y
  ///   x - (y + x) -> 0 - y
  ///   x - (x + z) -> 0 - z
  bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \returns true if it is possible to simplify a select instruction \p MI
  /// to a min/max instruction of some sort.
  bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform:
  ///   (X + Y) == X -> Y == 0
  ///   (X - Y) == X -> Y == 0
  ///   (X ^ Y) == X -> Y == 0
  ///   (X + Y) != X -> Y != 0
  ///   (X - Y) != X -> Y != 0
  ///   (X ^ Y) != X -> Y != 0
  bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo);

private:
  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a post-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPostIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                              Register &Offset);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a pre-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                             Register &Offset);

  /// Helper function for matchLoadOrCombine. Searches for Registers
  /// which may have been produced by a load instruction + some arithmetic.
  ///
  /// \param [in] Root - The search root.
  ///
  /// \returns The Registers found during the search.
  std::optional<SmallVector<Register, 8>>
  findCandidatesForLoadOrCombine(const MachineInstr *Root) const;

  /// Helper function for matchLoadOrCombine.
  ///
  /// Checks if every register in \p RegsToVisit is defined by a load
  /// instruction + some arithmetic.
  ///
  /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
  /// at to the index of the load.
  /// \param [in] MemSizeInBits - The number of bits each load should produce.
  ///
  /// \returns On success, a 3-tuple containing lowest-index load found, the
  /// lowest index, and the last load in the sequence.
  std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
  findLoadOffsetsForLoadOrCombine(
      SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
      const SmallVector<Register, 8> &RegsToVisit,
      const unsigned MemSizeInBits);

  /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
  /// a re-association of its operands would break an existing legal addressing
  /// mode that the address computation currently represents.
  bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd);

  /// Behavior when a floating point min/max is given one NaN and one
  /// non-NaN as input.
  enum class SelectPatternNaNBehaviour {
    NOT_APPLICABLE = 0, /// NaN behavior not applicable.
    RETURNS_NAN,        /// Given one NaN input, returns the NaN.
    RETURNS_OTHER,      /// Given one NaN input, returns the non-NaN.
    RETURNS_ANY         /// Given one NaN input, can return either (or both
                        /// operands are known non-NaN.)
  };

  /// \returns which of \p LHS and \p RHS would be the result of a non-equality
  /// floating point comparison where one of \p LHS and \p RHS may be NaN.
  ///
  /// If both \p LHS and \p RHS may be NaN, returns
  /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
  SelectPatternNaNBehaviour
  computeRetValAgainstNaN(Register LHS, Register RHS,
                          bool IsOrderedComparison) const;

  /// Determines the floating point min/max opcode which should be used for
  /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
  ///
  /// \returns 0 if this G_SELECT should not be combined to a floating point
  /// min or max. If it should be combined, returns one of
  ///
  /// * G_FMAXNUM
  /// * G_FMAXIMUM
  /// * G_FMINNUM
  /// * G_FMINIMUM
  ///
  /// Helper function for matchFPSelectToMinMax.
  unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
                                   SelectPatternNaNBehaviour VsNaNRetVal) const;

  /// Handle floating point cases for matchSimplifySelectToMinMax.
  ///
  /// E.g.
  ///
  ///   select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
  ///   select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
  bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
                             Register FalseVal, BuildFnTy &MatchInfo);
};
} // namespace llvm

#endif