//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of AtomicExpandPass via the emitMaskedAtomic*Intrinsic hooks).
  // Their names and semantics could change in the future.

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
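
  // As a concrete illustration (assuming opaque pointers; not itself part of
  // these definitions), the i32 exchange variant specialised for an
  // address-space-0 pointer is declared in IR roughly as:
  //   declare i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0(
  //       ptr, i32 %oparg, i32 %mask, i32 immarg %ordering)
  // and the masked compare-exchange similarly as:
  //   declare i32 @llvm.riscv.masked.cmpxchg.i32.p0(
  //       ptr, i32 %cmpval, i32 %newval, i32 %mask, i32 immarg %ordering)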

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;
  class BitManipGPRGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
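
  // Illustrative i64 instantiations (the mangled names follow the usual
  // overload rules; shown here only as examples):
  //   declare i64 @llvm.riscv.orc.b.i64(i64)
  //   declare i64 @llvm.riscv.clmul.i64(i64, i64)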
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
                           /* AVL */   [LLVMMatchType<0>,
                           /* VSEW */   LLVMMatchType<0>,
                           /* VLMUL */  LLVMMatchType<0>],
                                       [IntrNoMem, IntrHasSideEffects,
                                        ImmArg<ArgIndex<1>>,
                                        ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                              /* VSEW */   [LLVMMatchType<0>,
                              /* VLMUL */   LLVMMatchType<0>],
                                           [IntrNoMem, IntrHasSideEffects,
                                            ImmArg<ArgIndex<0>>,
                                            ImmArg<ArgIndex<1>>]>;
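
  // Illustrative use of the i64 (RV64) overload, with example SEW/VLMUL
  // immediates (their encodings are not defined in this file):
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)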

  // Versions without side effects: easier to optimize and usable if only the
  // returned vector length is important.
  def int_riscv_vsetvli_opt : Intrinsic<[llvm_anyint_ty],
                               /* AVL */   [LLVMMatchType<0>,
                               /* VSEW */   LLVMMatchType<0>,
                               /* VLMUL */  LLVMMatchType<0>],
                                           [IntrNoMem,
                                            ImmArg<ArgIndex<1>>,
                                            ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
                                  /* VSEW */   [LLVMMatchType<0>,
                                  /* VLMUL */   LLVMMatchType<0>],
                                               [IntrNoMem,
                                                ImmArg<ArgIndex<0>>,
                                                ImmArg<ArgIndex<1>>]>;
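
  // Because the _opt variants carry IntrNoMem and no side effects, a call
  // such as (illustrative only)
  //   %vl = call i64 @llvm.riscv.vsetvli.opt.i64(i64 %avl, i64 2, i64 1)
  // can be CSE'd or removed when %vl is unused, unlike int_riscv_vsetvli.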

  // For unit stride mask load
  // Input: (pointer, vl)
  class RISCVUSMLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
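
  // Illustrative instantiation (see `defm vle` below) for <vscale x 2 x i32>
  // on RV64, shown with opaque pointers:
  //   declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //       <vscale x 2 x i32> %passthru, ptr %base, i64 %vl)
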
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
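
  // Illustrative instantiation (see `defm vleff` below); the data and the new
  // VL come back together as an aggregate, roughly:
  //   declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(
  //       <vscale x 2 x i32> %passthru, ptr %base, i64 %avl)
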
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
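
  // Illustrative masked instantiation (the `_mask` records defined below):
  //   declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(
  //       <vscale x 2 x i32> %maskedoff, ptr %base, <vscale x 2 x i1> %mask,
  //       i64 %vl, i64 immarg %policy)
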
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
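
  // Illustrative instantiation (see `defm vlse` below); the stride is a byte
  // stride:
  //   declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(
  //       <vscale x 2 x i32> %passthru, ptr %base, i64 %stride, i64 %vl)
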
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
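
  // Illustrative instantiation (see `defm vse` below):
  //   declare void @llvm.riscv.vse.nxv2i32.i64(
  //       <vscale x 2 x i32> %value, ptr %base, i64 %vl)
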
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  class RISCVUnaryAAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (passthru, vector_in, mask, vl)
  class RISCVCompress
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (passthru, vector_in, int_vector_in, vl)
  class RISCVRGatherVVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (maskedoff, vector_in, int_vector_in, mask, vl, policy)
  class RISCVRGatherVVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (maskedoff, vector_in, int16_vector_in, mask, vl, policy)
  class RISCVRGatherEI16VVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type,
  // and the second operand is XLen.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask). The second operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
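
  // Illustrative vector-vector instantiation (see `defm vadd` below):
  //   declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
  //       <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a,
  //       <vscale x 2 x i32> %b, i64 %vl)
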
  // The destination vector type is the same as the first source vector type
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type.
  // The second source operand must match the destination type or be an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask). The second source operand must match the destination type or
  // be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector
  // type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // type (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector
  // type. The second source operand must match the destination type or be an
  // XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // type (with mask). The second source operand must match the destination
  // type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (passthru, vector_in, vl)
  class RISCVClassifyUnMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVClassifyMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector type.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector type.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector type.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as the first source vector
  // type. The second source operand matches the destination type or is an
  // XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector
  // type. The second source operand matches the destination type or is an
  // XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  class RVVSlideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
  class RVVSlideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Unmasked vector multiply-add operations; the first operand cannot be
  // undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryAAXAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryAAXAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked widening vector multiply-add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryWideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked widening vector multiply-add operations; the first operand cannot
  // be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryWideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vector types.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vector types. The mask type comes from the second source vector type.
  // Input: (maskedoff, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskedUnarySOutUnMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskedUnarySOutMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector type.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector type
  // (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVUnaryABMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskedUnaryMOutMasked
        : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVConversionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }

  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  class RISCVUSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
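
  // Illustrative expansion for nf = 2: the fields are returned as an
  // aggregate, so a two-field unit stride segment load looks roughly like:
  //   declare { <vscale x 1 x i8>, <vscale x 1 x i8> }
  //       @llvm.riscv.vlseg2.nxv1i8.i64(<vscale x 1 x i8> %passthru0,
  //           <vscale x 1 x i8> %passthru1, ptr %base, i64 %vl)
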
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFF<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFFMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For stride segment load
  // Input: (passthru, pointer, offset, vl)
  class RISCVSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl, policy)
  class RISCVSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment load
  // Input: (passthru, pointer, index, vl)
  class RISCVISegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVISegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For stride segment store
  // Input: (value, pointer, offset, vl)
  class RISCVSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment store
  // Input: (value, pointer, offset, vl)
  class RISCVISegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVISegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

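  // Each of the following multiclasses stamps out an unmasked/masked record
  // pair keyed off NAME; e.g. `defm vle : RISCVUSLoad;` below defines
  // int_riscv_vle and int_riscv_vle_mask, i.e. the IR intrinsics
  // llvm.riscv.vle and llvm.riscv.vle.mask.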
  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type(A) is the same as the first source
  // type(A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
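  // For instance, `defm vadd : RISCVBinaryAAX;` below yields llvm.riscv.vadd
  // and llvm.riscv.vadd.mask, the masked form adding the mask and policy
  // operands described by RISCVBinaryAAXMasked above.
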
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type(A) is different from the first source
  // type(B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
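  // Illustrative ABX instantiation (see `defm vwadd` below): the destination
  // element type is twice the source width, e.g. roughly
  //   declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(
  //       <vscale x 1 x i16> %passthru, <vscale x 1 x i8> %a,
  //       <vscale x 1 x i8> %b, i64 %vl)
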
| 1058 | // Like RISCVBinaryABX, but the second operand is used a shift amount so it | ||
| 1059 | // must be a vector or an XLen scalar. | ||
| 1060 |   multiclass RISCVBinaryABShift { | ||
| 1061 | def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked; | ||
| 1062 | def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked; | ||
| 1063 | } | ||
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
  }
  multiclass RISCVSaturatingBinaryABShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vlm : RISCVUSMLoad;
  def int_riscv_vsm : RISCVUSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu : RISCVTernaryWide;
  defm vwmacc : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;

  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
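  // vmv.v.x splats the scalar across the active elements. An illustrative
  // (not authoritative) specialization, with the name suffixes following the
  // overloaded result and vl types:
  //   <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(
  //       <vscale x 2 x i32> passthru, i32 scalar, i64 vl)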
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
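  // vmv.x.s/vfmv.f.s read element 0 of the source into a scalar register;
  // vmv.s.x/vfmv.s.f write a scalar into element 0 of the passthru. An
  // illustrative reading form (suffix follows the overloaded vector type):
  //   i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32>)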

  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVCompress;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAShift;
  defm vssra : RISCVSaturatingBinaryAAShift;

  defm vnclipu : RISCVSaturatingBinaryABShift;
  defm vnclip : RISCVSaturatingBinaryABShift;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredusum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vcpop : RISCVMaskedUnarySOut;
  defm vfirst : RISCVMaskedUnarySOut;
  defm vmsbf : RISCVMaskedUnaryMOut;
  defm vmsof : RISCVMaskedUnaryMOut;
  defm vmsif : RISCVMaskedUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty],
                              [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
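  // viota produces a running count of set mask bits: element i of the result
  // is the number of set bits at indices below i. An illustrative
  // specialization (suffixes follow the overloaded types):
  //   <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(
  //       <vscale x 2 x i32> passthru, <vscale x 2 x i1> mask, i64 vl)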
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty, LLVMMatchType<1>],
                              [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty, LLVMMatchType<1>],
                              [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
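  // The loop above instantiates each multiclass for nf = 2..8, producing
  // unmasked/masked pairs such as int_riscv_vlseg2 / int_riscv_vlseg2_mask up
  // to int_riscv_vsuxseg8 / int_riscv_vsuxseg8_mask, plus the fault-only-first
  // variants vlseg2ff..vlseg8ff.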

  // Strided loads/stores for fixed vectors.
  def int_riscv_masked_strided_load
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>, llvm_anyptr_ty,
                               llvm_anyint_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                              [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
      : DefaultAttrsIntrinsic<[],
                              [llvm_anyvector_ty, llvm_anyptr_ty,
                               llvm_anyint_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                              [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
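  // The operands are (passthru, pointer, stride, mask) for the load and
  // (value, pointer, stride, mask) for the store. A schematic specialization
  // (suffixes illustrative, derived from the overloaded types):
  //   <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0.i64(
  //       <4 x i32> passthru, ptr %base, i64 %stride, <4 x i1> %mask)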

  // Segment loads for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    def int_riscv_seg # nf # _load
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                            !listsplat(LLVMMatchType<0>,
                                                       !add(nf, -1))),
                                [llvm_anyptr_ty, llvm_anyint_ty],
                                [NoCapture<ArgIndex<0>>, IntrReadMem]>;
  }
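  // Each seg<nf>_load returns nf same-typed vectors, one per field; for
  // nf = 2 a plausible form (suffixes illustrative) is:
  //   {<8 x i32>, <8 x i32>} @llvm.riscv.seg2.load.v8i32.p0.i64(ptr %base,
  //                                                             i64 %vl)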

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoGprIntrinsicAny
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;
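// The trailing i8 immediate carries the instructions' 2-bit byte-select
// field (bs), so only the values 0-3 are meaningful, e.g.:
//   i32 @llvm.riscv.aes32esi(i32 rs1, i32 rs2, i8 2)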

class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoByteSelectAny
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;

// Zknd
def int_riscv_aes32dsi : ScalarCryptoByteSelect32;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;

def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;

def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;

// Zkne
def int_riscv_aes32esi : ScalarCryptoByteSelect32;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32;

def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;

// Zknd & Zkne
def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>;
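// The i32 operand of aes64ks1i is the round-number immediate (rnum), for
// which the instruction defines only the values 0x0-0xA, e.g.:
//   i64 @llvm.riscv.aes64ks1i(i64 rs1, i32 10)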

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;

// Zksed
def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
def int_riscv_sm4ed : ScalarCryptoByteSelectAny;

// Zksh
def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"