//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AARCH64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
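
// Illustration only (not in the original file): a plausible IR use of the
// exclusive-monitor intrinsics, assuming the usual ".p0" suffix for the
// overloaded llvm_anyptr_ty operand:
//   %old  = call i64 @llvm.aarch64.ldxr.p0(ptr %addr)
//   %fail = call i32 @llvm.aarch64.stxr.p0(i64 %new, ptr %addr)
// A non-zero %fail result is assumed to mean the store-exclusive lost the
// monitor and the sequence should retry.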

def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                                 [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;

def int_aarch64_clrex : Intrinsic<[]>;

def int_aarch64_sdiv : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                              LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                              LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_fjcvtzs : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
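
// Illustration only (not in the original file): FJCVTZS performs the
// JavaScript-style double-to-int32 conversion; a plausible IR use:
//   %i = call i32 @llvm.aarch64.fjcvtzs(double %x)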

def int_aarch64_cls: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_cls64: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;

def int_aarch64_frint32z
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint64z
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint32x
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint64x
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;

//===----------------------------------------------------------------------===//
// HINT

def int_aarch64_hint : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;

def int_aarch64_break : Intrinsic<[], [llvm_i32_ty],
    [IntrNoMem, IntrHasSideEffects, IntrNoReturn, IntrCold, ImmArg<ArgIndex<0>>]>;


def int_aarch64_prefetch : Intrinsic<[],
    [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
    [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, ReadOnly<ArgIndex<0>>,
     ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>
    ]>,
    ClangBuiltin<"__builtin_arm_prefetch">;
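
// Illustration only (not in the original file): a plausible IR call. The four
// control operands must be immediates, per the ImmArg attributes; their
// meanings (read/write, cache target, stream policy, data vs. instruction)
// are assumed to mirror __builtin_arm_prefetch:
//   call void @llvm.aarch64.prefetch(ptr %p, i32 0, i32 0, i32 0, i32 1)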

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

def int_aarch64_dmb : ClangBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
def int_aarch64_dsb : ClangBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
def int_aarch64_isb : ClangBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
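
// Illustration only (not in the original file): the i32 operand selects the
// barrier domain and type; 0xF is assumed to encode the full-system "SY"
// barrier:
//   call void @llvm.aarch64.dmb(i32 15)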

// A space-consuming intrinsic primarily for testing block and jump table
// placements. The first argument is the number of bytes this "instruction"
// takes up, the second and return value are essentially chains, used to force
// ordering during ISel.
def int_aarch64_space : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], []>;

}

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Scalar_Float_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMTruncatedType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMExtendedType<0>, LLVMExtendedType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMExtendedType<0>, llvm_i32_ty],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_anyvector_ty],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMTruncatedType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMTruncatedType<0>, llvm_i32_ty],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                            [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
                            [IntrNoMem]>;

  class AdvSIMD_3IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                            [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                             LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  class AdvSIMD_CvtFxToFP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                            [IntrNoMem]>;

  class AdvSIMD_1Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;

  class AdvSIMD_Dot_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;

  class AdvSIMD_FP16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;

  class AdvSIMD_MatMul_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;

  class AdvSIMD_FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;

  class AdvSIMD_BF16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                            [IntrNoMem]>;
}

// Arithmetic ops

let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
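
  // Illustration only (not in the original file): across-lane intrinsics are
  // overloaded on both the scalar result and the input vector, so the name is
  // assumed to carry two type suffixes:
  //   %sum = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %v)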

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;
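
  // Illustration only (not in the original file): a plausible use of the
  // saturating add on a 128-bit vector of i32:
  //   %r = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %b)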

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_sqdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
  def int_aarch64_neon_sqdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_sqrdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
  def int_aarch64_neon_sqrdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;

  def int_aarch64_neon_sqrdmlah : AdvSIMD_3IntArg_Intrinsic;
  def int_aarch64_neon_sqrdmlsh : AdvSIMD_3IntArg_Intrinsic;

  // Vector Polynomial Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
  def int_aarch64_neon_pmull64 :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
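
  // Illustration only (not in the original file): the 128-bit product comes
  // back as bytes and is typically bitcast to the type the caller wants:
  //   %p  = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b)
  //   %pw = bitcast <16 x i8> %p to i128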

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let IntrProperties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_faddp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1Arg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : DefaultAttrsIntrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
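
  // Illustration only (not in the original file): these are overloaded on both
  // result and source, e.g. a round-towards-zero signed conversion:
  //   %i = call <4 x i32> @llvm.aarch64.neon.fcvtzs.v4i32.v4f32(<4 x float> %x)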

  // v8.5-A Vector FP Rounding
  def int_aarch64_neon_frint32x : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint32z : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint64x : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint64z : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty],
                                                      [IntrNoMem]>;

  // v8.2-A Dot Product
  def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;

  // v8.6-A Matrix Multiply Intrinsics
  def int_aarch64_neon_ummla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_smmla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_usmmla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_usdot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_bfdot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_bfmmla
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                            [IntrNoMem]>;
  def int_aarch64_neon_bfmlalb : AdvSIMD_BF16FML_Intrinsic;
  def int_aarch64_neon_bfmlalt : AdvSIMD_BF16FML_Intrinsic;


  // v8.6-A Bfloat Intrinsics
  def int_aarch64_neon_bfcvt
    : DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_aarch64_neon_bfcvtn
    : DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_bfcvtn2
    : DefaultAttrsIntrinsic<[llvm_v8bf16_ty],
                            [llvm_v8bf16_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;

  // v8.2-A FP16 Fused Multiply-Add Long
  def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlal2 : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl2 : AdvSIMD_FP16FML_Intrinsic;

  // v8.3-A Floating-point complex add
  def int_aarch64_neon_vcadd_rot90 : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_vcadd_rot270 : AdvSIMD_2VectorArg_Intrinsic;

  def int_aarch64_neon_vcmla_rot0 : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot90 : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot180 : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot270 : AdvSIMD_3VectorArg_Intrinsic;
}

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Vector2Index_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                            [IntrNoMem]>;
}

// Vector element to element moves
def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_1Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                            [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
                            [LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             llvm_i64_ty, llvm_anyptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                                 LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                                 llvm_i64_ty, llvm_anyptr_ty],
                            [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
                            [LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty,
                             llvm_i64_ty, llvm_anyptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                                 LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty,
                                 LLVMMatchType<0>, LLVMMatchType<0>,
                                 llvm_i64_ty, llvm_anyptr_ty],
                            [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, llvm_anyvector_ty],
                            [LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, llvm_anyvector_ty,
                             llvm_i64_ty, llvm_anyptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                                 LLVMMatchType<0>, LLVMMatchType<0>,
                                 LLVMAnyPointerType<LLVMMatchType<0>>],
                            [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                                 LLVMMatchType<0>, LLVMMatchType<0>,
                                 llvm_i64_ty, llvm_anyptr_ty],
                            [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;
}

// Memory ops

def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;
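
// Illustration only (not in the original file): the structured loads return
// multiple vectors as one aggregate, overloaded on the vector type and the
// pointer (the ".p0" suffix is an assumption about the mangling):
//   %vld = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %p)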

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_Tbl1_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;

  class AdvSIMD_Tbx1_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v16i8_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;
}
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
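
// Illustration only (not in the original file): tbl does a byte-wise table
// lookup (out-of-range indices are assumed to yield zero, per the TBL
// instruction), while tbx instead falls back to its first operand:
//   %r = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %table, <16 x i8> %idx)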

let TargetPrefix = "aarch64" in {
  class FPCR_Get_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;
  class FPCR_Set_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_i64_ty], [IntrNoMem, IntrHasSideEffects]>;
  class RNDR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty, llvm_i1_ty], [], [IntrNoMem, IntrHasSideEffects]>;
}

// FPCR
def int_aarch64_get_fpcr : FPCR_Get_Intrinsic;
def int_aarch64_set_fpcr : FPCR_Set_Intrinsic;

// Armv8.5-A Random number generation intrinsics
def int_aarch64_rndr : RNDR_Intrinsic;
def int_aarch64_rndrrs : RNDR_Intrinsic;
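
// Illustration only (not in the original file): the two results come back as
// a struct; the i1 is assumed to reflect the RNDR/RNDRRS success flag:
//   %res = call { i64, i1 } @llvm.aarch64.rndr()
//   %val = extractvalue { i64, i1 } %res, 0
//   %ok  = extractvalue { i64, i1 } %res, 1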

let TargetPrefix = "aarch64" in {
  class Crypto_AES_DataKey_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  class Crypto_AES_Data_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;

  // SHA intrinsic taking 1 word of the hash (i32).
  class Crypto_SHA_1Hash_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule
  class Crypto_SHA_8Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule
  class Crypto_SHA_12Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;

  // SHA512 intrinsic taking 2 arguments
  class Crypto_SHA512_2Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

  // SHA512 intrinsic taking 3 arguments
  class Crypto_SHA512_3Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;

  // SHA3 intrinsics taking 3 arguments
  class Crypto_SHA3_3Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

  // SHA3 intrinsic taking 2 arguments
  class Crypto_SHA3_2Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;

  // SHA3 intrinsic taking 2 arguments and 1 immediate
  class Crypto_SHA3_2ArgImm_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i64_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class Crypto_SM3_3Vector_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  class Crypto_SM3_3VectorIndexed_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i64_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class Crypto_SM4_2Vector_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
}

// AES
def int_aarch64_crypto_aese : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;

// SHA1
def int_aarch64_crypto_sha1c : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h : Crypto_SHA_1Hash_Intrinsic;

def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;
| 805 | |||
| 806 | // SHA256 |
||
| 807 | def int_aarch64_crypto_sha256h : Crypto_SHA_8Hash4Schedule_Intrinsic; |
||
| 808 | def int_aarch64_crypto_sha256h2 : Crypto_SHA_8Hash4Schedule_Intrinsic; |
||
| 809 | def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic; |
||
| 810 | def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic; |
||
| 811 | |||
| 812 | //SHA3 |
||
| 813 | def int_aarch64_crypto_eor3s : Crypto_SHA3_3Arg_Intrinsic; |
||
| 814 | def int_aarch64_crypto_eor3u : Crypto_SHA3_3Arg_Intrinsic; |
||
| 815 | def int_aarch64_crypto_bcaxs : Crypto_SHA3_3Arg_Intrinsic; |
||
| 816 | def int_aarch64_crypto_bcaxu : Crypto_SHA3_3Arg_Intrinsic; |
||
| 817 | def int_aarch64_crypto_rax1 : Crypto_SHA3_2Arg_Intrinsic; |
||
| 818 | def int_aarch64_crypto_xar : Crypto_SHA3_2ArgImm_Intrinsic; |
||
| 819 | |||
| 820 | // SHA512 |
||
| 821 | def int_aarch64_crypto_sha512h : Crypto_SHA512_3Arg_Intrinsic; |
||
| 822 | def int_aarch64_crypto_sha512h2 : Crypto_SHA512_3Arg_Intrinsic; |
||
| 823 | def int_aarch64_crypto_sha512su0 : Crypto_SHA512_2Arg_Intrinsic; |
||
| 824 | def int_aarch64_crypto_sha512su1 : Crypto_SHA512_3Arg_Intrinsic; |
||
| 825 | |||
| 826 | //SM3 & SM4 |
||
| 827 | def int_aarch64_crypto_sm3partw1 : Crypto_SM3_3Vector_Intrinsic; |
||
| 828 | def int_aarch64_crypto_sm3partw2 : Crypto_SM3_3Vector_Intrinsic; |
||
| 829 | def int_aarch64_crypto_sm3ss1 : Crypto_SM3_3Vector_Intrinsic; |
||
| 830 | def int_aarch64_crypto_sm3tt1a : Crypto_SM3_3VectorIndexed_Intrinsic; |
||
| 831 | def int_aarch64_crypto_sm3tt1b : Crypto_SM3_3VectorIndexed_Intrinsic; |
||
| 832 | def int_aarch64_crypto_sm3tt2a : Crypto_SM3_3VectorIndexed_Intrinsic; |
||
| 833 | def int_aarch64_crypto_sm3tt2b : Crypto_SM3_3VectorIndexed_Intrinsic; |
||
| 834 | def int_aarch64_crypto_sm4e : Crypto_SM4_2Vector_Intrinsic; |
||
| 835 | def int_aarch64_crypto_sm4ekey : Crypto_SM4_2Vector_Intrinsic; |
||
| 836 | |||
| 837 | //===----------------------------------------------------------------------===// |
||
| 838 | // CRC32 |
||
| 839 | |||
| 840 | let TargetPrefix = "aarch64" in { |
||
| 841 | |||
| 842 | def int_aarch64_crc32b : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], |
||
| 843 | [IntrNoMem]>; |
||
| 844 | def int_aarch64_crc32cb : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], |
||
| 845 | [IntrNoMem]>; |
||
| 846 | def int_aarch64_crc32h : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], |
||
| 847 | [IntrNoMem]>; |
||
| 848 | def int_aarch64_crc32ch : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], |
||
| 849 | [IntrNoMem]>; |
||
| 850 | def int_aarch64_crc32w : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], |
||
| 851 | [IntrNoMem]>; |
||
| 852 | def int_aarch64_crc32cw : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], |
||
| 853 | [IntrNoMem]>; |
||
| 854 | def int_aarch64_crc32x : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty], |
||
| 855 | [IntrNoMem]>; |
||
| 856 | def int_aarch64_crc32cx : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty], |
||
| 857 | [IntrNoMem]>; |
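
// Illustration only (not in the original file): each variant folds the next
// chunk of input into a running 32-bit checksum, e.g. one byte at a time:
//   %crc = call i32 @llvm.aarch64.crc32b(i32 %acc, i32 %byte)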
}

//===----------------------------------------------------------------------===//
// Memory Tagging Extensions (MTE) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_irg : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
                                            [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_addg : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
                                             [IntrNoMem]>;
def int_aarch64_gmi : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty],
                                            [IntrNoMem]>;
def int_aarch64_ldg : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty],
                                            [IntrReadMem]>;
def int_aarch64_stg : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
                                            [IntrWriteMem]>;
def int_aarch64_subp : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
                                             [IntrNoMem]>;

// The following are codegen-only intrinsics for stack instrumentation.

// Generate a randomly tagged stack base pointer.
def int_aarch64_irg_sp : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_i64_ty],
                                               [IntrNoMem, IntrHasSideEffects]>;

// Transfer pointer tag with offset.
// ptr1 = tagp(ptr0, baseptr, tag_offset) returns a pointer where
// * the address is the address in ptr0, and
// * the tag is a function of (tag in baseptr, tag_offset).
// ** Beware, this is not the same function as implemented by the ADDG instruction!
// Backend optimizations may change tag_offset; the only guarantee is that calls
// to tagp with the same pair of (baseptr, tag_offset) will produce pointers
// with the same tag value, assuming the set of excluded tags has not changed.
// Address bits in baseptr and tag bits in ptr0 are ignored.
// When the offset between ptr0 and baseptr is a compile-time constant, this can
// be emitted as
//   ADDG ptr1, baseptr, (ptr0 - baseptr), tag_offset
// It is intended that ptr0 is an alloca address, and baseptr is the direct
// output of llvm.aarch64.irg.sp.
def int_aarch64_tagp : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_ptr_ty, llvm_i64_ty],
                                             [IntrNoMem, ImmArg<ArgIndex<2>>]>;
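
// Illustration only (not in the original file): a plausible instrumentation
// sequence, assuming the ".p0" suffix for the overloaded pointer type:
//   %base   = call ptr @llvm.aarch64.irg.sp(i64 0)
//   %tagged = call ptr @llvm.aarch64.tagp.p0(ptr %alloca, ptr %base, i64 1)
//   call void @llvm.aarch64.settag(ptr %tagged, i64 32)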

// Update allocation tags for the memory range to match the tag in the pointer argument.
def int_aarch64_settag : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

// Update allocation tags for the memory range to match the tag in the pointer argument,
// and set memory contents to zero.
def int_aarch64_settag_zero : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

// Update allocation tags for a 16-aligned, 16-sized memory region, and store a
// pair of 8-byte values.
def int_aarch64_stgp : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
}

//===----------------------------------------------------------------------===//
// Memory Operations (MOPS) Intrinsics
let TargetPrefix = "aarch64" in {
  // Sizes are chosen to correspond to the llvm.memset intrinsic: ptr, i8, i64
  def int_aarch64_mops_memset_tag : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty, llvm_i64_ty],
      [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
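
  // Illustration only (not in the original file): like llvm.memset, but also
  // retagging the region; the returned pointer is assumed to mark the end of
  // the written range, mirroring how the MOPS instructions advance their
  // destination register:
  //   %end = call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 %len)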
}

// Transactional Memory Extension (TME) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_tstart : ClangBuiltin<"__builtin_arm_tstart">,
                         Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;

def int_aarch64_tcommit : ClangBuiltin<"__builtin_arm_tcommit">, Intrinsic<[], [], [IntrWillReturn]>;

def int_aarch64_tcancel : ClangBuiltin<"__builtin_arm_tcancel">,
                          Intrinsic<[], [llvm_i64_ty], [IntrWillReturn, ImmArg<ArgIndex<0>>]>;

def int_aarch64_ttest : ClangBuiltin<"__builtin_arm_ttest">,
                        Intrinsic<[llvm_i64_ty], [],
                                  [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
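
// Illustration only (not in the original file): a plausible transaction,
// where a zero result from tstart is assumed to mean the transaction started:
//   %st = call i64 @llvm.aarch64.tstart()
//   %ok = icmp eq i64 %st, 0
//   ...
//   call void @llvm.aarch64.tcommit()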

// Armv8.7-A load/store 64-byte intrinsics
defvar data512 = !listsplat(llvm_i64_ty, 8);
def int_aarch64_ld64b: Intrinsic<data512, [llvm_ptr_ty]>;
def int_aarch64_st64b: Intrinsic<[], !listconcat([llvm_ptr_ty], data512)>;
def int_aarch64_st64bv: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], data512)>;
def int_aarch64_st64bv0: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], data512)>;
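
// Illustration only (not in the original file): ld64b's eight i64 results are
// assumed to come back as a single aggregate:
//   %blk = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(ptr %p)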

}

def llvm_nxv1i1_ty : LLVMType<nxv1i1>;
def llvm_nxv2i1_ty : LLVMType<nxv2i1>;
def llvm_nxv4i1_ty : LLVMType<nxv4i1>;
def llvm_nxv8i1_ty : LLVMType<nxv8i1>;
def llvm_nxv16i1_ty : LLVMType<nxv16i1>;
def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
def llvm_nxv8bf16_ty : LLVMType<nxv8bf16>;
def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
| 955 | let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". |
||
| 956 | |||
| 957 | class AdvSIMD_1Vec_PredLoad_Intrinsic |
||
| 958 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 959 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 960 | LLVMPointerToElt<0>], |
||
| 961 | [IntrReadMem, IntrArgMemOnly]>; |
||
| 962 | |||
| 963 | class AdvSIMD_2Vec_PredLoad_Intrinsic |
||
| 964 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], |
||
| 965 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 966 | LLVMPointerToElt<0>], |
||
| 967 | [IntrReadMem, IntrArgMemOnly]>; |
||
| 968 | |||
| 969 | class AdvSIMD_3Vec_PredLoad_Intrinsic |
||
| 970 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>], |
||
| 971 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 972 | LLVMPointerToElt<0>], |
||
| 973 | [IntrReadMem, IntrArgMemOnly]>; |
||
| 974 | |||
| 975 | class AdvSIMD_4Vec_PredLoad_Intrinsic |
||
| 976 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, |
||
| 977 | LLVMMatchType<0>], |
||
| 978 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 979 | LLVMPointerToElt<0>], |
||
| 980 | [IntrReadMem, IntrArgMemOnly]>; |
||
| 981 | |||
| 982 | class AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic |
||
| 983 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 984 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 985 | LLVMPointerToElt<0>], |
||
| 986 | [IntrInaccessibleMemOrArgMemOnly]>; |
||
| 987 | |||
| 988 | class AdvSIMD_1Vec_PredStore_Intrinsic |
||
| 989 | : DefaultAttrsIntrinsic<[], |
||
| 990 | [llvm_anyvector_ty, |
||
| 991 | LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 992 | LLVMPointerToElt<0>], |
||
| 993 | [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>; |
||
| 994 | |||
| 995 | class AdvSIMD_2Vec_PredStore_Intrinsic |
||
| 996 | : DefaultAttrsIntrinsic<[], |
||
| 997 | [llvm_anyvector_ty, LLVMMatchType<0>, |
||
| 998 | LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>], |
||
| 999 | [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>; |
||
| 1000 | |||
| 1001 | class AdvSIMD_3Vec_PredStore_Intrinsic |
||
| 1002 | : DefaultAttrsIntrinsic<[], |
||
| 1003 | [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, |
||
| 1004 | LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>], |
||
| 1005 | [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>; |
||
| 1006 | |||
| 1007 | class AdvSIMD_4Vec_PredStore_Intrinsic |
||
| 1008 | : DefaultAttrsIntrinsic<[], |
||
| 1009 | [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, |
||
| 1010 | LLVMMatchType<0>, |
||
| 1011 | LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>], |
||
| 1012 | [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>; |
||
| 1013 | |||
| 1014 | class AdvSIMD_SVE_Index_Intrinsic |
||
| 1015 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 1016 | [LLVMVectorElementType<0>, |
||
| 1017 | LLVMVectorElementType<0>], |
||
| 1018 | [IntrNoMem]>; |
||
| 1019 | |||
| 1020 | class AdvSIMD_Merged1VectorArg_Intrinsic |
||
| 1021 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 1022 | [LLVMMatchType<0>, |
||
| 1023 | LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 1024 | LLVMMatchType<0>], |
||
| 1025 | [IntrNoMem]>; |
||
| 1026 | |||
| 1027 | class AdvSIMD_2VectorArgIndexed_Intrinsic |
||
| 1028 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 1029 | [LLVMMatchType<0>, |
||
| 1030 | LLVMMatchType<0>, |
||
| 1031 | llvm_i32_ty], |
||
| 1032 | [IntrNoMem, ImmArg<ArgIndex<2>>]>; |
||
| 1033 | |||
| 1034 | class AdvSIMD_3VectorArgIndexed_Intrinsic |
||
| 1035 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 1036 | [LLVMMatchType<0>, |
||
| 1037 | LLVMMatchType<0>, |
||
| 1038 | LLVMMatchType<0>, |
||
| 1039 | llvm_i32_ty], |
||
| 1040 | [IntrNoMem, ImmArg<ArgIndex<3>>]>; |
||
| 1041 | |||
| 1042 | class AdvSIMD_Pred1VectorArg_Intrinsic |
||
| 1043 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 1044 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 1045 | LLVMMatchType<0>], |
||
| 1046 | [IntrNoMem]>; |
||
| 1047 | |||
| 1048 | class AdvSIMD_Pred2VectorArg_Intrinsic |
||
| 1049 | : DefaultAttrsIntrinsic<[llvm_anyvector_ty], |
||
| 1050 | [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, |
||
| 1051 | LLVMMatchType<0>, |
||
| 1052 | LLVMMatchType<0>], |
||
| 1053 | [IntrNoMem]>; |

class AdvSIMD_Pred3VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_Compare_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_CompareWide_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty,
                             llvm_nxv2i64_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_Saturating_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_SaturatingWithPattern_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             llvm_i32_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

class AdvSIMD_SVE_Saturating_N_Intrinsic<LLVMType T>
    : DefaultAttrsIntrinsic<[T],
                            [T, llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<LLVMType T>
    : DefaultAttrsIntrinsic<[T],
                            [T, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

class AdvSIMD_SVE_CNT_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                            [LLVMVectorOfBitcastsToInt<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_ReduceWithInit_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMVectorElementType<0>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_ShiftByImm_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

class AdvSIMD_SVE_ShiftWide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             llvm_nxv2i64_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_Unpack_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMSubdivide2VectorType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_CADD_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

class AdvSIMD_SVE_CMLA_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;

class AdvSIMD_SVE_CMLA_LANE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             llvm_i32_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

class AdvSIMD_SVE_DUP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMVectorElementType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_DUP_Unpred_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMVectorElementType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_DUPQ_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             llvm_i64_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_EXPA_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_FCVT_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_FCVTZS_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMVectorOfBitcastsToInt<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_INSR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMVectorElementType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_PTRUE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<0>>]>;

class AdvSIMD_SVE_PUNPKHI_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMHalfElementsVectorType<0>],
                            [llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_SCALE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_SCVTF_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_TSMUL_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_CNTB_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<0>>]>;

class AdvSIMD_SVE_CNTP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_anyvector_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_DOT_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide4VectorType<0>,
                             LLVMSubdivide4VectorType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_DOT_Indexed_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide4VectorType<0>,
                             LLVMSubdivide4VectorType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

class AdvSIMD_SVE_PTEST_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i1_ty],
                            [llvm_anyvector_ty,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE_TBL_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class AdvSIMD_SVE2_TBX_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class SVE2_1VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMSubdivide2VectorType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

class SVE2_2VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMSubdivide2VectorType<0>,
                             LLVMSubdivide2VectorType<0>],
                            [IntrNoMem]>;

class SVE2_2VectorArgIndexed_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMSubdivide2VectorType<0>,
                             LLVMSubdivide2VectorType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

class SVE2_2VectorArg_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide2VectorType<0>],
                            [IntrNoMem]>;

class SVE2_2VectorArg_Pred_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>,
                             LLVMSubdivide2VectorType<0>],
                            [IntrNoMem]>;

class SVE2_3VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide2VectorType<0>,
                             LLVMSubdivide2VectorType<0>],
                            [IntrNoMem]>;

class SVE2_3VectorArgIndexed_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide2VectorType<0>,
                             LLVMSubdivide2VectorType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

class SVE2_1VectorArg_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                            [llvm_anyvector_ty],
                            [IntrNoMem]>;

class SVE2_Merged1VectorArg_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                            [LLVMSubdivide2VectorType<0>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class SVE2_2VectorArg_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<
          [LLVMSubdivide2VectorType<0>],
          [llvm_anyvector_ty, LLVMMatchType<0>],
          [IntrNoMem]>;

class SVE2_Merged2VectorArg_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<
          [LLVMSubdivide2VectorType<0>],
          [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
          [IntrNoMem]>;

class SVE2_1VectorArg_Imm_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                            [llvm_anyvector_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

class SVE2_2VectorArg_Imm_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                            [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

class SVE2_CONFLICT_DETECT_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMAnyPointerType<llvm_any_ty>,
                             LLVMMatchType<1>]>;

class SVE2_3VectorArg_Indexed_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide2VectorType<0>,
                             LLVMSubdivide2VectorType<0>,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

class AdvSIMD_SVE_CDOT_LANE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMSubdivide4VectorType<0>,
                             LLVMSubdivide4VectorType<0>,
                             llvm_i32_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

// NOTE: There is no relationship between these intrinsics beyond an attempt
// to reuse currently identical class definitions.
class AdvSIMD_SVE_LOGB_Intrinsic  : AdvSIMD_SVE_CNT_Intrinsic;
class AdvSIMD_SVE2_CADD_Intrinsic : AdvSIMD_2VectorArgIndexed_Intrinsic;
class AdvSIMD_SVE2_CMLA_Intrinsic : AdvSIMD_3VectorArgIndexed_Intrinsic;

// This class of intrinsics is not intended to be useful within LLVM IR but
// is instead here to support some of the more rigid parts of the ACLE.
class Builtin_SVCVT<LLVMType OUT, LLVMType PRED, LLVMType IN>
    : DefaultAttrsIntrinsic<[OUT], [OUT, PRED, IN], [IntrNoMem]>;
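// Because every type here is concrete, defs built from this class (e.g.
// int_aarch64_sve_fcvt_f16f32 later in this file) carry no overload suffix
// in IR. A minimal sketch (value names are hypothetical):
//
//   %r = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(
//            <vscale x 8 x half> %inactive, <vscale x 4 x i1> %pg,
//            <vscale x 4 x float> %a)
//
// The first operand supplies the values of inactive result lanes, mirroring
// the ACLE's merging forms.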

}

//===----------------------------------------------------------------------===//
// SVE

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".

class AdvSIMD_SVE_2SVBoolArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                            [llvm_nxv16i1_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_3SVBoolArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                            [llvm_nxv16i1_ty, llvm_nxv16i1_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_Reduce_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_SADDV_Reduce_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty],
                            [IntrNoMem]>;

class AdvSIMD_SVE_WHILE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_anyint_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;

class AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              LLVMPointerToElt<0>,
                              LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
                            ],
                            [IntrReadMem, IntrArgMemOnly]>;

class AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              LLVMPointerToElt<0>,
                              LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
                            ],
                            [IntrInaccessibleMemOrArgMemOnly]>;

class AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              LLVMPointerToElt<0>,
                              LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
                            ],
                            [IntrReadMem, IntrArgMemOnly]>;

class AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              LLVMPointerToElt<0>,
                              LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
                            ],
                            [IntrInaccessibleMemOrArgMemOnly]>;

class AdvSIMD_GatherLoad_VS_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              llvm_anyvector_ty,
                              llvm_i64_ty
                            ],
                            [IntrReadMem]>;

class AdvSIMD_GatherLoad_VS_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              llvm_anyvector_ty,
                              llvm_i64_ty
                            ],
                            [IntrInaccessibleMemOrArgMemOnly]>;

class AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[],
                            [
                              llvm_anyvector_ty,
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              LLVMPointerToElt<0>,
                              LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
                            ],
                            [IntrWriteMem, IntrArgMemOnly]>;

class AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[],
                            [
                              llvm_anyvector_ty,
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              LLVMPointerToElt<0>,
                              LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
                            ],
                            [IntrWriteMem, IntrArgMemOnly]>;

class AdvSIMD_ScatterStore_VS_Intrinsic
    : DefaultAttrsIntrinsic<[],
                            [
                              llvm_anyvector_ty,
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              llvm_anyvector_ty, llvm_i64_ty
                            ],
                            [IntrWriteMem]>;


class SVE_gather_prf_SV
    : DefaultAttrsIntrinsic<[],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
                              llvm_ptr_ty,                                // Base address
                              llvm_anyvector_ty,                          // Offsets
                              llvm_i32_ty                                 // Prfop
                            ],
                            [IntrInaccessibleMemOrArgMemOnly, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;

class SVE_gather_prf_VS
    : DefaultAttrsIntrinsic<[],
                            [
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
                              llvm_anyvector_ty,                          // Base addresses
                              llvm_i64_ty,                                // Scalar offset
                              llvm_i32_ty                                 // Prfop
                            ],
                            [IntrInaccessibleMemOrArgMemOnly, ImmArg<ArgIndex<3>>]>;

class SVE_MatMul_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMSubdivide4VectorType<0>, LLVMSubdivide4VectorType<0>],
                            [IntrNoMem]>;

class SVE_4Vec_BF16
    : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
                            [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty],
                            [IntrNoMem]>;

class SVE_4Vec_BF16_Indexed
    : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
                            [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

//
// Loads
//

def int_aarch64_sve_ld1 : AdvSIMD_1Vec_PredLoad_Intrinsic;

def int_aarch64_sve_ld2_sret : AdvSIMD_2Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld3_sret : AdvSIMD_3Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld4_sret : AdvSIMD_4Vec_PredLoad_Intrinsic;

def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;

def int_aarch64_sve_ld1rq : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld1ro : AdvSIMD_1Vec_PredLoad_Intrinsic;
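// For orientation, a sketch of how int_aarch64_sve_ld1 appears in IR,
// overloaded on the loaded vector type (value names are hypothetical):
//
//   %v = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(
//            <vscale x 4 x i1> %pg, ptr %base)
//
// Inactive lanes of the result are zeroed.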

//
// Stores
//

def int_aarch64_sve_st1 : AdvSIMD_1Vec_PredStore_Intrinsic;
def int_aarch64_sve_st2 : AdvSIMD_2Vec_PredStore_Intrinsic;
def int_aarch64_sve_st3 : AdvSIMD_3Vec_PredStore_Intrinsic;
def int_aarch64_sve_st4 : AdvSIMD_4Vec_PredStore_Intrinsic;

def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;

//
// Prefetches
//

def int_aarch64_sve_prf
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_ptr_ty, llvm_i32_ty],
                            [IntrArgMemOnly, ImmArg<ArgIndex<2>>]>;

// Scalar + 32-bit scaled offset vector, zero-extended, packed and
// unpacked.
def int_aarch64_sve_prfb_gather_uxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfh_gather_uxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfw_gather_uxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfd_gather_uxtw_index : SVE_gather_prf_SV;

// Scalar + 32-bit scaled offset vector, sign-extended, packed and
// unpacked.
def int_aarch64_sve_prfb_gather_sxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfw_gather_sxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfh_gather_sxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfd_gather_sxtw_index : SVE_gather_prf_SV;

// Scalar + 64-bit scaled offset vector.
def int_aarch64_sve_prfb_gather_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfh_gather_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfw_gather_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfd_gather_index : SVE_gather_prf_SV;

// Vector + scalar.
def int_aarch64_sve_prfb_gather_scalar_offset : SVE_gather_prf_VS;
def int_aarch64_sve_prfh_gather_scalar_offset : SVE_gather_prf_VS;
def int_aarch64_sve_prfw_gather_scalar_offset : SVE_gather_prf_VS;
def int_aarch64_sve_prfd_gather_scalar_offset : SVE_gather_prf_VS;
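// A sketch of the contiguous form above, int_aarch64_sve_prf, which is
// overloaded on the predicate type; the trailing immediate is the prfop
// encoding (0 is assumed here to mean PLDL1KEEP per the A64 encoding, and
// the value names are hypothetical):
//
//   call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg,
//                                           ptr %base, i32 0)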

//
// Scalar to vector operations
//

def int_aarch64_sve_dup : AdvSIMD_SVE_DUP_Intrinsic;
def int_aarch64_sve_dup_x : AdvSIMD_SVE_DUP_Unpred_Intrinsic;

def int_aarch64_sve_index : AdvSIMD_SVE_Index_Intrinsic;

//
// Address calculation
//

def int_aarch64_sve_adrb : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_adrh : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_adrw : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_adrd : AdvSIMD_2VectorArg_Intrinsic;

//
// Integer arithmetic
//

def int_aarch64_sve_add : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_add_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sub_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_subr : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_pmul : AdvSIMD_2VectorArg_Intrinsic;

def int_aarch64_sve_mul : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mul_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mul_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_smulh : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smulh_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umulh : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umulh_u : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_sdiv : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sdiv_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_udiv : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_udiv_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sdivr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_udivr : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_smax : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smax_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umax : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umax_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smin : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smin_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umin : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umin_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sabd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sabd_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uabd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uabd_u : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_mad : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_msb : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_mls : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mls_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;

def int_aarch64_sve_saddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
def int_aarch64_sve_uaddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;

def int_aarch64_sve_smaxv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_umaxv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_sminv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_uminv : AdvSIMD_SVE_Reduce_Intrinsic;

def int_aarch64_sve_orv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_eorv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_andv : AdvSIMD_SVE_Reduce_Intrinsic;

def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;

def int_aarch64_sve_sdot : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_sdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;

def int_aarch64_sve_udot : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;

def int_aarch64_sve_sqadd_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_sqsub_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uqadd_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uqsub_x : AdvSIMD_2VectorArg_Intrinsic;

// Shifts

def int_aarch64_sve_asr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_asr_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_asr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
def int_aarch64_sve_asrd : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_insr : AdvSIMD_SVE_INSR_Intrinsic;
def int_aarch64_sve_lsl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsl_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsl_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
def int_aarch64_sve_lsr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsr_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;

//
// Integer comparisons
//

def int_aarch64_sve_cmpeq : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmpge : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmpgt : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmphi : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmphs : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmpne : AdvSIMD_SVE_Compare_Intrinsic;

def int_aarch64_sve_cmpeq_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpge_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpgt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmphi_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmphs_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmple_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmplo_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpls_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmplt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpne_wide : AdvSIMD_SVE_CompareWide_Intrinsic;

//
// Counting bits
//

def int_aarch64_sve_cls : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_clz : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;

//
// Counting elements
//

def int_aarch64_sve_cntb : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cnth : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cntw : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic;

def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic;
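// Sketches of the two counting flavours above (value names hypothetical):
// cntb takes a predicate-pattern immediate and is not overloaded, while
// cntp counts the active lanes of a predicate and is overloaded on its type.
//
//   %bytes = call i64 @llvm.aarch64.sve.cntb(i32 31)  ; i32 31 = "all" pattern
//   %n     = call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %pg,
//                                                   <vscale x 4 x i1> %p)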

//
// FFR manipulation
//

def int_aarch64_sve_rdffr : ClangBuiltin<"__builtin_sve_svrdffr">, DefaultAttrsIntrinsic<[llvm_nxv16i1_ty], [], [IntrReadMem, IntrInaccessibleMemOnly]>;
def int_aarch64_sve_rdffr_z : ClangBuiltin<"__builtin_sve_svrdffr_z">, DefaultAttrsIntrinsic<[llvm_nxv16i1_ty], [llvm_nxv16i1_ty], [IntrReadMem, IntrInaccessibleMemOnly]>;
def int_aarch64_sve_setffr : ClangBuiltin<"__builtin_sve_svsetffr">, DefaultAttrsIntrinsic<[], [], [IntrWriteMem, IntrInaccessibleMemOnly]>;
def int_aarch64_sve_wrffr : ClangBuiltin<"__builtin_sve_svwrffr">, DefaultAttrsIntrinsic<[], [llvm_nxv16i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly]>;
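// A sketch of the usual first-faulting idiom these support (value names
// hypothetical): set the FFR, issue a first-faulting load, then read back
// which lanes actually loaded.
//
//   call void @llvm.aarch64.sve.setffr()
//   %v   = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(
//              <vscale x 16 x i1> %pg, ptr %base)
//   %ffr = call <vscale x 16 x i1> @llvm.aarch64.sve.rdffr()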

//
// Saturating scalar arithmetic
//

def int_aarch64_sve_sqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqdecp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_sqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
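// The two trailing immediates on the *WithPattern* forms are the predicate
// pattern and the element-count multiplier. A sketch (i32 31 is the "all
// elements" pattern, the multiplier here is 2, and %x is hypothetical):
//
//   %r = call i32 @llvm.aarch64.sve.sqdecb.n32(i32 %x, i32 31, i32 2)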

def int_aarch64_sve_sqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqincp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_sqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

def int_aarch64_sve_uqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqdecp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_uqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

def int_aarch64_sve_uqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqincp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_uqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

//
// Reversal
//

def int_aarch64_sve_rbit : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_revb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_revh : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_revw : AdvSIMD_Merged1VectorArg_Intrinsic;

//
// Permutations and selection
//

def int_aarch64_sve_clasta : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_clasta_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_clastb : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_clastb_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_compact : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_dupq_lane : AdvSIMD_SVE_DUPQ_Intrinsic;
def int_aarch64_sve_ext : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sel : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lasta : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_lastb : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_rev : AdvSIMD_1VectorArg_Intrinsic;
def int_aarch64_sve_rev_b16 : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_rev_b32 : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_rev_b64 : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_splice : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_sunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_tbl : AdvSIMD_SVE_TBL_Intrinsic;
def int_aarch64_sve_trn1 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn1_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn2_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn2q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uzp1 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp1_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp2_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp2q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip1 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip1_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip2_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip2q : AdvSIMD_2VectorArg_Intrinsic;
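// For orientation, a sketch of a table lookup via int_aarch64_sve_tbl
// (value names hypothetical; out-of-range indices produce zero lanes):
//
//   %r = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(
//            <vscale x 4 x i32> %data, <vscale x 4 x i32> %indices)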

//
// Logical operations
//

def int_aarch64_sve_and : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_cnot : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_eor : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_not : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_orr : AdvSIMD_Pred2VectorArg_Intrinsic;

//
// Conversion
//

def int_aarch64_sve_sxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sxth : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sxtw : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_uxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_uxth : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_uxtw : AdvSIMD_Merged1VectorArg_Intrinsic;

//
// While comparisons
//

def int_aarch64_sve_whilele : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilelo : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilels : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilelt : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilege : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilegt : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilehs : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilehi : AdvSIMD_SVE_WHILE_Intrinsic;
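// A sketch of the loop-control idiom these enable (value names
// hypothetical): build a predicate that is true for lanes where
// %i + lane < %n, overloaded on result and scalar types.
//
//   %p = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %i, i64 %n)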

//
// Floating-point arithmetic
//

def int_aarch64_sve_fabd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fabs : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_fadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fcadd : AdvSIMD_SVE_CADD_Intrinsic;
def int_aarch64_sve_fcmla : AdvSIMD_SVE_CMLA_Intrinsic;
def int_aarch64_sve_fcmla_lane : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
def int_aarch64_sve_fdiv : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fdivr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fexpa_x : AdvSIMD_SVE_EXPA_Intrinsic;
def int_aarch64_sve_fmad : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmax : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxnm : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmin : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminnm : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmla : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmla_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_fmls : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmls_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_fmsb : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmul : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmulx : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fneg : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_fmul_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_fnmad : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmla : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmls : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmsb : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_frecpe_x : AdvSIMD_1VectorArg_Intrinsic;
def int_aarch64_sve_frecps_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_frecpx : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frinta : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frinti : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintm : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintn : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintp : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintx : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintz : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frsqrte_x : AdvSIMD_1VectorArg_Intrinsic;
def int_aarch64_sve_frsqrts_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_fscale : AdvSIMD_SVE_SCALE_Intrinsic;
def int_aarch64_sve_fsqrt : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_fsub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_ftmad_x : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_ftsmul_x : AdvSIMD_SVE_TSMUL_Intrinsic;
def int_aarch64_sve_ftssel_x : AdvSIMD_SVE_TSMUL_Intrinsic;

//
// Floating-point reductions
//

def int_aarch64_sve_fadda : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_faddv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fmaxv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fminv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fminnmv : AdvSIMD_SVE_Reduce_Intrinsic;

//
// Floating-point conversions
//

def int_aarch64_sve_fcvt : AdvSIMD_SVE_FCVT_Intrinsic;
def int_aarch64_sve_fcvtzs : AdvSIMD_SVE_FCVTZS_Intrinsic;
def int_aarch64_sve_fcvtzu : AdvSIMD_SVE_FCVTZS_Intrinsic;
def int_aarch64_sve_scvtf : AdvSIMD_SVE_SCVTF_Intrinsic;
def int_aarch64_sve_ucvtf : AdvSIMD_SVE_SCVTF_Intrinsic;

//
// Floating-point comparisons
//

def int_aarch64_sve_facge : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_facgt : AdvSIMD_SVE_Compare_Intrinsic;

def int_aarch64_sve_fcmpeq : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpge : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpgt : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpne : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpuo : AdvSIMD_SVE_Compare_Intrinsic;

def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzs_i32f64 : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvtzs_i64f16 : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzs_i64f32 : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvt_bf16f32 : Builtin_SVCVT<llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvtnt_bf16f32 : Builtin_SVCVT<llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvtzu_i32f16 : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzu_i32f64 : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvtzu_i64f16 : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzu_i64f32 : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvt_f16f32 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvt_f16f64 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvt_f32f64 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;

def int_aarch64_sve_fcvt_f32f16 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvt_f64f16 : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvt_f64f32 : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvtlt_f32f16 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtlt_f64f32 : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvtnt_f16f32 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvtnt_f32f64 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;

def int_aarch64_sve_fcvtx_f32f64 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvtxnt_f32f64 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;

def int_aarch64_sve_scvtf_f16i32 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
def int_aarch64_sve_scvtf_f16i64 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_scvtf_f32i64 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_scvtf_f64i32 : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;

def int_aarch64_sve_ucvtf_f16i32 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
def int_aarch64_sve_ucvtf_f16i64 : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_ucvtf_f32i64 : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_ucvtf_f64i32 : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;

//
// Predicate creation
//

def int_aarch64_sve_ptrue : AdvSIMD_SVE_PTRUE_Intrinsic;
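// A sketch; the immediate is a predicate pattern, e.g. i32 31 selects all
// elements:
//
//   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)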

//
// Predicate operations
//

def int_aarch64_sve_and_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_brka : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_brka_z : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_brkb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_brkb_z : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_brkn_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_brkpa_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_brkpb_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eor_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nand_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nor_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orn_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orr_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_pfirst : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_pnext : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;
def int_aarch64_sve_punpklo : AdvSIMD_SVE_PUNPKHI_Intrinsic;

//
// Testing predicates
//

def int_aarch64_sve_ptest_any : AdvSIMD_SVE_PTEST_Intrinsic;
def int_aarch64_sve_ptest_first : AdvSIMD_SVE_PTEST_Intrinsic;
def int_aarch64_sve_ptest_last : AdvSIMD_SVE_PTEST_Intrinsic;

//
// Reinterpreting data
//

def int_aarch64_sve_convert_from_svbool : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                                [llvm_nxv16i1_ty],
                                                                [IntrNoMem]>;

def int_aarch64_sve_convert_to_svbool : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                                                              [llvm_anyvector_ty],
                                                              [IntrNoMem]>;
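// These reinterpret a predicate at a different element granularity rather
// than computing anything. A round-trip sketch (value names hypothetical):
//
//   %b  = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(
//             <vscale x 4 x i1> %p)
//   %p2 = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(
//             <vscale x 16 x i1> %b)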

//
// Gather loads: scalar base + vector offsets
//

// 64-bit unscaled offsets
def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 64-bit scaled offsets
def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 32-bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;

// 32-bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
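// A sketch of the 64-bit unscaled form (value names hypothetical); each
// active lane loads from %base plus that lane's offset:
//
//   %v = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(
//            <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets)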
||
| 2114 | |||
| 2115 | // |
||
| 2116 | // Gather loads: vector base + scalar offset |
||
| 2117 | // |
||
| 2118 | |||
| 2119 | def int_aarch64_sve_ld1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic; |
||
| 2120 | |||
| 2121 | |||
| 2122 | // |
||
| 2123 | // First-faulting gather loads: scalar base + vector offsets |
||
| 2124 | // |
||
| 2125 | |||
| 2126 | // 64 bit unscaled offsets |
||
| 2127 | def int_aarch64_sve_ldff1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic; |
||
| 2128 | |||
| 2129 | // 64 bit scaled offsets |
||
| 2130 | def int_aarch64_sve_ldff1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic; |
||
| 2131 | |||
| 2132 | // 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits |
||
| 2133 | def int_aarch64_sve_ldff1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic; |
||
| 2134 | def int_aarch64_sve_ldff1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic; |
||
| 2135 | |||
| 2136 | // 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits |
||
| 2137 | def int_aarch64_sve_ldff1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic; |
||
| 2138 | def int_aarch64_sve_ldff1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic; |
||
| 2139 | |||
| 2140 | // |
||
| 2141 | // First-faulting gather loads: vector base + scalar offset |
||
| 2142 | // |
||
| 2143 | |||
| 2144 | def int_aarch64_sve_ldff1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_WriteFFR_Intrinsic; |
||
| 2145 | |||

//
// Non-temporal gather loads: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_ldnt1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 64 bit indices
def int_aarch64_sve_ldnt1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
def int_aarch64_sve_ldnt1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;

//
// Non-temporal gather loads: vector base + scalar offset
//

def int_aarch64_sve_ldnt1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic;

//
// Scatter stores: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 64 bit scaled offsets
def int_aarch64_sve_st1_scatter_index
    : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

def int_aarch64_sve_st1_scatter_uxtw
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw_index
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

def int_aarch64_sve_st1_scatter_uxtw_index
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

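// For example (one overload choice), scattering i64 lanes to base plus
// unscaled byte offsets is:
//
//   call void @llvm.aarch64.sve.st1.scatter.nxv2i64(
//       <vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base,
//       <vscale x 2 x i64> %offsets)
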
//
// Scatter stores: vector base + scalar offset
//

def int_aarch64_sve_st1_scatter_scalar_offset : AdvSIMD_ScatterStore_VS_Intrinsic;

//
// Non-temporal scatter stores: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_stnt1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 64 bit indices
def int_aarch64_sve_stnt1_scatter_index
    : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
def int_aarch64_sve_stnt1_scatter_uxtw : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

//
// Non-temporal scatter stores: vector base + scalar offset
//

def int_aarch64_sve_stnt1_scatter_scalar_offset : AdvSIMD_ScatterStore_VS_Intrinsic;

//
// SVE2 - Uniform DSP operations
//

def int_aarch64_sve_saba : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_shadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_shsub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_shsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sli : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqabs : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqdmulh : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_sqdmulh_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqneg : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmlah : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmlah_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqrdmlsh : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmlsh_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqrdmulh : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmulh_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqrshl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqshl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqshlu : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_sqsub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_srhadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sri : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_srshl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_srshr : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_srsra : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_ssra : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_suqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uaba : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_uhadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uhsub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uhsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqrshl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqshl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqsub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_urecpe : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_urhadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_urshl : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_urshr : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_ursqrte : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_ursra : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_usqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_usra : AdvSIMD_2VectorArgIndexed_Intrinsic;
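
// Illustrative overloads (value names and element types are arbitrary): a
// predicated halving add, and an immediate-shift form whose last operand is
// a constant shift amount:
//
//   %r = call <vscale x 16 x i8> @llvm.aarch64.sve.shadd.nxv16i8(
//            <vscale x 16 x i1> %pg, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
//   %s = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(
//            <vscale x 8 x i1> %pg, <vscale x 8 x i16> %zn, i32 3)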

//
// SVE2 - Widening DSP operations
//

def int_aarch64_sve_sabalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sabalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sabdlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_sabdlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_saddlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_saddlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_saddwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_saddwt : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_sshllb : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_sshllt : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssublb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssublt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssubwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_ssubwt : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_uabalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_uabalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_uabdlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uabdlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uaddlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uaddlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uaddwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_uaddwt : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_ushllb : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_ushllt : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_usublb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_usublt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_usubwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_usubwt : SVE2_2VectorArg_Wide_Intrinsic;
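
// For example (one overload choice), a bottom-lane widening absolute
// difference and accumulate from i8 into i16 is:
//
//   %r = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(
//            <vscale x 8 x i16> %acc, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)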

//
// SVE2 - Non-widening pairwise arithmetic
//

def int_aarch64_sve_addp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_faddp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smaxp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sminp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umaxp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uminp : AdvSIMD_Pred2VectorArg_Intrinsic;

//
// SVE2 - Widening pairwise arithmetic
//

def int_aarch64_sve_sadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;
def int_aarch64_sve_uadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;

//
// SVE2 - Uniform complex integer arithmetic
//

def int_aarch64_sve_cadd_x : AdvSIMD_SVE2_CADD_Intrinsic;
def int_aarch64_sve_sqcadd_x : AdvSIMD_SVE2_CADD_Intrinsic;
def int_aarch64_sve_cmla_x : AdvSIMD_SVE2_CMLA_Intrinsic;
def int_aarch64_sve_cmla_lane_x : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
def int_aarch64_sve_sqrdcmlah_x : AdvSIMD_SVE2_CMLA_Intrinsic;
def int_aarch64_sve_sqrdcmlah_lane_x : AdvSIMD_SVE_CMLA_LANE_Intrinsic;

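// The trailing immediate selects the rotation. For example (one overload
// choice; cadd accepts rotations of 90 or 270 only):
//
//   %r = call <vscale x 8 x i16> @llvm.aarch64.sve.cadd.x.nxv8i16(
//            <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm, i32 90)
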
//
// SVE2 - Widening complex integer arithmetic
//

def int_aarch64_sve_saddlbt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssublbt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssubltb : SVE2_2VectorArg_Long_Intrinsic;

//
// SVE2 - Widening complex integer dot product
//

def int_aarch64_sve_cdot : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
def int_aarch64_sve_cdot_lane : AdvSIMD_SVE_CDOT_LANE_Intrinsic;

//
// SVE2 - Floating-point widening multiply-accumulate
//

def int_aarch64_sve_fmlalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlalb_lane : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_fmlalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlalt_lane : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_fmlslb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlslb_lane : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_fmlslt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlslt_lane : SVE2_3VectorArgIndexed_Long_Intrinsic;

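// For example (one overload choice), a bottom-lane widening multiply-add
// from f16 into f32 is:
//
//   %r = call <vscale x 4 x float> @llvm.aarch64.sve.fmlalb.nxv4f32(
//            <vscale x 4 x float> %acc, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
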
//
// SVE2 - Floating-point integer binary logarithm
//

def int_aarch64_sve_flogb : AdvSIMD_SVE_LOGB_Intrinsic;

//
// SVE2 - Vector histogram count
//

def int_aarch64_sve_histcnt : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_histseg : AdvSIMD_2VectorArg_Intrinsic;

//
// SVE2 - Character match
//

def int_aarch64_sve_match : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_nmatch : AdvSIMD_SVE_Compare_Intrinsic;

//
// SVE2 - Unary narrowing operations
//

def int_aarch64_sve_sqxtnb : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtnt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtunb : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtunt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_uqxtnb : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_uqxtnt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;

//
// SVE2 - Binary narrowing DSP operations
//
def int_aarch64_sve_addhnb : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_addhnt : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_raddhnb : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_raddhnt : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_subhnb : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_subhnt : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_rsubhnb : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_rsubhnt : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

// Narrowing shift right
def int_aarch64_sve_shrnb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_shrnt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_rshrnb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_rshrnt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

// Saturating shift right - signed input/output
def int_aarch64_sve_sqshrnb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqshrnt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_sqrshrnb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrnt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

// Saturating shift right - unsigned input/output
def int_aarch64_sve_uqshrnb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_uqshrnt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_uqrshrnb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_uqrshrnt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

// Saturating shift right - signed input, unsigned output
def int_aarch64_sve_sqshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

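// For example (one overload choice; the suffix here names the wide input
// type, which is an assumption about the mangling), a saturating narrowing
// right shift of i16 lanes into the even i8 lanes is:
//
//   %r = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnb.nxv8i16(
//            <vscale x 8 x i16> %zn, i32 4)
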
// SVE2 MLA LANE.
def int_aarch64_sve_smlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_smullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_umullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_umullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_sqdmlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_sqdmullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;

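// For example (one overload choice), multiplying the bottom i16 lanes by
// lane 1 of %zm and accumulating into i32 is:
//
//   %r = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(
//            <vscale x 4 x i32> %acc, <vscale x 8 x i16> %zn,
//            <vscale x 8 x i16> %zm, i32 1)
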
// SVE2 MLA Unpredicated.
def int_aarch64_sve_smlalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smlalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smlslb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smlslt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlslb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlslt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smullb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_smullt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_umullb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_umullt : SVE2_2VectorArg_Long_Intrinsic;

def int_aarch64_sve_sqdmlalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlslb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlslt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmullb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmullt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlalbt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlslbt : SVE2_3VectorArg_Long_Intrinsic;

// SVE2 ADDSUB Long Unpredicated.
def int_aarch64_sve_adclb : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_adclt : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sbclb : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sbclt : AdvSIMD_3VectorArg_Intrinsic;

//
// SVE2 - Polynomial arithmetic
//
def int_aarch64_sve_eorbt : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_eortb : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_pmullb_pair : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_pmullt_pair : AdvSIMD_2VectorArg_Intrinsic;

//
// SVE2 bitwise ternary operations.
//
def int_aarch64_sve_eor3 : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bcax : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bsl : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bsl1n : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bsl2n : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_nbsl : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_xar : AdvSIMD_2VectorArgIndexed_Intrinsic;

//
// SVE2 - Optional AES, SHA-3 and SM4
//

def int_aarch64_sve_aesd : ClangBuiltin<"__builtin_sve_svaesd_u8">,
                           DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                                 [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                                                 [IntrNoMem]>;
def int_aarch64_sve_aesimc : ClangBuiltin<"__builtin_sve_svaesimc_u8">,
                             DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                                   [llvm_nxv16i8_ty],
                                                   [IntrNoMem]>;
def int_aarch64_sve_aese : ClangBuiltin<"__builtin_sve_svaese_u8">,
                           DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                                 [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                                                 [IntrNoMem]>;
def int_aarch64_sve_aesmc : ClangBuiltin<"__builtin_sve_svaesmc_u8">,
                            DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                                  [llvm_nxv16i8_ty],
                                                  [IntrNoMem]>;
def int_aarch64_sve_rax1 : ClangBuiltin<"__builtin_sve_svrax1_u64">,
                           DefaultAttrsIntrinsic<[llvm_nxv2i64_ty],
                                                 [llvm_nxv2i64_ty, llvm_nxv2i64_ty],
                                                 [IntrNoMem]>;
def int_aarch64_sve_sm4e : ClangBuiltin<"__builtin_sve_svsm4e_u32">,
                           DefaultAttrsIntrinsic<[llvm_nxv4i32_ty],
                                                 [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
                                                 [IntrNoMem]>;
def int_aarch64_sve_sm4ekey : ClangBuiltin<"__builtin_sve_svsm4ekey_u32">,
                              DefaultAttrsIntrinsic<[llvm_nxv4i32_ty],
                                                    [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
                                                    [IntrNoMem]>;
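
// These are not overloaded; for example, one AES decryption round on the
// fixed nxv16i8 type is:
//
//   %r = call <vscale x 16 x i8> @llvm.aarch64.sve.aesd(
//            <vscale x 16 x i8> %state, <vscale x 16 x i8> %key)
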
//
// SVE2 - Extended table lookup/permute
//

def int_aarch64_sve_tbl2 : AdvSIMD_SVE2_TBX_Intrinsic;
def int_aarch64_sve_tbx : AdvSIMD_SVE2_TBX_Intrinsic;

//
// SVE2 - Optional bit permutation
//

def int_aarch64_sve_bdep_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_bext_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_bgrp_x : AdvSIMD_2VectorArg_Intrinsic;

//
// SVE ACLE: 7.3. INT8 matrix multiply extensions
//
def int_aarch64_sve_ummla : SVE_MatMul_Intrinsic;
def int_aarch64_sve_smmla : SVE_MatMul_Intrinsic;
def int_aarch64_sve_usmmla : SVE_MatMul_Intrinsic;

def int_aarch64_sve_usdot : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_usdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
def int_aarch64_sve_sudot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;

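// For example (assuming the overload follows the i32 accumulator type), a
// signed 8-bit integer matrix multiply-accumulate is:
//
//   %r = call <vscale x 4 x i32> @llvm.aarch64.sve.smmla.nxv4i32(
//            <vscale x 4 x i32> %acc, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
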
//
// SVE ACLE: 7.4/5. FP64/FP32 matrix multiply extensions
//
def int_aarch64_sve_fmmla : AdvSIMD_3VectorArg_Intrinsic;

//
// SVE ACLE: 7.2. BFloat16 extensions
//

def int_aarch64_sve_bfdot : SVE_4Vec_BF16;
def int_aarch64_sve_bfmlalb : SVE_4Vec_BF16;
def int_aarch64_sve_bfmlalt : SVE_4Vec_BF16;

def int_aarch64_sve_bfmmla : SVE_4Vec_BF16;

def int_aarch64_sve_bfdot_lane_v2 : SVE_4Vec_BF16_Indexed;
def int_aarch64_sve_bfmlalb_lane_v2 : SVE_4Vec_BF16_Indexed;
def int_aarch64_sve_bfmlalt_lane_v2 : SVE_4Vec_BF16_Indexed;
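
// The BF16 forms use fixed types rather than overloads; for example:
//
//   %r = call <vscale x 4 x float> @llvm.aarch64.sve.bfdot(
//            <vscale x 4 x float> %acc, <vscale x 8 x bfloat> %zn,
//            <vscale x 8 x bfloat> %zm)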

}

//
// SVE2 - Contiguous conflict detection
//

def int_aarch64_sve_whilerw_b : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilerw_h : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilerw_s : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilerw_d : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_b : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_h : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_s : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_d : SVE2_CONFLICT_DETECT_Intrinsic;

// Scalable Matrix Extension (SME) Intrinsics
let TargetPrefix = "aarch64" in {
class SME_Load_Store_Intrinsic<LLVMType pred_ty>
    : DefaultAttrsIntrinsic<[],
        [pred_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;

// Loads
def int_aarch64_sme_ld1b_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
def int_aarch64_sme_ld1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
def int_aarch64_sme_ld1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
def int_aarch64_sme_ld1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
def int_aarch64_sme_ld1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
def int_aarch64_sme_ld1b_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
def int_aarch64_sme_ld1h_vert : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
def int_aarch64_sme_ld1w_vert : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
def int_aarch64_sme_ld1d_vert : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
def int_aarch64_sme_ld1q_vert : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;

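// For example, loading one horizontal slice of a 32-bit tile (the tile
// number is an immediate; the slice index is a run-time value):
//
//   call void @llvm.aarch64.sme.ld1w.horiz(
//       <vscale x 4 x i1> %pg, ptr %src, i32 0, i32 %slice)
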
// Stores
def int_aarch64_sme_st1b_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
def int_aarch64_sme_st1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
def int_aarch64_sme_st1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
def int_aarch64_sme_st1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
def int_aarch64_sme_st1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
def int_aarch64_sme_st1b_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
def int_aarch64_sme_st1h_vert : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
def int_aarch64_sme_st1w_vert : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
def int_aarch64_sme_st1d_vert : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
def int_aarch64_sme_st1q_vert : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;

// Spill + fill
def int_aarch64_sme_ldr : DefaultAttrsIntrinsic<
    [], [llvm_i32_ty, llvm_ptr_ty]>;
def int_aarch64_sme_str : DefaultAttrsIntrinsic<
    [], [llvm_i32_ty, llvm_ptr_ty]>;

class SME_TileToVector_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
        [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
         llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;
class SME_VectorToTile_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty, llvm_i32_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
         llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>;

def int_aarch64_sme_read_horiz : SME_TileToVector_Intrinsic;
def int_aarch64_sme_read_vert : SME_TileToVector_Intrinsic;
def int_aarch64_sme_write_horiz : SME_VectorToTile_Intrinsic;
def int_aarch64_sme_write_vert : SME_VectorToTile_Intrinsic;

def int_aarch64_sme_readq_horiz : SME_TileToVector_Intrinsic;
def int_aarch64_sme_readq_vert : SME_TileToVector_Intrinsic;
def int_aarch64_sme_writeq_horiz : SME_VectorToTile_Intrinsic;
def int_aarch64_sme_writeq_vert : SME_VectorToTile_Intrinsic;

def int_aarch64_sme_zero : DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;

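// The immediate argument of int_aarch64_sme_zero is a mask selecting the
// 64-bit ZA tiles to clear; for example, zeroing all of ZA:
//
//   call void @llvm.aarch64.sme.zero(i32 255)
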
class SME_OuterProduct_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
         LLVMMatchType<0>,
         llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>;

def int_aarch64_sme_mopa : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_mops : SME_OuterProduct_Intrinsic;

def int_aarch64_sme_mopa_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_mops_wide : SME_OuterProduct_Intrinsic;

def int_aarch64_sme_smopa_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_smops_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_umopa_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_umops_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_sumopa_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_sumops_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_usmopa_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_usmops_wide : SME_OuterProduct_Intrinsic;

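// For example (one overload choice), a predicated FP32 outer product
// accumulated into tile 0 is:
//
//   call void @llvm.aarch64.sme.mopa.nxv4f32(
//       i32 0, <vscale x 4 x i1> %pn, <vscale x 4 x i1> %pm,
//       <vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
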
class SME_AddVectorToTile_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
         llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>;

def int_aarch64_sme_addha : SME_AddVectorToTile_Intrinsic;
def int_aarch64_sme_addva : SME_AddVectorToTile_Intrinsic;

//
// Counting elements
//

class AdvSIMD_SME_CNTSB_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem]>;

def int_aarch64_sme_cntsb : AdvSIMD_SME_CNTSB_Intrinsic;
def int_aarch64_sme_cntsh : AdvSIMD_SME_CNTSB_Intrinsic;
def int_aarch64_sme_cntsw : AdvSIMD_SME_CNTSB_Intrinsic;
def int_aarch64_sme_cntsd : AdvSIMD_SME_CNTSB_Intrinsic;

//
// PSTATE Functions
//

def int_aarch64_sme_get_tpidr2
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [],
                            [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_sme_set_tpidr2
    : DefaultAttrsIntrinsic<[], [llvm_i64_ty],
                            [IntrNoMem, IntrHasSideEffects]>;

def int_aarch64_sme_za_enable
    : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_sme_za_disable
    : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

//
// Clamp
//

def int_aarch64_sve_sclamp : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_uclamp : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_fclamp : AdvSIMD_3VectorArg_Intrinsic;

//
// Reversal
//

def int_aarch64_sve_revd : AdvSIMD_Merged1VectorArg_Intrinsic;

//
// Predicate selection
//

def int_aarch64_sve_psel
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<0>, llvm_i32_ty]>;

//
// Predicate-pair intrinsics
//
foreach cmp = ["ge", "gt", "hi", "hs", "le", "lo", "ls", "lt"] in {
  def int_aarch64_sve_while # cmp # _x2
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                              [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
}

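// Each expansion returns a pair of predicates; for example (one overload
// choice):
//
//   %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> }
//             @llvm.aarch64.sve.whilege.x2.nxv4i1(i64 %i, i64 %n)
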
//
// SME2 Intrinsics
//

class SME2_Matrix_ArrayVector_Single_Single_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>],
        []>;

class SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>,
         LLVMMatchType<0>],
        []>;

class SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
         LLVMMatchType<0>],
        []>;

class SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>,
         LLVMMatchType<0>, LLVMMatchType<0>],
        []>;

class SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
         LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
        []>;

class SME2_Matrix_ArrayVector_Single_Index_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty,
         LLVMMatchType<0>, llvm_i32_ty],
        [ImmArg<ArgIndex<3>>]>;

class SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>,
         LLVMMatchType<0>, llvm_i32_ty],
        [ImmArg<ArgIndex<4>>]>;

class SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
         LLVMMatchType<0>, llvm_i32_ty],
        [ImmArg<ArgIndex<6>>]>;

class SME2_ZA_Write_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>],
        []>;

class SME2_ZA_Write_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[],
        [llvm_i32_ty,
         llvm_anyvector_ty, LLVMMatchType<0>,
         LLVMMatchType<0>, LLVMMatchType<0>],
        []>;

class SME2_CVT_VG2_SINGLE_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;

class SME2_CVT_VG2_SINGLE_BF16_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_nxv8bf16_ty],
                            [llvm_nxv4f32_ty, llvm_nxv4f32_ty],
                            [IntrNoMem]>;

class SME2_CVT_VG4_SINGLE_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide4VectorType<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

class SME2_CVT_FtoI_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                            [LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class SME2_CVT_ItoF_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;

class SME2_CVT_FtoI_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>,
                             LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

class SME2_CVT_ItoF_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>,
                             LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

//
// Multi-vector fused multiply-add/subtract
//

def int_aarch64_sme_fmla_single_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
def int_aarch64_sme_fmls_single_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
def int_aarch64_sme_fmla_single_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
def int_aarch64_sme_fmls_single_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

def int_aarch64_sme_fmla_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
def int_aarch64_sme_fmls_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
def int_aarch64_sme_fmla_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
def int_aarch64_sme_fmls_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

def int_aarch64_sme_fmla_lane_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
def int_aarch64_sme_fmls_lane_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
def int_aarch64_sme_fmla_lane_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
def int_aarch64_sme_fmls_lane_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

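// For example (one overload choice), multiplying a pair of Z vectors by a
// single vector and accumulating into ZA at vector-group slice %slice:
//
//   call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv4f32(
//       i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1,
//       <vscale x 4 x float> %zm)
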
//
// Multi-vector multiply-add/subtract long
//

foreach ty = ["f", "s", "u"] in {
  foreach instr = ["mlal", "mlsl"] in {
    def int_aarch64_sme_ # ty # instr # _single_vg2x1 : SME2_Matrix_ArrayVector_Single_Single_Intrinsic;
    def int_aarch64_sme_ # ty # instr # _single_vg2x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
    def int_aarch64_sme_ # ty # instr # _single_vg2x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

    def int_aarch64_sme_ # ty # instr # _vg2x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
    def int_aarch64_sme_ # ty # instr # _vg2x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

    def int_aarch64_sme_ # ty # instr # _lane_vg2x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
    def int_aarch64_sme_ # ty # instr # _lane_vg2x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
    def int_aarch64_sme_ # ty # instr # _lane_vg2x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
  }
}

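// The nested foreach above expands to names such as
// int_aarch64_sme_fmlal_single_vg2x1, int_aarch64_sme_smlsl_lane_vg2x2 and
// int_aarch64_sme_umlal_vg2x4.
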
//
// Multi-vector vertical dot-products
//

def int_aarch64_sme_fvdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;

foreach ty = ["s", "u"] in {
  def int_aarch64_sme_ # ty # vdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
  def int_aarch64_sme_ # ty # vdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
  def int_aarch64_sme_ # ty # vdot_lane_za64_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
}

def int_aarch64_sme_suvdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
def int_aarch64_sme_usvdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

//
// Multi-vector floating-point CVT from single-precision to interleaved half-precision/BFloat16
//
def int_aarch64_sve_fcvtn_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_bfcvtn_x2 : SME2_CVT_VG2_SINGLE_BF16_Intrinsic;

//
// Multi-vector convert to/from floating-point
//
def int_aarch64_sve_fcvt_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_bfcvt_x2 : SME2_CVT_VG2_SINGLE_BF16_Intrinsic;
def int_aarch64_sve_fcvts_x2 : SME2_CVT_FtoI_VG2_Intrinsic;
def int_aarch64_sve_fcvtu_x2 : SME2_CVT_FtoI_VG2_Intrinsic;
def int_aarch64_sve_scvtf_x2 : SME2_CVT_ItoF_VG2_Intrinsic;
def int_aarch64_sve_ucvtf_x2 : SME2_CVT_ItoF_VG2_Intrinsic;
def int_aarch64_sve_fcvts_x4 : SME2_CVT_FtoI_VG4_Intrinsic;
def int_aarch64_sve_fcvtu_x4 : SME2_CVT_FtoI_VG4_Intrinsic;
def int_aarch64_sve_scvtf_x4 : SME2_CVT_ItoF_VG4_Intrinsic;
def int_aarch64_sve_ucvtf_x4 : SME2_CVT_ItoF_VG4_Intrinsic;

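// For example (assuming the suffix names the single-precision input type),
// narrowing and interleaving two f32 vectors into one f16 vector is:
//
//   %r = call <vscale x 8 x half> @llvm.aarch64.sve.fcvtn.x2.nxv4f32(
//            <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1)
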
//
// Multi-vector saturating extract narrow
//
def int_aarch64_sve_sqcvt_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_uqcvt_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvtu_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvt_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
def int_aarch64_sve_uqcvt_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvtu_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;

//
// Multi-vector saturating extract narrow and interleave
//
def int_aarch64_sve_sqcvtn_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_uqcvtn_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvtun_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvtn_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
def int_aarch64_sve_uqcvtn_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvtun_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;

//
// Multi-Single add/sub
//
def int_aarch64_sme_add_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
def int_aarch64_sme_sub_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
def int_aarch64_sme_add_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
def int_aarch64_sme_sub_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

//
// Multi-Multi add/sub
//
def int_aarch64_sme_add_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
def int_aarch64_sme_sub_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
def int_aarch64_sme_add_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
def int_aarch64_sme_sub_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
}