//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of the AtomicExpand pass via the emitMaskedAtomic*Intrinsic hooks).
  // Their names and semantics could change in the future.

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
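  // As an illustrative sketch (operand values hypothetical), an i32 exchange
  // on RV32 would appear in IR as:
  //   %res = call i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0(
  //              ptr %addr, i32 %oparg, i32 %mask, i32 7)
  // where the trailing immediate encodes the atomic ordering
  // (7 = seq_cst in LLVM's AtomicOrdering numbering).
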
  // Signed min and max need an extra operand to do sign extension with.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable]>;
  class BitManipGPRGPRIntrinsics
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// May-Be-Operations

let TargetPrefix = "riscv" in {

  // Zimop
  def int_riscv_mopr
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
  def int_riscv_moprr
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
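  // Illustrative IR form on RV64 (the SEW/LMUL immediates are example
  // values; in the vtype encodings 2 selects e32 and 1 selects m2):
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)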
  def int_riscv_vsetvli
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [/* AVL */ LLVMMatchType<0>,
                             /* VSEW */ LLVMMatchType<0>,
                             /* VLMUL */ LLVMMatchType<0>],
                            [IntrNoMem,
                             ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [/* VSEW */ LLVMMatchType<0>,
                             /* VLMUL */ LLVMMatchType<0>],
                            [IntrNoMem,
                             ImmArg<ArgIndex<0>>,
                             ImmArg<ArgIndex<1>>]>;

  // For unit stride mask load
  // Input: (pointer, vl)
  class RISCVUSMLoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_ptr_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<0>>, IntrReadMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSLoadMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>,
                             IntrReadMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
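  // Illustrative IR for a masked unit-stride load instance (the vle
  // intrinsics defined later in this file; types and values are examples):
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(
  //            <vscale x 2 x i32> %maskedoff, ptr %p,
  //            <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  // The trailing policy immediate packs tail/mask agnosticism
  // (bit 0 = tail agnostic, bit 1 = mask agnostic, so 3 = "tama").
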
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSLoadMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             llvm_anyvector_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_ptr_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_ptr_ty,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_ptr_ty,
                             llvm_anyvector_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
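  // Illustrative IR for an indexed store instance (e.g. the vsoxei
  // intrinsics defined later in this file; types are examples only):
  //   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(
  //            <vscale x 2 x i32> %val, ptr %p,
  //            <vscale x 2 x i64> %index, i64 %vl)
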
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vl, policy)
  class RISCVUnaryAAUnMaskedZvk<bit IsVS>
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
                             llvm_anyint_ty,
                             !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }

  multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> {
    if HasVV then
      def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

    if HasVS then
      def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  class RISCVUnaryAAMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVUnaryAAUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<2>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask).
  // Input: (vector_in, vector_in, mask, frm, vl, policy)
  class RISCVUnaryAAMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, mask, vl)
  class RISCVCompress
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
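  // Note: in the *RoundingMode classes above and below, the frm operand uses
  // the RISC-V frm encoding (0 = rne, 1 = rtz, 2 = rdn, 3 = rup, 4 = rmm;
  // 7 = dyn, i.e. use the rounding mode currently held in the frm CSR).
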
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (passthru, vector_in, int_vector_in, vl)
  class RISCVRGatherVVUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (vector_in, vector_in, int_vector_in, vl, policy)
  class RISCVRGatherVVMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vector types.
  // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
  class RISCVRGatherEI16VVMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type,
  // and the second source operand is XLen.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMMatchType<1>],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask), and the second source operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked<bit IsVI = 0>
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            !listconcat([IntrNoMem],
                                        !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
  class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
                            !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
                                        !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
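  // Illustrative IR for a RISCVBinaryAAX instance (e.g. the vadd intrinsic
  // defined later in this file; types are examples only):
  //   %r = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(
  //            <vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a,
  //            <vscale x 4 x i32> %b, i64 %vl)
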
  // The destination vector type is the same as the first source vector type
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryAAXUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryAAXMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // The destination vector type is the same as the first source vector type.
  // The second source operand must match the destination type or be an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector type
  // (with mask). The second source operand must match the destination type or
  // be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector
  // type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // type (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
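  // Illustrative IR for a RISCVBinaryAAXRoundingMode instance (e.g. the
  // vfadd intrinsic defined later in this file; types/values are examples,
  // and the frm immediate 7 selects dynamic rounding):
  //   %r = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(
  //            <vscale x 2 x float> %passthru, <vscale x 2 x float> %a,
  //            <vscale x 2 x float> %b, i64 7, i64 %vl)
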
  // The destination vector type is NOT the same as the first source vector
  // type.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryABXUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector
  // type (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryABXMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // The destination vector type is NOT the same as the first source vector
  // type. The second source operand must match the destination type or be an
  // XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // type (with mask). The second source operand must match the destination
  // type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
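  // Illustrative IR for a RISCVBinaryMOut instance (e.g. the vmadc intrinsic
  // defined later in this file; types are examples only):
  //   %m = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
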
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMasked
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (passthru, vector_in, vl)
  class RISCVClassifyUnMasked
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                            [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVClassifyMasked
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                            [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For Saturating binary operations with rounding-mode operand.
  // The destination vector type is the same as the first source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector type.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
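  // Note: the vxrm operand in the saturating/fixed-point classes uses the
  // V extension's fixed-point rounding-mode encoding
  // (0 = rnu, 1 = rne, 2 = rdn, 3 = rod).
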
  // For Saturating binary operations with mask and rounding-mode operand.
  // The destination vector type is the same as the first source vector type.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAXMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector type.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector type.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as the first source vector
  // type. The second source operand matches the destination type or is an
  // XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector
  // type (with mask). The second source operand matches the destination type
  // or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  class RVVSlideUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
  class RVVSlideMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Unmasked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryAAXAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryAAXAMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryAAXAUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryAAXAMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
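  // Illustrative IR for a RISCVTernaryAAXARoundingMode instance (e.g. a
  // vfmacc intrinsic; types/values are examples, frm = 7 selects dynamic
  // rounding, and the trailing immediate is the policy operand):
  //   %r = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(
  //            <vscale x 2 x float> %acc, <vscale x 2 x float> %a,
  //            <vscale x 2 x float> %b, i64 7, i64 %vl, i64 0)
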
  // Unmasked Widening Vector Multiply-Add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryWideUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Widening Vector Multiply-Add operations; the first operand cannot
  // be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryWideMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked Widening Vector Multiply-Add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryWideUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Widening Vector Multiply-Add operations; the first operand cannot
  // be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryWideMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, frm, vl)
  class RISCVReductionUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
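  // Illustrative IR for a RISCVReduction instance (e.g. a vredsum intrinsic;
  // types are examples, the second operand is the reduced vector, and the
  // third holds the scalar accumulator in element 0):
  //   %r = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(
  //            <vscale x 2 x i32> %passthru, <vscale x 8 x i32> %vec,
  //            <vscale x 2 x i32> %scalar, i64 %vl)
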
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (vector_in, vector_in, vector_in, mask, frm, vl)
  class RISCVReductionMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskedUnarySOutUnMasked
    : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                            [llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskedUnarySOutMasked
    : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                            [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector type.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector type
  // (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVUnaryABMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskedUnaryMOutMasked
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_anyint_ty], [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 0;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVConversionMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVConversionUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                             LLVMMatchType<2>],
                            [ImmArg<ArgIndex<2>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, frm, vl, policy)
  class RISCVConversionMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }

  // For unit stride segment load
  // Input: (passthru, pointer, vl, sew)
  class RISCVUSSegLoad
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<3>>,
                             IntrReadMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy, sew)
  class RISCVUSSegLoadMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>,
                             NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }

  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl, sew)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFF
    : DefaultAttrsIntrinsic<[llvm_any_ty, llvm_anyint_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<3>>, NoCapture<ArgIndex<1>>]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
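  // Segment loads return a vector tuple (the llvm_any_ty result here);
  // individual fields can then be read with the int_riscv_tuple_extract
  // intrinsic defined later in this file.
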
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy, sew)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFFMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty, llvm_anyint_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>,
                             NoCapture<ArgIndex<1>>]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }

  // For strided segment load
  // Input: (passthru, pointer, offset, vl, sew)
  class RISCVSSegLoad
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl, policy, sew)
  class RISCVSSegLoadMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             llvm_anyint_ty, llvm_anyvector_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
                             NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }

  // For indexed segment load
  // Input: (passthru, pointer, index, vl, sew)
  class RISCVISegLoad
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy, sew)
  class RISCVISegLoadMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             llvm_anyvector_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
                             NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }

  // For unit stride segment store
  // Input: (value, pointer, vl, sew)
  class RISCVUSSegStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<3>>, NoCapture<ArgIndex<1>>,
                             IntrWriteMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl, sew)
  class RISCVUSSegStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty,
                             llvm_anyvector_ty, llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>,
                             IntrWriteMem, IntrArgMemOnly]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }

  // For strided segment store
  // Input: (value, pointer, offset, vl, sew)
  class RISCVSSegStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>,
                             IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided segment store with mask
  // Input: (value, pointer, offset, mask, vl, sew)
  class RISCVSSegStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyint_ty,
                             llvm_anyvector_ty, LLVMMatchType<1>,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, NoCapture<ArgIndex<1>>,
                             IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed segment store
  // Input: (value, pointer, index, vl, sew)
  class RISCVISegStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>,
                             IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl, sew)
  class RISCVISegStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyvector_ty, llvm_anyint_ty,
                             LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, NoCapture<ArgIndex<1>>,
                             IntrWriteMem]>,
      RISCVVIntrinsic {
    let VLOperand = 4;
  }

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAARoundingMode {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
  multiclass RISCVBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
  }
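  // Most of these multiclasses define an unmasked and a masked intrinsic
  // from a single defm; for example, `defm vadd : RISCVBinaryAAX;` below
  // creates both int_riscv_vadd and int_riscv_vadd_mask.
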
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
  multiclass RISCVBinaryABXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  multiclass RISCVTernaryAAXARoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
"_mask" : RISCVTernaryWideMasked; 1302 } 1303 multiclass RISCVTernaryWideRoundingMode { 1304 def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode; 1305 def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode; 1306 } 1307 multiclass RISCVReduction { 1308 def "int_riscv_" # NAME : RISCVReductionUnMasked; 1309 def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked; 1310 } 1311 multiclass RISCVReductionRoundingMode { 1312 def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode; 1313 def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode; 1314 } 1315 multiclass RISCVMaskedUnarySOut { 1316 def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked; 1317 def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked; 1318 } 1319 multiclass RISCVMaskedUnaryMOut { 1320 def "int_riscv_" # NAME : RISCVUnaryUnMasked; 1321 def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked; 1322 } 1323 multiclass RISCVConversion { 1324 def "int_riscv_" #NAME :RISCVConversionUnMasked; 1325 def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked; 1326 } 1327 multiclass RISCVConversionRoundingMode { 1328 def "int_riscv_" #NAME :RISCVConversionUnMaskedRoundingMode; 1329 def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode; 1330 } 1331 multiclass RISCVUSSegLoad { 1332 def "int_riscv_" # NAME : RISCVUSSegLoad; 1333 def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked; 1334 } 1335 multiclass RISCVUSSegLoadFF { 1336 def "int_riscv_" # NAME : RISCVUSSegLoadFF; 1337 def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked; 1338 } 1339 multiclass RISCVSSegLoad { 1340 def "int_riscv_" # NAME : RISCVSSegLoad; 1341 def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked; 1342 } 1343 multiclass RISCVISegLoad { 1344 def "int_riscv_" # NAME : RISCVISegLoad; 1345 def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked; 1346 } 1347 multiclass RISCVUSSegStore { 1348 def "int_riscv_" # NAME : RISCVUSSegStore; 1349 def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked; 1350 } 1351 multiclass RISCVSSegStore { 1352 def "int_riscv_" # NAME : RISCVSSegStore; 1353 def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked; 1354 } 1355 multiclass RISCVISegStore { 1356 def "int_riscv_" # NAME : RISCVISegStore; 1357 def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked; 1358 } 1359 1360 //==-- Intrinsics to perform vector tuple subvector insertion/extraction --=// 1361 def int_riscv_tuple_insert 1362 : DefaultAttrsIntrinsic<[llvm_any_ty], 1363 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i32_ty], 1364 [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>; 1365 1366 def int_riscv_tuple_extract 1367 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1368 [llvm_any_ty, llvm_i32_ty], 1369 [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>; 1370 1371 defm vle : RISCVUSLoad; 1372 defm vleff : RISCVUSLoadFF; 1373 defm vse : RISCVUSStore; 1374 defm vlse: RISCVSLoad; 1375 defm vsse: RISCVSStore; 1376 defm vluxei : RISCVILoad; 1377 defm vloxei : RISCVILoad; 1378 defm vsoxei : RISCVIStore; 1379 defm vsuxei : RISCVIStore; 1380 1381 def int_riscv_vlm : RISCVUSMLoad; 1382 def int_riscv_vsm : RISCVUSStore; 1383 1384 defm vadd : RISCVBinaryAAX; 1385 defm vsub : RISCVBinaryAAX; 1386 defm vrsub : RISCVBinaryAAX; 1387 1388 defm vwaddu : RISCVBinaryABX; 1389 defm vwadd : RISCVBinaryABX; 1390 defm vwaddu_w : RISCVBinaryAAX; 1391 defm vwadd_w : RISCVBinaryAAX; 1392 defm vwsubu : RISCVBinaryABX; 1393 defm vwsub : RISCVBinaryABX; 1394 defm vwsubu_w : RISCVBinaryAAX; 1395 defm vwsub_w : 

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu : RISCVTernaryWide;
  defm vwmacc : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAXRoundingMode;
  defm vfsub : RISCVBinaryAAXRoundingMode;
  defm vfrsub : RISCVBinaryAAXRoundingMode;

  defm vfwadd : RISCVBinaryABXRoundingMode;
  defm vfwsub : RISCVBinaryABXRoundingMode;
  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
  defm vfwsub_w : RISCVBinaryAAXRoundingMode;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
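
  // Illustrative shapes for the two defs above: vmv_x_s reads element 0 of an
  // integer vector into a scalar, and vmv_s_x writes a scalar into element 0.
  // For nxv1i64 with XLen=64 these are expected to look roughly like
  //   i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> vec)
  //   <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(
  //       <vscale x 1 x i64> passthru, i64 scalar, i64 vl)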

  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  defm vfmul : RISCVBinaryAAXRoundingMode;
  defm vfdiv : RISCVBinaryAAXRoundingMode;
  defm vfrdiv : RISCVBinaryAAXRoundingMode;

  defm vfwmul : RISCVBinaryABXRoundingMode;

  defm vfmacc : RISCVTernaryAAXARoundingMode;
  defm vfnmacc : RISCVTernaryAAXARoundingMode;
  defm vfmsac : RISCVTernaryAAXARoundingMode;
  defm vfnmsac : RISCVTernaryAAXARoundingMode;
  defm vfmadd : RISCVTernaryAAXARoundingMode;
  defm vfnmadd : RISCVTernaryAAXARoundingMode;
  defm vfmsub : RISCVTernaryAAXARoundingMode;
  defm vfnmsub : RISCVTernaryAAXARoundingMode;

  defm vfwmacc : RISCVTernaryWideRoundingMode;
  defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
  defm vfwnmacc : RISCVTernaryWideRoundingMode;
  defm vfwmsac : RISCVTernaryWideRoundingMode;
  defm vfwnmsac : RISCVTernaryWideRoundingMode;

  defm vfsqrt : RISCVUnaryAARoundingMode;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAARoundingMode;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def int_riscv_vcompress : RISCVCompress;

  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;

  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;

  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;

  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReductionRoundingMode;
  defm vfredusum : RISCVReductionRoundingMode;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReductionRoundingMode;
  defm vfwredosum : RISCVReductionRoundingMode;
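
  // Illustrative note on the reductions above: each folds an entire source
  // vector into element 0 of the result operand, e.g. vredsum is expected to
  // compute result[0] = scalar[0] + (vec[0] + ... + vec[vl-1]), with the
  // remaining result elements handled according to the tail policy.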

  def int_riscv_vmand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vcpop : RISCVMaskedUnarySOut;
  defm vfirst : RISCVMaskedUnarySOut;
  defm vmsbf : RISCVMaskedUnaryMOut;
  defm vmsof : RISCVMaskedUnaryMOut;
  defm vmsif : RISCVMaskedUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
  defm vfcvt_f_x_v : RISCVConversionRoundingMode;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;
  defm vfwcvtbf16_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
  defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty],
                              [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty, LLVMMatchType<1>],
                              [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty, LLVMMatchType<1>],
                              [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad;
    defm vlseg # nf # ff : RISCVUSSegLoadFF;
    defm vlsseg # nf : RISCVSSegLoad;
    defm vloxseg # nf : RISCVISegLoad;
    defm vluxseg # nf : RISCVISegLoad;
    defm vsseg # nf : RISCVUSSegStore;
    defm vssseg # nf : RISCVSSegStore;
    defm vsoxseg # nf : RISCVISegStore;
    defm vsuxseg # nf : RISCVISegStore;
  }
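
  // For example, the nf=2 iteration of the loop above is expected to produce
  // int_riscv_vlseg2 / int_riscv_vlseg2_mask, int_riscv_vlseg2ff /
  // int_riscv_vlseg2ff_mask, ..., through int_riscv_vsuxseg2 /
  // int_riscv_vsuxseg2_mask, and likewise for nf = 3 through 8.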

  // Segment loads/stores for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    def int_riscv_seg # nf # _load
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                            !listsplat(LLVMMatchType<0>,
                                                       !add(nf, -1))),
                                [llvm_anyptr_ty, llvm_anyint_ty],
                                [NoCapture<ArgIndex<0>>, IntrReadMem]>;
    def int_riscv_seg # nf # _store
        : DefaultAttrsIntrinsic<[],
                                !listconcat([llvm_anyvector_ty],
                                            !listsplat(LLVMMatchType<0>,
                                                       !add(nf, -1)),
                                            [llvm_anyptr_ty, llvm_anyint_ty]),
                                [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
  }

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;

class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// Zknd
def int_riscv_aes32dsi : ScalarCryptoByteSelect32,
                         ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64,
                        ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64,
                         ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im : ScalarCryptoGprIntrinsic64,
                        ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne
def int_riscv_aes32esi : ScalarCryptoByteSelect32,
                         ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64,
                        ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64,
                         ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne
def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64,
                         ClangBuiltin<"__builtin_riscv_aes64ks2">;
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;
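
// Illustrative note: the ImmArg operands above must be compile-time
// constants. For example, the byte-select operand of the aes32* intrinsics
// (a 2-bit field in the instruction encoding) is expected to be an immediate
// in [0, 3], and the round-number operand of aes64ks1i an immediate in
// [0, 10].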

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed
def int_riscv_sm4ks : ScalarCryptoByteSelect32;
def int_riscv_sm4ed : ScalarCryptoByteSelect32;

// Zksh
def int_riscv_sm3p0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1 : ScalarCryptoGprIntrinsic32;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vector Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the vector cryptography extension, if the extension is present.
let TargetPrefix = "riscv" in {
  // Zvkb
  defm vandn : RISCVBinaryAAX;
  defm vbrev8 : RISCVUnaryAA;
  defm vrev8 : RISCVUnaryAA;
  defm vrol : RISCVBinaryAAX;
  defm vror : RISCVBinaryAAX;

  // Zvbb
  defm vbrev : RISCVUnaryAA;
  defm vclz : RISCVUnaryAA;
  defm vctz : RISCVUnaryAA;
  defm vcpopv : RISCVUnaryAA;
  defm vwsll : RISCVBinaryABX;

  // Zvbc
  defm vclmul : RISCVBinaryAAX;
  defm vclmulh : RISCVBinaryAAX;

  // Zvkg
  def int_riscv_vghsh : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

  // Zvkned
  defm vaesdf : RISCVUnaryAAUnMaskedZvk;
  defm vaesdm : RISCVUnaryAAUnMaskedZvk;
  defm vaesef : RISCVUnaryAAUnMaskedZvk;
  defm vaesem : RISCVUnaryAAUnMaskedZvk;
  def int_riscv_vaeskf1 : RISCVBinaryAAXUnMasked<IsVI=1>;
  def int_riscv_vaeskf2 : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  defm vaesz : RISCVUnaryAAUnMaskedZvk<HasVV=0>;

  // Zvknha or Zvknhb
  def int_riscv_vsha2ch : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2cl : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2ms : RISCVBinaryAAXUnMaskedZvk;

  // Zvksed
  def int_riscv_vsm4k : RISCVBinaryAAXUnMasked<IsVI=1>;
  defm vsm4r : RISCVUnaryAAUnMaskedZvk;

  // Zvksh
  def int_riscv_vsm3c : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  def int_riscv_vsm3me : RISCVBinaryAAXUnMasked;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"
include "llvm/IR/IntrinsicsRISCVXsf.td"
include "llvm/IR/IntrinsicsRISCVXCV.td"