1//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file defines the builtins for RISC-V V-extension. See: 10// 11// https://github.com/riscv/rvv-intrinsic-doc 12// 13//===----------------------------------------------------------------------===// 14 15include "riscv_vector_common.td" 16 17defvar TypeList = ["c","s","i","l","x","f","d","y"]; 18defvar EEWList = [["8", "(Log2EEW:3)"], 19 ["16", "(Log2EEW:4)"], 20 ["32", "(Log2EEW:5)"], 21 ["64", "(Log2EEW:6)"]]; 22 23class IsFloat<string type> { 24 bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d"), !eq(type, "y")); 25} 26 27let SupportOverloading = false, 28 MaskedPolicyScheme = NonePolicy in { 29 class RVVVLEMaskBuiltin : RVVOutBuiltin<"m", "mPCUe", "c"> { 30 let Name = "vlm_v"; 31 let IRName = "vlm"; 32 let HasMasked = false; 33 } 34} 35 36let SupportOverloading = false, 37 UnMaskedPolicyScheme = HasPassthruOperand in { 38 multiclass RVVVLEBuiltin<list<string> types> { 39 let Name = NAME # "_v", 40 IRName = "vle", 41 MaskedIRName ="vle_mask" in { 42 foreach type = types in { 43 def : RVVOutBuiltin<"v", "vPCe", type>; 44 if !not(IsFloat<type>.val) then { 45 def : RVVOutBuiltin<"Uv", "UvPCUe", type>; 46 } 47 } 48 } 49 } 50} 51 52multiclass RVVVLEFFBuiltin<list<string> types> { 53 let Name = NAME # "_v", 54 IRName = "vleff", 55 MaskedIRName = "vleff_mask", 56 SupportOverloading = false, 57 UnMaskedPolicyScheme = HasPassthruOperand, 58 ManualCodegen = [{ 59 { 60 if (IsMasked) { 61 // Move mask to right before vl. 62 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); 63 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) 64 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 65 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 66 IntrinsicTypes = {ResultType, Ops[4]->getType()}; 67 } else { 68 if (PolicyAttrs & RVV_VTA) 69 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 70 IntrinsicTypes = {ResultType, Ops[3]->getType()}; 71 } 72 Value *NewVL = Ops[2]; 73 Ops.erase(Ops.begin() + 2); 74 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 75 llvm::Value *LoadValue = Builder.CreateCall(F, Ops, ""); 76 llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0}); 77 // Store new_vl. 78 clang::CharUnits Align; 79 if (IsMasked) 80 Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(E->getNumArgs()-2)->getType()); 81 else 82 Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType()); 83 llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1}); 84 Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align)); 85 return V; 86 } 87 }] in { 88 foreach type = types in { 89 def : RVVBuiltin<"v", "vPCePz", type>; 90 // Skip floating types for unsigned versions. 91 if !not(IsFloat<type>.val) then { 92 def : RVVBuiltin<"Uv", "UvPCUePz", type>; 93 } 94 } 95 } 96} 97 98multiclass RVVVLSEBuiltin<list<string> types> { 99 let Name = NAME # "_v", 100 IRName = "vlse", 101 MaskedIRName ="vlse_mask", 102 SupportOverloading = false, 103 UnMaskedPolicyScheme = HasPassthruOperand in { 104 foreach type = types in { 105 def : RVVOutBuiltin<"v", "vPCet", type>; 106 if !not(IsFloat<type>.val) then { 107 def : RVVOutBuiltin<"Uv", "UvPCUet", type>; 108 } 109 } 110 } 111} 112 113multiclass RVVIndexedLoad<string op> { 114 let UnMaskedPolicyScheme = HasPassthruOperand in { 115 foreach type = TypeList in { 116 foreach eew_list = EEWList[0-2] in { 117 defvar eew = eew_list[0]; 118 defvar eew_type = eew_list[1]; 119 let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask", 120 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 121 !if(!eq(type, "y"), ["Zvfbfmin"], 122 []<string>)) in { 123 def: RVVOutOp1Builtin<"v", "vPCe" # eew_type # "Uv", type>; 124 if !not(IsFloat<type>.val) then { 125 def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew_type # "Uv", type>; 126 } 127 } 128 } 129 defvar eew64 = "64"; 130 defvar eew64_type = "(Log2EEW:6)"; 131 let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask", 132 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin", "RV64"], 133 !if(!eq(type, "y"), ["Zvfbfmin", "RV64"], 134 ["RV64"])) in { 135 def: RVVOutOp1Builtin<"v", "vPCe" # eew64_type # "Uv", type>; 136 if !not(IsFloat<type>.val) then { 137 def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew64_type # "Uv", type>; 138 } 139 } 140 } 141 } 142} 143 144let HasMaskedOffOperand = false, 145 MaskedPolicyScheme = NonePolicy, 146 ManualCodegen = [{ 147 if (IsMasked) { 148 // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl) 149 std::swap(Ops[0], Ops[2]); 150 } else { 151 // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl) 152 std::swap(Ops[0], Ops[1]); 153 } 154 if (IsMasked) 155 IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()}; 156 else 157 IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()}; 158 }] in { 159 class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> { 160 let Name = "vsm_v"; 161 let IRName = "vsm"; 162 let HasMasked = false; 163 } 164 multiclass RVVVSEBuiltin<list<string> types> { 165 let Name = NAME # "_v", 166 IRName = "vse", 167 MaskedIRName = "vse_mask" in { 168 foreach type = types in { 169 def : RVVBuiltin<"v", "0Pev", type>; 170 if !not(IsFloat<type>.val) then { 171 def : RVVBuiltin<"Uv", "0PUeUv", type>; 172 } 173 } 174 } 175 } 176} 177 178multiclass RVVVSSEBuiltin<list<string> types> { 179 let Name = NAME # "_v", 180 IRName = "vsse", 181 MaskedIRName = "vsse_mask", 182 HasMaskedOffOperand = false, 183 MaskedPolicyScheme = NonePolicy, 184 ManualCodegen = [{ 185 if (IsMasked) { 186 // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl) 187 std::swap(Ops[0], Ops[3]); 188 } else { 189 // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl) 190 std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3); 191 } 192 if (IsMasked) 193 IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()}; 194 else 195 IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()}; 196 }] in { 197 foreach type = types in { 198 def : RVVBuiltin<"v", "0Petv", type>; 199 if !not(IsFloat<type>.val) then { 200 def : RVVBuiltin<"Uv", "0PUetUv", type>; 201 } 202 } 203 } 204} 205 206multiclass RVVIndexedStore<string op> { 207 let HasMaskedOffOperand = false, 208 MaskedPolicyScheme = NonePolicy, 209 ManualCodegen = [{ 210 if (IsMasked) { 211 // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl) 212 std::swap(Ops[0], Ops[3]); 213 } else { 214 // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl) 215 std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3); 216 } 217 if (IsMasked) 218 IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()}; 219 else 220 IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()}; 221 }] in { 222 foreach type = TypeList in { 223 foreach eew_list = EEWList[0-2] in { 224 defvar eew = eew_list[0]; 225 defvar eew_type = eew_list[1]; 226 let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask", 227 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 228 !if(!eq(type, "y"), ["Zvfbfmin"], 229 []<string>)) in { 230 def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>; 231 if !not(IsFloat<type>.val) then { 232 def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>; 233 } 234 } 235 } 236 defvar eew64 = "64"; 237 defvar eew64_type = "(Log2EEW:6)"; 238 let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask", 239 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin", "RV64"], 240 !if(!eq(type, "y"), ["Zvfbfmin", "RV64"], 241 ["RV64"])) in { 242 def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>; 243 if !not(IsFloat<type>.val) then { 244 def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>; 245 } 246 } 247 } 248 } 249} 250 251defvar NFList = [2, 3, 4, 5, 6, 7, 8]; 252/* 253A segment load builtin has different variants. 254 255Therefore a segment unit-stride load builtin can have 4 variants, 2561. When unmasked and the policies are all specified as agnostic: 257(Address0, ..., Address{NF - 1}, Ptr, VL) 2582. When masked and the policies are all specified as agnostic: 259(Address0, ..., Address{NF - 1}, Mask, Ptr, VL) 2603. When unmasked and one of the policies is specified as undisturbed: 261(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1}, 262 Ptr, VL) 2634. When masked and one of the policies is specified as undisturbed: 264(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1}, 265 Ptr, VL) 266 267Other variants of segment load builtin share the same structure, but they 268have their own extra parameter. 269 270The segment unit-stride fault-only-first load builtin has a 'NewVL' 271operand after the 'Ptr' operand. 2721. When unmasked and the policies are all specified as agnostic: 273(Address0, ..., Address{NF - 1}, Ptr, NewVL, VL) 2742. When masked and the policies are all specified as agnostic: 275(Address0, ..., Address{NF - 1}, Mask, Ptr, NewVL, VL) 2763. When unmasked and one of the policies is specified as undisturbed: 277(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1}, 278 Ptr, NewVL, VL) 2794. When masked and one of the policies is specified as undisturbed: 280(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1}, 281 Ptr, NewVL, VL) 282 283The segment strided load builtin has a 'Stride' operand after the 'Ptr' 284operand. 2851. When unmasked and the policies are all specified as agnostic: 286(Address0, ..., Address{NF - 1}, Ptr, Stride, VL) 2872. When masked and the policies are all specified as agnostic: 288(Address0, ..., Address{NF - 1}, Mask, Ptr, Stride, VL) 2893. When unmasked and one of the policies is specified as undisturbed: 290(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1}, 291 Ptr, Stride, VL) 2924. When masked and one of the policies is specified as undisturbed: 293(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1}, 294 Ptr, Stride, VL) 295 296The segment indexed load builtin has a 'Idx' operand after the 'Ptr' operand. 2971. When unmasked and the policies are all specified as agnostic: 298(Address0, ..., Address{NF - 1}, Ptr, Idx, VL) 2992. When masked and the policies are all specified as agnostic: 300(Address0, ..., Address{NF - 1}, Mask, Ptr, Idx, VL) 3013. When unmasked and one of the policies is specified as undisturbed: 302(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1}, 303 Ptr, Idx, VL) 3044. When masked and one of the policies is specified as undisturbed: 305(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1}, 306 Ptr, Idx, VL) 307 308Segment load intrinsics has different variants similar to their builtins. 309 310Segment unit-stride load intrinsic, 311 Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy) 312 Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL) 313Segment unit-stride fault-only-first load intrinsic, 314 Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy) 315 Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL) 316Segment strided load intrinsic, 317 Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, Mask, VL, Policy) 318 Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, VL) 319Segment indexed load intrinsic, 320 Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, Mask, VL, Policy) 321 Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, VL) 322 323The Vector(s) is poison when the policy behavior allows us to not care 324about any masked-off elements. 325*/ 326 327class PVString<int nf, bit signed> { 328 string S = 329 !cond(!eq(nf, 2): !if(signed, "PvPv", "PUvPUv"), 330 !eq(nf, 3): !if(signed, "PvPvPv", "PUvPUvPUv"), 331 !eq(nf, 4): !if(signed, "PvPvPvPv", "PUvPUvPUvPUv"), 332 !eq(nf, 5): !if(signed, "PvPvPvPvPv", "PUvPUvPUvPUvPUv"), 333 !eq(nf, 6): !if(signed, "PvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUv"), 334 !eq(nf, 7): !if(signed, "PvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUv"), 335 !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv")); 336} 337 338class VString<int nf, bit signed> { 339 string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"), 340 !eq(nf, 3): !if(signed, "vvv", "UvUvUv"), 341 !eq(nf, 4): !if(signed, "vvvv", "UvUvUvUv"), 342 !eq(nf, 5): !if(signed, "vvvvv", "UvUvUvUvUv"), 343 !eq(nf, 6): !if(signed, "vvvvvv", "UvUvUvUvUvUv"), 344 !eq(nf, 7): !if(signed, "vvvvvvv", "UvUvUvUvUvUvUv"), 345 !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv")); 346} 347 348 349class FixedVString<int fixed_lmul, int num, string vec> { 350 string V = "(LFixedLog2LMUL:" # fixed_lmul # ")" # vec; 351 string S = !interleave(!listsplat(V, num), ""); 352} 353 354multiclass RVVNonTupleVCreateBuiltin<int dst_lmul, list<int> src_lmul_list> { 355 defvar dst_v = FixedVString<dst_lmul, 1, "v">.V; 356 defvar dst_uv = FixedVString<dst_lmul, 1, "Uv">.V; 357 foreach src_lmul = src_lmul_list in { 358 defvar num = !shl(1, !sub(dst_lmul, src_lmul)); 359 360 defvar src_v = FixedVString<src_lmul, num, "v">.V; 361 defvar src_s = FixedVString<src_lmul, num, "v">.S; 362 def vcreate # src_v # dst_v : RVVBuiltin<src_v # dst_v, 363 dst_v # src_s, 364 "csilfd">; 365 let RequiredFeatures = ["Zvfhmin"] in 366 def vcreate_h # src_v # dst_v : RVVBuiltin<src_v # dst_v, 367 dst_v # src_s, 368 "x", dst_v>; 369 let RequiredFeatures = ["Zvfbfmin"] in 370 def vcreate_bf16 # src_v # dst_v : RVVBuiltin<src_v # dst_v, 371 dst_v # src_s, 372 "y", dst_v>; 373 374 defvar src_uv = FixedVString<src_lmul, num, "Uv">.V; 375 defvar src_us = FixedVString<src_lmul, num, "Uv">.S; 376 def vcreate_u # src_uv # dst_uv : RVVBuiltin<src_uv # dst_uv, 377 dst_uv # src_us, 378 "csil">; 379 } 380} 381 382multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> { 383 let Name = NAME, 384 IRName = IR, 385 MaskedIRName = IR # "_mask", 386 UnMaskedPolicyScheme = HasPassthruOperand, 387 ManualCodegen = [{ 388 { 389 if (IsMasked) { 390 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); 391 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) 392 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 393 } else { 394 if (PolicyAttrs & RVV_VTA) 395 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 396 } 397 auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); 398 Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy)); 399 400 if (IsMasked) { 401 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 402 // maskedoff, op1, op2, mask, vl, policy 403 IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()}; 404 } else { 405 // passthru, op1, op2, vl 406 IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()}; 407 } 408 break; 409 } 410 }] in { 411 def : RVVBuiltin<"v", "vv", type_range>; 412 } 413} 414 415multiclass RVVPseudoVNotBuiltin<string IR, string type_range> { 416 let Name = NAME, 417 IRName = IR, 418 MaskedIRName = IR # "_mask", 419 UnMaskedPolicyScheme = HasPassthruOperand, 420 ManualCodegen = [{ 421 { 422 if (IsMasked) { 423 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); 424 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) 425 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 426 } else { 427 if (PolicyAttrs & RVV_VTA) 428 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 429 } 430 auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); 431 Ops.insert(Ops.begin() + 2, 432 llvm::Constant::getAllOnesValue(ElemTy)); 433 if (IsMasked) { 434 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 435 // maskedoff, op1, po2, mask, vl, policy 436 IntrinsicTypes = {ResultType, 437 ElemTy, 438 Ops[4]->getType()}; 439 } else { 440 // passthru, op1, op2, vl 441 IntrinsicTypes = {ResultType, 442 ElemTy, 443 Ops[3]->getType()}; 444 } 445 break; 446 } 447 }] in { 448 def : RVVBuiltin<"v", "vv", type_range>; 449 def : RVVBuiltin<"Uv", "UvUv", type_range>; 450 } 451} 452 453multiclass RVVPseudoMaskBuiltin<string IR, string type_range> { 454 let Name = NAME, 455 IRName = IR, 456 HasMasked = false, 457 ManualCodegen = [{ 458 { 459 // op1, vl 460 IntrinsicTypes = {ResultType, 461 Ops[1]->getType()}; 462 Ops.insert(Ops.begin() + 1, Ops[0]); 463 break; 464 } 465 }] in { 466 def : RVVBuiltin<"m", "mm", type_range>; 467 } 468} 469 470multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> { 471 let Name = NAME, 472 IRName = IR, 473 MaskedIRName = IR # "_mask", 474 UnMaskedPolicyScheme = HasPassthruOperand, 475 ManualCodegen = [{ 476 { 477 if (IsMasked) { 478 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); 479 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) 480 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 481 Ops.insert(Ops.begin() + 2, Ops[1]); 482 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 483 // maskedoff, op1, op2, mask, vl 484 IntrinsicTypes = {ResultType, 485 Ops[2]->getType(), 486 Ops.back()->getType()}; 487 } else { 488 if (PolicyAttrs & RVV_VTA) 489 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 490 // op1, po2, vl 491 IntrinsicTypes = {ResultType, 492 Ops[1]->getType(), Ops[2]->getType()}; 493 Ops.insert(Ops.begin() + 2, Ops[1]); 494 break; 495 } 496 break; 497 } 498 }] in { 499 def : RVVBuiltin<"v", "vv", type_range>; 500 } 501} 502 503multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range, 504 list<list<string>> suffixes_prototypes> { 505 let Name = NAME, 506 OverloadedName = MName, 507 IRName = IR, 508 MaskedIRName = IR # "_mask", 509 UnMaskedPolicyScheme = HasPassthruOperand, 510 ManualCodegen = [{ 511 { 512 if (IsMasked) { 513 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); 514 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) 515 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 516 } else { 517 if (PolicyAttrs & RVV_VTA) 518 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 519 } 520 auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); 521 Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy)); 522 if (IsMasked) { 523 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 524 // maskedoff, op1, op2, mask, vl, policy 525 IntrinsicTypes = {ResultType, 526 Ops[1]->getType(), 527 ElemTy, 528 Ops[4]->getType()}; 529 } else { 530 // passtru, op1, op2, vl 531 IntrinsicTypes = {ResultType, 532 Ops[1]->getType(), 533 ElemTy, 534 Ops[3]->getType()}; 535 } 536 break; 537 } 538 }] in { 539 foreach s_p = suffixes_prototypes in { 540 def : RVVBuiltin<s_p[0], s_p[1], type_range>; 541 } 542 } 543} 544 545multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range, 546 list<list<string>> suffixes_prototypes> { 547 let Name = NAME, 548 OverloadedName = MName, 549 IRName = IR, 550 MaskedIRName = IR # "_mask", 551 UnMaskedPolicyScheme = HasPassthruOperand, 552 ManualCodegen = [{ 553 { 554 if (IsMasked) { 555 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); 556 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) 557 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 558 } else { 559 if (PolicyAttrs & RVV_VTA) 560 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 561 } 562 Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType())); 563 if (IsMasked) { 564 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 565 // maskedoff, op1, xlen, mask, vl 566 IntrinsicTypes = {ResultType, 567 Ops[1]->getType(), 568 Ops[4]->getType(), 569 Ops[4]->getType()}; 570 } else { 571 // passthru, op1, xlen, vl 572 IntrinsicTypes = {ResultType, 573 Ops[1]->getType(), 574 Ops[3]->getType(), 575 Ops[3]->getType()}; 576 } 577 break; 578 } 579 }] in { 580 foreach s_p = suffixes_prototypes in { 581 def : RVVBuiltin<s_p[0], s_p[1], type_range>; 582 } 583 } 584} 585 586let HeaderCode = 587[{ 588#define __riscv_vlenb() __builtin_rvv_vlenb() 589}] in 590def vlenb_macro: RVVHeader; 591 592let HasBuiltinAlias = false, HasVL = false, HasMasked = false, 593 UnMaskedPolicyScheme = NonePolicy, MaskedPolicyScheme = NonePolicy, 594 Log2LMUL = [0], IRName = "", 595 ManualCodegen = [{ 596 { 597 LLVMContext &Context = CGM.getLLVMContext(); 598 llvm::MDBuilder MDHelper(Context); 599 600 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "vlenb")}; 601 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); 602 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); 603 llvm::Function *F = 604 CGM.getIntrinsic(llvm::Intrinsic::read_register, {SizeTy}); 605 return Builder.CreateCall(F, Metadata); 606 } 607 }] in 608{ 609 def vlenb : RVVBuiltin<"", "u", "i">; 610} 611 612// 6. Configuration-Setting Instructions 613// 6.1. vsetvli/vsetvl instructions 614 615// vsetvl/vsetvlmax are a macro because they require constant integers in SEW 616// and LMUL. 617let HeaderCode = 618[{ 619#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6) 620#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7) 621#define __riscv_vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0) 622#define __riscv_vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1) 623#define __riscv_vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2) 624#define __riscv_vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3) 625 626#define __riscv_vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7) 627#define __riscv_vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0) 628#define __riscv_vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1) 629#define __riscv_vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2) 630#define __riscv_vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3) 631 632#define __riscv_vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0) 633#define __riscv_vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1) 634#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2) 635#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3) 636 637#if __riscv_v_elen >= 64 638#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5) 639#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6) 640#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7) 641 642#define __riscv_vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0) 643#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1) 644#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2) 645#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3) 646#endif 647 648#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6) 649#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7) 650#define __riscv_vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0) 651#define __riscv_vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1) 652#define __riscv_vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2) 653#define __riscv_vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3) 654 655#define __riscv_vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7) 656#define __riscv_vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0) 657#define __riscv_vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1) 658#define __riscv_vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2) 659#define __riscv_vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3) 660 661#define __riscv_vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0) 662#define __riscv_vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1) 663#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2) 664#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3) 665 666#if __riscv_v_elen >= 64 667#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5) 668#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6) 669#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7) 670 671#define __riscv_vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0) 672#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1) 673#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2) 674#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3) 675#endif 676 677}] in 678def vsetvl_macro: RVVHeader; 679 680let HasBuiltinAlias = false, 681 HasVL = false, 682 HasMasked = false, 683 MaskedPolicyScheme = NonePolicy, 684 Log2LMUL = [0], 685 ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type 686{ 687 def vsetvli : RVVBuiltin<"", "zzKzKz", "i">; 688 def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">; 689} 690 691// 7. Vector Loads and Stores 692// 7.4. Vector Unit-Stride Instructions 693def vlm: RVVVLEMaskBuiltin; 694defm vle8: RVVVLEBuiltin<["c"]>; 695defm vle16: RVVVLEBuiltin<["s"]>; 696let Name = "vle16_v", RequiredFeatures = ["Zvfhmin"] in 697 defm vle16_h: RVVVLEBuiltin<["x"]>; 698let Name = "vle16_v", RequiredFeatures = ["Zvfbfmin"] in 699 defm vle16_bf16 : RVVVLEBuiltin<["y"]>; 700defm vle32: RVVVLEBuiltin<["i","f"]>; 701defm vle64: RVVVLEBuiltin<["l","d"]>; 702 703def vsm : RVVVSEMaskBuiltin; 704defm vse8 : RVVVSEBuiltin<["c"]>; 705defm vse16: RVVVSEBuiltin<["s"]>; 706let Name = "vse16_v", RequiredFeatures = ["Zvfhmin"] in 707 defm vse16_h: RVVVSEBuiltin<["x"]>; 708let Name = "vse16_v", RequiredFeatures = ["Zvfbfmin"] in 709 defm vse16_bf16: RVVVSEBuiltin<["y"]>; 710defm vse32: RVVVSEBuiltin<["i","f"]>; 711defm vse64: RVVVSEBuiltin<["l","d"]>; 712 713// 7.5. Vector Strided Instructions 714defm vlse8: RVVVLSEBuiltin<["c"]>; 715defm vlse16: RVVVLSEBuiltin<["s"]>; 716let Name = "vlse16_v", RequiredFeatures = ["Zvfhmin"] in 717 defm vlse16_h: RVVVLSEBuiltin<["x"]>; 718let Name = "vlse16_v", RequiredFeatures = ["Zvfbfmin"] in 719 defm vlse16_bf16: RVVVLSEBuiltin<["y"]>; 720defm vlse32: RVVVLSEBuiltin<["i","f"]>; 721defm vlse64: RVVVLSEBuiltin<["l","d"]>; 722 723defm vsse8 : RVVVSSEBuiltin<["c"]>; 724defm vsse16: RVVVSSEBuiltin<["s"]>; 725let Name = "vsse16_v", RequiredFeatures = ["Zvfhmin"] in 726 defm vsse16_h: RVVVSSEBuiltin<["x"]>; 727let Name = "vsse16_v", RequiredFeatures = ["Zvfbfmin"] in 728 defm vsse16_bf: RVVVSSEBuiltin<["y"]>; 729defm vsse32: RVVVSSEBuiltin<["i","f"]>; 730defm vsse64: RVVVSSEBuiltin<["l","d"]>; 731 732// 7.6. Vector Indexed Instructions 733defm : RVVIndexedLoad<"vluxei">; 734defm : RVVIndexedLoad<"vloxei">; 735 736defm : RVVIndexedStore<"vsuxei">; 737defm : RVVIndexedStore<"vsoxei">; 738 739// 7.7. Unit-stride Fault-Only-First Loads 740defm vle8ff: RVVVLEFFBuiltin<["c"]>; 741defm vle16ff: RVVVLEFFBuiltin<["s"]>; 742let Name = "vle16ff_v", RequiredFeatures = ["Zvfhmin"] in 743 defm vle16ff: RVVVLEFFBuiltin<["x"]>; 744let Name = "vle16ff_v", RequiredFeatures = ["Zvfbfmin"] in 745 defm vle16ff: RVVVLEFFBuiltin<["y"]>; 746defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>; 747defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>; 748 749multiclass RVVUnitStridedSegLoadTuple<string op> { 750 foreach type = TypeList in { 751 defvar eew = !cond(!eq(type, "c") : "8", 752 !eq(type, "s") : "16", 753 !eq(type, "i") : "32", 754 !eq(type, "l") : "64", 755 !eq(type, "x") : "16", 756 !eq(type, "f") : "32", 757 !eq(type, "d") : "64", 758 !eq(type, "y") : "16"); 759 foreach nf = NFList in { 760 let Name = op # nf # "e" # eew # "_v", 761 IRName = op # nf, 762 MaskedIRName = op # nf # "_mask", 763 NF = nf, 764 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 765 !if(!eq(type, "y"), ["Zvfbfmin"], 766 []<string>)), 767 ManualCodegen = [{ 768 { 769 if (IsMasked) 770 IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops.back()->getType()}; 771 else 772 IntrinsicTypes = {ResultType, Ops.back()->getType()}; 773 SmallVector<llvm::Value*, 6> Operands; 774 775 bool NoPassthru = 776 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) | 777 (!IsMasked && (PolicyAttrs & RVV_VTA)); 778 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; 779 780 if (NoPassthru) { // Push poison into passthru 781 Operands.push_back(llvm::PoisonValue::get(ResultType)); 782 } else { // Push intrinsics operands into passthru 783 llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0]; 784 Operands.push_back(PassthruOperand); 785 } 786 787 Operands.push_back(Ops[Offset]); // Ptr 788 if (IsMasked) 789 Operands.push_back(Ops[0]); 790 Operands.push_back(Ops[Offset + 1]); // VL 791 if (IsMasked) 792 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 793 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 794 795 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 796 797 llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); 798 if (ReturnValue.isNull()) 799 return LoadValue; 800 else 801 return Builder.CreateStore(LoadValue, ReturnValue.getValue()); 802 } 803 }] in { 804 defvar T = "(Tuple:" # nf # ")"; 805 def : RVVBuiltin<T # "v", T # "vPCe", type>; 806 if !not(IsFloat<type>.val) then { 807 def : RVVBuiltin<T # "Uv", T # "UvPCUe", type>; 808 } 809 } 810 } 811 } 812} 813 814multiclass RVVUnitStridedSegStoreTuple<string op> { 815 foreach type = TypeList in { 816 defvar eew = !cond(!eq(type, "c") : "8", 817 !eq(type, "s") : "16", 818 !eq(type, "i") : "32", 819 !eq(type, "l") : "64", 820 !eq(type, "x") : "16", 821 !eq(type, "f") : "32", 822 !eq(type, "d") : "64", 823 !eq(type, "y") : "16"); 824 foreach nf = NFList in { 825 let Name = op # nf # "e" # eew # "_v", 826 IRName = op # nf, 827 MaskedIRName = op # nf # "_mask", 828 NF = nf, 829 HasMaskedOffOperand = false, 830 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 831 !if(!eq(type, "y"), ["Zvfbfmin"], 832 []<string>)), 833 ManualCodegen = [{ 834 { 835 // Masked 836 // Builtin: (mask, ptr, v_tuple, vl) 837 // Intrinsic: (tuple, ptr, mask, vl) 838 // Unmasked 839 // Builtin: (ptr, v_tuple, vl) 840 // Intrinsic: (tuple, ptr, vl) 841 unsigned Offset = IsMasked ? 1 : 0; 842 843 SmallVector<llvm::Value*, 5> Operands; 844 Operands.push_back(Ops[Offset + 1]); // tuple 845 Operands.push_back(Ops[Offset]); // Ptr 846 if (IsMasked) 847 Operands.push_back(Ops[0]); 848 Operands.push_back(Ops[Offset + 2]); // VL 849 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 850 851 if (IsMasked) 852 IntrinsicTypes = {Operands[0]->getType(), Ops[0]->getType(), Operands.back()->getType()}; 853 else 854 IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()}; 855 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 856 return Builder.CreateCall(F, Operands, ""); 857 } 858 }] in { 859 defvar T = "(Tuple:" # nf # ")"; 860 def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>; 861 if !not(IsFloat<type>.val) then { 862 def : RVVBuiltin<T # "Uv", "0PUe" # T # "Uv", type>; 863 } 864 } 865 } 866 } 867} 868 869multiclass RVVUnitStridedSegLoadFFTuple<string op> { 870 foreach type = TypeList in { 871 defvar eew = !cond(!eq(type, "c") : "8", 872 !eq(type, "s") : "16", 873 !eq(type, "i") : "32", 874 !eq(type, "l") : "64", 875 !eq(type, "x") : "16", 876 !eq(type, "f") : "32", 877 !eq(type, "d") : "64", 878 !eq(type, "y") : "16"); 879 foreach nf = NFList in { 880 let Name = op # nf # "e" # eew # "ff_v", 881 IRName = op # nf # "ff", 882 MaskedIRName = op # nf # "ff_mask", 883 NF = nf, 884 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 885 !if(!eq(type, "y"), ["Zvfbfmin"], 886 []<string>)), 887 ManualCodegen = [{ 888 { 889 if (IsMasked) 890 IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()}; 891 else 892 IntrinsicTypes = {ResultType, Ops.back()->getType()}; 893 SmallVector<llvm::Value*, 6> Operands; 894 895 bool NoPassthru = 896 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) | 897 (!IsMasked && (PolicyAttrs & RVV_VTA)); 898 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; 899 900 if (NoPassthru) { // Push poison into passthru 901 Operands.push_back(llvm::PoisonValue::get(ResultType)); 902 } else { // Push intrinsics operands into passthru 903 llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0]; 904 Operands.push_back(PassthruOperand); 905 } 906 907 Operands.push_back(Ops[Offset]); // Ptr 908 if (IsMasked) 909 Operands.push_back(Ops[0]); 910 Operands.push_back(Ops[Offset + 2]); // vl 911 if (IsMasked) 912 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 913 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 914 915 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 916 917 llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); 918 // Get alignment from the new vl operand 919 clang::CharUnits Align = 920 CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType()); 921 922 llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0); 923 924 // Store new_vl 925 llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1); 926 Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align)); 927 928 if (ReturnValue.isNull()) 929 return ReturnTuple; 930 else 931 return Builder.CreateStore(ReturnTuple, ReturnValue.getValue()); 932 } 933 }] in { 934 defvar T = "(Tuple:" # nf # ")"; 935 def : RVVBuiltin<T # "v", T # "vPCePz", type>; 936 if !not(IsFloat<type>.val) then { 937 def : RVVBuiltin<T # "Uv", T # "UvPCUePz", type>; 938 } 939 } 940 } 941 } 942} 943 944multiclass RVVStridedSegLoadTuple<string op> { 945 foreach type = TypeList in { 946 defvar eew = !cond(!eq(type, "c") : "8", 947 !eq(type, "s") : "16", 948 !eq(type, "i") : "32", 949 !eq(type, "l") : "64", 950 !eq(type, "x") : "16", 951 !eq(type, "f") : "32", 952 !eq(type, "d") : "64", 953 !eq(type, "y") : "16"); 954 foreach nf = NFList in { 955 let Name = op # nf # "e" # eew # "_v", 956 IRName = op # nf, 957 MaskedIRName = op # nf # "_mask", 958 NF = nf, 959 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 960 !if(!eq(type, "y"), ["Zvfbfmin"], 961 []<string>)), 962 ManualCodegen = [{ 963 { 964 if (IsMasked) 965 IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()}; 966 else 967 IntrinsicTypes = {ResultType, Ops.back()->getType()}; 968 SmallVector<llvm::Value*, 7> Operands; 969 970 bool NoPassthru = 971 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) | 972 (!IsMasked && (PolicyAttrs & RVV_VTA)); 973 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; 974 975 if (NoPassthru) { // Push poison into passthru 976 Operands.push_back(llvm::PoisonValue::get(ResultType)); 977 } else { // Push intrinsics operands into passthru 978 llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0]; 979 Operands.push_back(PassthruOperand); 980 } 981 982 Operands.push_back(Ops[Offset]); // Ptr 983 Operands.push_back(Ops[Offset + 1]); // Stride 984 if (IsMasked) 985 Operands.push_back(Ops[0]); 986 Operands.push_back(Ops[Offset + 2]); // VL 987 if (IsMasked) 988 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 989 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 990 991 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 992 llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); 993 994 if (ReturnValue.isNull()) 995 return LoadValue; 996 else 997 return Builder.CreateStore(LoadValue, ReturnValue.getValue()); 998 } 999 }] in { 1000 defvar T = "(Tuple:" # nf # ")"; 1001 def : RVVBuiltin<T # "v", T # "vPCet", type>; 1002 if !not(IsFloat<type>.val) then { 1003 def : RVVBuiltin<T # "Uv", T # "UvPCUet", type>; 1004 } 1005 } 1006 } 1007 } 1008} 1009 1010multiclass RVVStridedSegStoreTuple<string op> { 1011 foreach type = TypeList in { 1012 defvar eew = !cond(!eq(type, "c") : "8", 1013 !eq(type, "s") : "16", 1014 !eq(type, "i") : "32", 1015 !eq(type, "l") : "64", 1016 !eq(type, "x") : "16", 1017 !eq(type, "f") : "32", 1018 !eq(type, "d") : "64", 1019 !eq(type, "y") : "16"); 1020 foreach nf = NFList in { 1021 let Name = op # nf # "e" # eew # "_v", 1022 IRName = op # nf, 1023 MaskedIRName = op # nf # "_mask", 1024 NF = nf, 1025 HasMaskedOffOperand = false, 1026 MaskedPolicyScheme = NonePolicy, 1027 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 1028 !if(!eq(type, "y"), ["Zvfbfmin"], 1029 []<string>)), 1030 ManualCodegen = [{ 1031 { 1032 // Masked 1033 // Builtin: (mask, ptr, stride, v_tuple, vl) 1034 // Intrinsic: (tuple, ptr, stride, mask, vl) 1035 // Unmasked 1036 // Builtin: (ptr, stride, v_tuple, vl) 1037 // Intrinsic: (tuple, ptr, stride, vl) 1038 unsigned Offset = IsMasked ? 1 : 0; 1039 1040 SmallVector<llvm::Value*, 6> Operands; 1041 Operands.push_back(Ops[Offset + 2]); // tuple 1042 Operands.push_back(Ops[Offset]); // Ptr 1043 Operands.push_back(Ops[Offset + 1]); // Stride 1044 if (IsMasked) 1045 Operands.push_back(Ops[0]); 1046 Operands.push_back(Ops[Offset + 3]); // VL 1047 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 1048 1049 if (IsMasked) 1050 IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType(), Ops[0]->getType()}; 1051 else 1052 IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()}; 1053 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1054 return Builder.CreateCall(F, Operands, ""); 1055 } 1056 }] in { 1057 defvar T = "(Tuple:" # nf # ")"; 1058 def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>; 1059 if !not(IsFloat<type>.val) then { 1060 def : RVVBuiltin<T # "Uv", "0PUet" # T # "Uv", type>; 1061 } 1062 } 1063 } 1064 } 1065} 1066 1067multiclass RVVIndexedSegLoadTuple<string op> { 1068 foreach type = TypeList in { 1069 foreach eew_info = EEWList in { 1070 defvar eew = eew_info[0]; 1071 defvar eew_type = eew_info[1]; 1072 foreach nf = NFList in { 1073 let Name = op # nf # "ei" # eew # "_v", 1074 IRName = op # nf, 1075 MaskedIRName = op # nf # "_mask", 1076 NF = nf, 1077 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 1078 !if(!eq(type, "y"), ["Zvfbfmin"], 1079 []<string>)), 1080 ManualCodegen = [{ 1081 { 1082 SmallVector<llvm::Value*, 7> Operands; 1083 1084 bool NoPassthru = 1085 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) | 1086 (!IsMasked && (PolicyAttrs & RVV_VTA)); 1087 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; 1088 1089 if (NoPassthru) { // Push poison into passthru 1090 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1091 } else { // Push intrinsics operands into passthru 1092 llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0]; 1093 Operands.push_back(PassthruOperand); 1094 } 1095 1096 Operands.push_back(Ops[Offset]); // Ptr 1097 Operands.push_back(Ops[Offset + 1]); // Idx 1098 if (IsMasked) 1099 Operands.push_back(Ops[0]); 1100 Operands.push_back(Ops[Offset + 2]); // VL 1101 if (IsMasked) 1102 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1103 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 1104 1105 if (IsMasked) 1106 IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), 1107 Ops[0]->getType(), 1108 Ops.back()->getType()}; 1109 else 1110 IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), 1111 Ops.back()->getType()}; 1112 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1113 llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); 1114 1115 if (ReturnValue.isNull()) 1116 return LoadValue; 1117 else 1118 return Builder.CreateStore(LoadValue, ReturnValue.getValue()); 1119 } 1120 }] in { 1121 defvar T = "(Tuple:" # nf # ")"; 1122 def : RVVBuiltin<T # "v", T # "vPCe" # eew_type # "Uv", type>; 1123 if !not(IsFloat<type>.val) then { 1124 def : RVVBuiltin<T # "Uv", T # "UvPCUe" # eew_type # "Uv", type>; 1125 } 1126 } 1127 } 1128 } 1129 } 1130} 1131 1132multiclass RVVIndexedSegStoreTuple<string op> { 1133 foreach type = TypeList in { 1134 foreach eew_info = EEWList in { 1135 defvar eew = eew_info[0]; 1136 defvar eew_type = eew_info[1]; 1137 foreach nf = NFList in { 1138 let Name = op # nf # "ei" # eew # "_v", 1139 IRName = op # nf, 1140 MaskedIRName = op # nf # "_mask", 1141 NF = nf, 1142 HasMaskedOffOperand = false, 1143 MaskedPolicyScheme = NonePolicy, 1144 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 1145 !if(!eq(type, "y"), ["Zvfbfmin"], 1146 []<string>)), 1147 ManualCodegen = [{ 1148 { 1149 // Masked 1150 // Builtin: (mask, ptr, index, v_tuple, vl) 1151 // Intrinsic: (tuple, ptr, index, mask, vl) 1152 // Unmasked 1153 // Builtin: (ptr, index, v_tuple, vl) 1154 // Intrinsic: (tuple, ptr, index, vl) 1155 unsigned Offset = IsMasked ? 1 : 0; 1156 1157 SmallVector<llvm::Value*, 6> Operands; 1158 Operands.push_back(Ops[Offset + 2]); // tuple 1159 Operands.push_back(Ops[Offset]); // Ptr 1160 Operands.push_back(Ops[Offset + 1]); // Idx 1161 if (IsMasked) 1162 Operands.push_back(Ops[0]); 1163 Operands.push_back(Ops[Offset + 3]); // VL 1164 Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW)); 1165 1166 if (IsMasked) 1167 IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(), 1168 Ops[0]->getType(), 1169 Operands.back()->getType()}; 1170 else 1171 IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(), 1172 Operands.back()->getType()}; 1173 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1174 return Builder.CreateCall(F, Operands, ""); 1175 } 1176 }] in { 1177 defvar T = "(Tuple:" # nf # ")"; 1178 def : RVVBuiltin<T # "v", "0Pe" # eew_type # "Uv" # T # "v", type>; 1179 if !not(IsFloat<type>.val) then { 1180 def : RVVBuiltin<T # "Uv", "0PUe" # eew_type # "Uv" # T # "Uv", type>; 1181 } 1182 } 1183 } 1184 } 1185 } 1186} 1187 1188// 7.8 Vector Load/Store Segment Instructions 1189let UnMaskedPolicyScheme = HasPassthruOperand, 1190 IsTuple = true in { 1191 defm : RVVUnitStridedSegLoadTuple<"vlseg">; 1192 defm : RVVUnitStridedSegLoadFFTuple<"vlseg">; 1193 defm : RVVStridedSegLoadTuple<"vlsseg">; 1194 defm : RVVIndexedSegLoadTuple<"vluxseg">; 1195 defm : RVVIndexedSegLoadTuple<"vloxseg">; 1196} 1197 1198let UnMaskedPolicyScheme = NonePolicy, 1199 MaskedPolicyScheme = NonePolicy, 1200 IsTuple = true in { 1201defm : RVVUnitStridedSegStoreTuple<"vsseg">; 1202defm : RVVStridedSegStoreTuple<"vssseg">; 1203defm : RVVIndexedSegStoreTuple<"vsuxseg">; 1204defm : RVVIndexedSegStoreTuple<"vsoxseg">; 1205} 1206 1207// 11. Vector Integer Arithmetic Instructions 1208// 11.1. Vector Single-Width Integer Add and Subtract 1209let UnMaskedPolicyScheme = HasPassthruOperand in { 1210defm vadd : RVVIntBinBuiltinSet; 1211defm vsub : RVVIntBinBuiltinSet; 1212defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil", 1213 [["vx", "v", "vve"], 1214 ["vx", "Uv", "UvUvUe"]]>; 1215} 1216defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">; 1217 1218// 11.2. Vector Widening Integer Add/Subtract 1219// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW 1220let UnMaskedPolicyScheme = HasPassthruOperand in { 1221defm vwaddu : RVVUnsignedWidenBinBuiltinSet; 1222defm vwsubu : RVVUnsignedWidenBinBuiltinSet; 1223// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW 1224defm vwadd : RVVSignedWidenBinBuiltinSet; 1225defm vwsub : RVVSignedWidenBinBuiltinSet; 1226// Widening unsigned integer add/subtract, 2*SEW = 2*SEW +/- SEW 1227defm vwaddu : RVVUnsignedWidenOp0BinBuiltinSet; 1228defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet; 1229// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW 1230defm vwadd : RVVSignedWidenOp0BinBuiltinSet; 1231defm vwsub : RVVSignedWidenOp0BinBuiltinSet; 1232} 1233defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi", 1234 [["Uw", "UwUv"]]>; 1235defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi", 1236 [["w", "wv"]]>; 1237 1238// 11.3. Vector Integer Extension 1239let UnMaskedPolicyScheme = HasPassthruOperand in { 1240let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 1241 def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">; 1242 def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">; 1243} 1244let Log2LMUL = [-3, -2, -1, 0, 1] in { 1245 def vsext_vf4 : RVVIntExt<"vsext", "q", "qv", "cs">; 1246 def vzext_vf4 : RVVIntExt<"vzext", "Uq", "UqUv", "cs">; 1247} 1248let Log2LMUL = [-3, -2, -1, 0] in { 1249 def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">; 1250 def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">; 1251} 1252} 1253 1254// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions 1255let HasMasked = false, MaskedPolicyScheme = NonePolicy in { 1256 let UnMaskedPolicyScheme = HasPassthruOperand in { 1257 defm vadc : RVVCarryinBuiltinSet; 1258 defm vsbc : RVVCarryinBuiltinSet; 1259 } 1260 defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">; 1261 defm vmadc : RVVIntMaskOutBuiltinSet; 1262 defm vmsbc : RVVCarryOutInBuiltinSet<"vmsbc_borrow_in">; 1263 defm vmsbc : RVVIntMaskOutBuiltinSet; 1264} 1265 1266// 11.5. Vector Bitwise Logical Instructions 1267let UnMaskedPolicyScheme = HasPassthruOperand in { 1268defm vand : RVVIntBinBuiltinSet; 1269defm vxor : RVVIntBinBuiltinSet; 1270defm vor : RVVIntBinBuiltinSet; 1271} 1272defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">; 1273 1274// 11.6. Vector Single-Width Shift Instructions 1275let UnMaskedPolicyScheme = HasPassthruOperand in { 1276defm vsll : RVVShiftBuiltinSet; 1277defm vsrl : RVVUnsignedShiftBuiltinSet; 1278defm vsra : RVVSignedShiftBuiltinSet; 1279 1280// 11.7. Vector Narrowing Integer Right Shift Instructions 1281defm vnsrl : RVVUnsignedNShiftBuiltinSet; 1282defm vnsra : RVVSignedNShiftBuiltinSet; 1283} 1284defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi", 1285 [["v", "vw"], 1286 ["Uv", "UvUw"]]>; 1287 1288// 11.8. Vector Integer Compare Instructions 1289let MaskedPolicyScheme = HasPassthruOperand, 1290 HasTailPolicy = false in { 1291defm vmseq : RVVIntMaskOutBuiltinSet; 1292defm vmsne : RVVIntMaskOutBuiltinSet; 1293defm vmsltu : RVVUnsignedMaskOutBuiltinSet; 1294defm vmslt : RVVSignedMaskOutBuiltinSet; 1295defm vmsleu : RVVUnsignedMaskOutBuiltinSet; 1296defm vmsle : RVVSignedMaskOutBuiltinSet; 1297defm vmsgtu : RVVUnsignedMaskOutBuiltinSet; 1298defm vmsgt : RVVSignedMaskOutBuiltinSet; 1299defm vmsgeu : RVVUnsignedMaskOutBuiltinSet; 1300defm vmsge : RVVSignedMaskOutBuiltinSet; 1301} 1302 1303// 11.9. Vector Integer Min/Max Instructions 1304let UnMaskedPolicyScheme = HasPassthruOperand in { 1305defm vminu : RVVUnsignedBinBuiltinSet; 1306defm vmin : RVVSignedBinBuiltinSet; 1307defm vmaxu : RVVUnsignedBinBuiltinSet; 1308defm vmax : RVVSignedBinBuiltinSet; 1309 1310// 11.10. Vector Single-Width Integer Multiply Instructions 1311defm vmul : RVVIntBinBuiltinSet; 1312defm vmulh : RVVSignedBinBuiltinSet; 1313defm vmulhu : RVVUnsignedBinBuiltinSet; 1314defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil", 1315 [["vv", "v", "vvUv"], 1316 ["vx", "v", "vvUe"]]>; 1317 1318// 11.11. Vector Integer Divide Instructions 1319defm vdivu : RVVUnsignedBinBuiltinSet; 1320defm vdiv : RVVSignedBinBuiltinSet; 1321defm vremu : RVVUnsignedBinBuiltinSet; 1322defm vrem : RVVSignedBinBuiltinSet; 1323} 1324 1325// 11.12. Vector Widening Integer Multiply Instructions 1326let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicyScheme = HasPassthruOperand in { 1327defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi", 1328 [["vv", "w", "wvv"], 1329 ["vx", "w", "wve"]]>; 1330defm vwmulu : RVVOutOp0Op1BuiltinSet<"vwmulu", "csi", 1331 [["vv", "Uw", "UwUvUv"], 1332 ["vx", "Uw", "UwUvUe"]]>; 1333defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi", 1334 [["vv", "w", "wvUv"], 1335 ["vx", "w", "wvUe"]]>; 1336} 1337 1338// 11.13. Vector Single-Width Integer Multiply-Add Instructions 1339let UnMaskedPolicyScheme = HasPolicyOperand in { 1340defm vmacc : RVVIntTerBuiltinSet; 1341defm vnmsac : RVVIntTerBuiltinSet; 1342defm vmadd : RVVIntTerBuiltinSet; 1343defm vnmsub : RVVIntTerBuiltinSet; 1344 1345// 11.14. Vector Widening Integer Multiply-Add Instructions 1346let HasMaskedOffOperand = false, 1347 Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 1348defm vwmaccu : RVVOutOp1Op2BuiltinSet<"vwmaccu", "csi", 1349 [["vv", "Uw", "UwUwUvUv"], 1350 ["vx", "Uw", "UwUwUeUv"]]>; 1351defm vwmacc : RVVOutOp1Op2BuiltinSet<"vwmacc", "csi", 1352 [["vv", "w", "wwvv"], 1353 ["vx", "w", "wwev"]]>; 1354defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi", 1355 [["vv", "w", "wwvUv"], 1356 ["vx", "w", "wweUv"]]>; 1357defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi", 1358 [["vx", "w", "wwUev"]]>; 1359} 1360} 1361 1362// 11.15. Vector Integer Merge Instructions 1363// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl) 1364let HasMasked = false, 1365 UnMaskedPolicyScheme = HasPassthruOperand, 1366 MaskedPolicyScheme = NonePolicy, 1367 ManualCodegen = [{ 1368 // insert poison passthru 1369 if (PolicyAttrs & RVV_VTA) 1370 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 1371 IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()}; 1372 }] in { 1373 defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil", 1374 [["vvm", "v", "vvvm"], 1375 ["vxm", "v", "vvem"], 1376 ["vvm", "Uv", "UvUvUvm"], 1377 ["vxm", "Uv", "UvUvUem"]]>; 1378} 1379 1380// 11.16. Vector Integer Move Instructions 1381let HasMasked = false, 1382 UnMaskedPolicyScheme = HasPassthruOperand, 1383 MaskedPolicyScheme = NonePolicy, 1384 OverloadedName = "vmv_v" in { 1385 defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil", 1386 [["v", "Uv", "UvUv"]]>; 1387 defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilfd", 1388 [["v", "v", "vv"]]>; 1389 let RequiredFeatures = ["Zvfhmin"] in 1390 defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "x", 1391 [["v", "v", "vv"]]>; 1392 let RequiredFeatures = ["Zvfbfmin"] in 1393 defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "y", 1394 [["v", "v", "vv"]]>; 1395 let SupportOverloading = false in 1396 defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil", 1397 [["x", "v", "ve"], 1398 ["x", "Uv", "UvUe"]]>; 1399} 1400 1401// 12. Vector Fixed-Point Arithmetic Instructions 1402let HeaderCode = 1403[{ 1404enum __RISCV_VXRM { 1405 __RISCV_VXRM_RNU = 0, 1406 __RISCV_VXRM_RNE = 1, 1407 __RISCV_VXRM_RDN = 2, 1408 __RISCV_VXRM_ROD = 3, 1409}; 1410}] in 1411def vxrm_enum : RVVHeader; 1412 1413// 12.1. Vector Single-Width Saturating Add and Subtract 1414let UnMaskedPolicyScheme = HasPassthruOperand in { 1415defm vsaddu : RVVUnsignedBinBuiltinSet; 1416defm vsadd : RVVSignedBinBuiltinSet; 1417defm vssubu : RVVUnsignedBinBuiltinSet; 1418defm vssub : RVVSignedBinBuiltinSet; 1419 1420let ManualCodegen = [{ 1421 { 1422 // LLVM intrinsic 1423 // Unmasked: (passthru, op0, op1, round_mode, vl) 1424 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 1425 1426 SmallVector<llvm::Value*, 7> Operands; 1427 bool HasMaskedOff = !( 1428 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1429 (!IsMasked && PolicyAttrs & RVV_VTA)); 1430 unsigned Offset = IsMasked ? 1431 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1432 1433 if (!HasMaskedOff) 1434 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1435 else 1436 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1437 1438 Operands.push_back(Ops[Offset]); // op0 1439 Operands.push_back(Ops[Offset + 1]); // op1 1440 1441 if (IsMasked) 1442 Operands.push_back(Ops[0]); // mask 1443 1444 Operands.push_back(Ops[Offset + 2]); // vxrm 1445 Operands.push_back(Ops[Offset + 3]); // vl 1446 1447 if (IsMasked) 1448 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1449 1450 IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()}; 1451 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1452 return Builder.CreateCall(F, Operands, ""); 1453 } 1454}] in { 1455 // 12.2. Vector Single-Width Averaging Add and Subtract 1456 defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode; 1457 defm vaadd : RVVSignedBinBuiltinSetRoundingMode; 1458 defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode; 1459 defm vasub : RVVSignedBinBuiltinSetRoundingMode; 1460 1461 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation 1462 defm vsmul : RVVSignedBinBuiltinSetRoundingMode; 1463 1464 // 12.4. Vector Single-Width Scaling Shift Instructions 1465 defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode; 1466 defm vssra : RVVSignedShiftBuiltinSetRoundingMode; 1467} 1468 1469let ManualCodegen = [{ 1470 { 1471 // LLVM intrinsic 1472 // Unmasked: (passthru, op0, op1, round_mode, vl) 1473 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 1474 1475 SmallVector<llvm::Value*, 7> Operands; 1476 bool HasMaskedOff = !( 1477 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1478 (!IsMasked && PolicyAttrs & RVV_VTA)); 1479 unsigned Offset = IsMasked ? 1480 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1481 1482 if (!HasMaskedOff) 1483 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1484 else 1485 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1486 1487 Operands.push_back(Ops[Offset]); // op0 1488 Operands.push_back(Ops[Offset + 1]); // op1 1489 1490 if (IsMasked) 1491 Operands.push_back(Ops[0]); // mask 1492 1493 Operands.push_back(Ops[Offset + 2]); // vxrm 1494 Operands.push_back(Ops[Offset + 3]); // vl 1495 1496 if (IsMasked) 1497 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1498 1499 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), 1500 Ops.back()->getType()}; 1501 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1502 return Builder.CreateCall(F, Operands, ""); 1503 } 1504}] in { 1505 // 12.5. Vector Narrowing Fixed-Point Clip Instructions 1506 defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode; 1507 defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode; 1508} 1509} 1510 1511// 13. Vector Floating-Point Instructions 1512let HeaderCode = 1513[{ 1514enum __RISCV_FRM { 1515 __RISCV_FRM_RNE = 0, 1516 __RISCV_FRM_RTZ = 1, 1517 __RISCV_FRM_RDN = 2, 1518 __RISCV_FRM_RUP = 3, 1519 __RISCV_FRM_RMM = 4, 1520}; 1521}] in def frm_enum : RVVHeader; 1522 1523let UnMaskedPolicyScheme = HasPassthruOperand in { 1524let ManualCodegen = [{ 1525 { 1526 // LLVM intrinsic 1527 // Unmasked: (passthru, op0, op1, round_mode, vl) 1528 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1529 1530 SmallVector<llvm::Value*, 7> Operands; 1531 bool HasMaskedOff = !( 1532 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1533 (!IsMasked && PolicyAttrs & RVV_VTA)); 1534 bool HasRoundModeOp = IsMasked ? 1535 (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) : 1536 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4); 1537 1538 unsigned Offset = IsMasked ? 1539 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1540 1541 if (!HasMaskedOff) 1542 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1543 else 1544 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1545 1546 Operands.push_back(Ops[Offset]); // op0 1547 Operands.push_back(Ops[Offset + 1]); // op1 1548 1549 if (IsMasked) 1550 Operands.push_back(Ops[0]); // mask 1551 1552 if (HasRoundModeOp) { 1553 Operands.push_back(Ops[Offset + 2]); // frm 1554 Operands.push_back(Ops[Offset + 3]); // vl 1555 } else { 1556 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1557 Operands.push_back(Ops[Offset + 2]); // vl 1558 } 1559 1560 if (IsMasked) 1561 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1562 1563 IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), 1564 Operands.back()->getType()}; 1565 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1566 return Builder.CreateCall(F, Operands, ""); 1567 } 1568}] in { 1569 let HasFRMRoundModeOp = true in { 1570 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 1571 defm vfadd : RVVFloatingBinBuiltinSetRoundingMode; 1572 defm vfsub : RVVFloatingBinBuiltinSetRoundingMode; 1573 defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode; 1574 1575 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1576 // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW 1577 defm vfwadd : RVVFloatingWidenOp0BinBuiltinSetRoundingMode; 1578 defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode; 1579 1580 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 1581 defm vfmul : RVVFloatingBinBuiltinSetRoundingMode; 1582 defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode; 1583 defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode; 1584 } 1585 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 1586 defm vfadd : RVVFloatingBinBuiltinSet; 1587 defm vfsub : RVVFloatingBinBuiltinSet; 1588 defm vfrsub : RVVFloatingBinVFBuiltinSet; 1589 1590 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1591 // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW 1592 defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet; 1593 defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet; 1594 1595 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 1596 defm vfmul : RVVFloatingBinBuiltinSet; 1597 defm vfdiv : RVVFloatingBinBuiltinSet; 1598 defm vfrdiv : RVVFloatingBinVFBuiltinSet; 1599} 1600 1601let ManualCodegen = [{ 1602 { 1603 // LLVM intrinsic 1604 // Unmasked: (passthru, op0, op1, round_mode, vl) 1605 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1606 1607 SmallVector<llvm::Value*, 7> Operands; 1608 bool HasMaskedOff = !( 1609 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1610 (!IsMasked && PolicyAttrs & RVV_VTA)); 1611 bool HasRoundModeOp = IsMasked ? 1612 (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) : 1613 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4); 1614 1615 unsigned Offset = IsMasked ? 1616 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1617 1618 if (!HasMaskedOff) 1619 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1620 else 1621 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1622 1623 Operands.push_back(Ops[Offset]); // op0 1624 Operands.push_back(Ops[Offset + 1]); // op1 1625 1626 if (IsMasked) 1627 Operands.push_back(Ops[0]); // mask 1628 1629 if (HasRoundModeOp) { 1630 Operands.push_back(Ops[Offset + 2]); // frm 1631 Operands.push_back(Ops[Offset + 3]); // vl 1632 } else { 1633 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1634 Operands.push_back(Ops[Offset + 2]); // vl 1635 } 1636 1637 if (IsMasked) 1638 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1639 1640 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), 1641 Ops.back()->getType()}; 1642 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1643 return Builder.CreateCall(F, Operands, ""); 1644 } 1645}] in { 1646 let HasFRMRoundModeOp = true in { 1647 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1648 // Widening FP add/subtract, 2*SEW = SEW +/- SEW 1649 defm vfwadd : RVVFloatingWidenBinBuiltinSetRoundingMode; 1650 defm vfwsub : RVVFloatingWidenBinBuiltinSetRoundingMode; 1651 1652 // 13.5. Vector Widening Floating-Point Multiply 1653 let Log2LMUL = [-2, -1, 0, 1, 2] in { 1654 defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "f", 1655 [["vv", "w", "wvvu"], 1656 ["vf", "w", "wveu"]]>; 1657 let RequiredFeatures = ["Zvfh"] in 1658 defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x", 1659 [["vv", "w", "wvvu"], 1660 ["vf", "w", "wveu"]]>; 1661 } 1662 } 1663 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1664 // Widening FP add/subtract, 2*SEW = SEW +/- SEW 1665 defm vfwadd : RVVFloatingWidenBinBuiltinSet; 1666 defm vfwsub : RVVFloatingWidenBinBuiltinSet; 1667 1668 // 13.5. Vector Widening Floating-Point Multiply 1669 let Log2LMUL = [-2, -1, 0, 1, 2] in { 1670 defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "f", 1671 [["vv", "w", "wvv"], 1672 ["vf", "w", "wve"]]>; 1673 let RequiredFeatures = ["Zvfh"] in 1674 defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x", 1675 [["vv", "w", "wvv"], 1676 ["vf", "w", "wve"]]>; 1677 } 1678} 1679} 1680 1681 1682let UnMaskedPolicyScheme = HasPolicyOperand in { 1683let ManualCodegen = [{ 1684 { 1685 // LLVM intrinsic 1686 // Unmasked: (passthru, op0, op1, round_mode, vl) 1687 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1688 1689 SmallVector<llvm::Value*, 7> Operands; 1690 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5; 1691 1692 unsigned Offset = IsMasked ? 2 : 1; 1693 1694 Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough 1695 1696 Operands.push_back(Ops[Offset]); // op0 1697 Operands.push_back(Ops[Offset + 1]); // op1 1698 1699 if (IsMasked) 1700 Operands.push_back(Ops[0]); // mask 1701 1702 if (HasRoundModeOp) { 1703 Operands.push_back(Ops[Offset + 2]); // frm 1704 Operands.push_back(Ops[Offset + 3]); // vl 1705 } else { 1706 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1707 Operands.push_back(Ops[Offset + 2]); // vl 1708 } 1709 1710 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1711 1712 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), 1713 Operands.back()->getType()}; 1714 1715 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1716 1717 return Builder.CreateCall(F, Operands, ""); 1718 } 1719}] in { 1720 let HasFRMRoundModeOp = 1 in { 1721 // 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions 1722 defm vfmacc : RVVFloatingTerBuiltinSetRoundingMode; 1723 defm vfnmacc : RVVFloatingTerBuiltinSetRoundingMode; 1724 defm vfmsac : RVVFloatingTerBuiltinSetRoundingMode; 1725 defm vfnmsac : RVVFloatingTerBuiltinSetRoundingMode; 1726 defm vfmadd : RVVFloatingTerBuiltinSetRoundingMode; 1727 defm vfnmadd : RVVFloatingTerBuiltinSetRoundingMode; 1728 defm vfmsub : RVVFloatingTerBuiltinSetRoundingMode; 1729 defm vfnmsub : RVVFloatingTerBuiltinSetRoundingMode; 1730 } 1731 // 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions 1732 defm vfmacc : RVVFloatingTerBuiltinSet; 1733 defm vfnmacc : RVVFloatingTerBuiltinSet; 1734 defm vfmsac : RVVFloatingTerBuiltinSet; 1735 defm vfnmsac : RVVFloatingTerBuiltinSet; 1736 defm vfmadd : RVVFloatingTerBuiltinSet; 1737 defm vfnmadd : RVVFloatingTerBuiltinSet; 1738 defm vfmsub : RVVFloatingTerBuiltinSet; 1739 defm vfnmsub : RVVFloatingTerBuiltinSet; 1740} 1741 1742let ManualCodegen = [{ 1743 { 1744 // LLVM intrinsic 1745 // Unmasked: (passthru, op0, op1, round_mode, vl) 1746 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1747 1748 SmallVector<llvm::Value*, 7> Operands; 1749 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5; 1750 1751 unsigned Offset = IsMasked ? 2 : 1; 1752 1753 Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough 1754 1755 Operands.push_back(Ops[Offset]); // op0 1756 Operands.push_back(Ops[Offset + 1]); // op1 1757 1758 if (IsMasked) 1759 Operands.push_back(Ops[0]); // mask 1760 1761 if (HasRoundModeOp) { 1762 Operands.push_back(Ops[Offset + 2]); // frm 1763 Operands.push_back(Ops[Offset + 3]); // vl 1764 } else { 1765 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1766 Operands.push_back(Ops[Offset + 2]); // vl 1767 } 1768 1769 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1770 1771 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), 1772 Operands.back()->getType()}; 1773 1774 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1775 1776 return Builder.CreateCall(F, Operands, ""); 1777 } 1778}] in { 1779 let HasFRMRoundModeOp = 1 in { 1780 // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 1781 defm vfwmacc : RVVFloatingWidenTerBuiltinSetRoundingMode; 1782 defm vfwnmacc : RVVFloatingWidenTerBuiltinSetRoundingMode; 1783 defm vfwmsac : RVVFloatingWidenTerBuiltinSetRoundingMode; 1784 defm vfwnmsac : RVVFloatingWidenTerBuiltinSetRoundingMode; 1785 1786 // Vector BF16 widening multiply-accumulate 1787 let Log2LMUL = [-2, -1, 0, 1, 2], 1788 RequiredFeatures = ["Zvfbfwma"], 1789 HasMaskedOffOperand = false in 1790 defm vfwmaccbf16 : RVVOutOp1Op2BuiltinSet<"vfwmaccbf16", "y", 1791 [["vv", "Fw", "FwFwvvu"], 1792 ["vf", "Fw", "FwFwevu"]]>; 1793 } 1794 // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 1795 defm vfwmacc : RVVFloatingWidenTerBuiltinSet; 1796 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet; 1797 defm vfwmsac : RVVFloatingWidenTerBuiltinSet; 1798 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet; 1799 1800 // Vector BF16 widening multiply-accumulate 1801 let Log2LMUL = [-2, -1, 0, 1, 2], 1802 RequiredFeatures = ["Zvfbfwma"], 1803 HasMaskedOffOperand = false in 1804 defm vfwmaccbf16 : RVVOutOp1Op2BuiltinSet<"vfwmaccbf16", "y", 1805 [["vv", "Fw", "FwFwvv"], 1806 ["vf", "Fw", "FwFwev"]]>; 1807} 1808 1809} 1810 1811let UnMaskedPolicyScheme = HasPassthruOperand in { 1812let ManualCodegen = [{ 1813 { 1814 // LLVM intrinsic 1815 // Unmasked: (passthru, op0, round_mode, vl) 1816 // Masked: (passthru, op0, mask, frm, vl, policy) 1817 1818 SmallVector<llvm::Value*, 7> Operands; 1819 bool HasMaskedOff = !( 1820 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1821 (!IsMasked && PolicyAttrs & RVV_VTA)); 1822 bool HasRoundModeOp = IsMasked ? 1823 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) : 1824 (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3); 1825 1826 unsigned Offset = IsMasked ? 1827 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1828 1829 if (!HasMaskedOff) 1830 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1831 else 1832 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1833 1834 Operands.push_back(Ops[Offset]); // op0 1835 1836 if (IsMasked) 1837 Operands.push_back(Ops[0]); // mask 1838 1839 if (HasRoundModeOp) { 1840 Operands.push_back(Ops[Offset + 1]); // frm 1841 Operands.push_back(Ops[Offset + 2]); // vl 1842 } else { 1843 Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm 1844 Operands.push_back(Ops[Offset + 1]); // vl 1845 } 1846 1847 if (IsMasked) 1848 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1849 1850 IntrinsicTypes = {ResultType, Operands.back()->getType()}; 1851 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1852 return Builder.CreateCall(F, Operands, ""); 1853 } 1854}] in { 1855 let HasFRMRoundModeOp = 1 in { 1856 // 13.8. Vector Floating-Point Square-Root Instruction 1857 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vvu"]]>; 1858 let RequiredFeatures = ["Zvfh"] in 1859 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "x", [["v", "v", "vvu"]]>; 1860 1861 // 13.10. Vector Floating-Point Reciprocal Estimate Instruction 1862 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>; 1863 let RequiredFeatures = ["Zvfh"] in 1864 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>; 1865 } 1866 // 13.8. Vector Floating-Point Square-Root Instruction 1867 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>; 1868 let RequiredFeatures = ["Zvfh"] in 1869 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "x", [["v", "v", "vv"]]>; 1870 1871 // 13.10. Vector Floating-Point Reciprocal Estimate Instruction 1872 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>; 1873 let RequiredFeatures = ["Zvfh"] in 1874 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>; 1875} 1876 1877// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction 1878defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>; 1879let RequiredFeatures = ["Zvfh"] in 1880 defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>; 1881 1882// 13.11. Vector Floating-Point MIN/MAX Instructions 1883defm vfmin : RVVFloatingBinBuiltinSet; 1884defm vfmax : RVVFloatingBinBuiltinSet; 1885 1886// 13.12. Vector Floating-Point Sign-Injection Instructions 1887defm vfsgnj : RVVFloatingBinBuiltinSet; 1888defm vfsgnjn : RVVFloatingBinBuiltinSet; 1889defm vfsgnjx : RVVFloatingBinBuiltinSet; 1890} 1891defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">; 1892let RequiredFeatures = ["Zvfh"] in 1893 defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "x">; 1894defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "fd">; 1895let RequiredFeatures = ["Zvfh"] in 1896 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "x">; 1897 1898// 13.13. Vector Floating-Point Compare Instructions 1899let MaskedPolicyScheme = HasPassthruOperand, 1900 HasTailPolicy = false in { 1901defm vmfeq : RVVFloatingMaskOutBuiltinSet; 1902defm vmfne : RVVFloatingMaskOutBuiltinSet; 1903defm vmflt : RVVFloatingMaskOutBuiltinSet; 1904defm vmfle : RVVFloatingMaskOutBuiltinSet; 1905defm vmfgt : RVVFloatingMaskOutBuiltinSet; 1906defm vmfge : RVVFloatingMaskOutBuiltinSet; 1907} 1908 1909// 13.14. Vector Floating-Point Classify Instruction 1910let UnMaskedPolicyScheme = HasPassthruOperand in { 1911defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>; 1912let RequiredFeatures = ["Zvfh"] in 1913 defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>; 1914} 1915 1916// 13.15. Vector Floating-Point Merge Instruction 1917// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl) 1918let HasMasked = false, 1919 UnMaskedPolicyScheme = HasPassthruOperand, 1920 MaskedPolicyScheme = NonePolicy, 1921 ManualCodegen = [{ 1922 // insert poison passthru 1923 if (PolicyAttrs & RVV_VTA) 1924 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 1925 IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()}; 1926 }] in { 1927 defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "fd", 1928 [["vvm", "v", "vvvm"]]>; 1929 let RequiredFeatures = ["Zvfhmin"] in 1930 defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "x", 1931 [["vvm", "v", "vvvm"]]>; 1932 let RequiredFeatures = ["Zvfbfmin"] in 1933 defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "y", 1934 [["vvm", "v", "vvvm"]]>; 1935 defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "fd", 1936 [["vfm", "v", "vvem"]]>; 1937 let RequiredFeatures = ["Zvfh"] in 1938 defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x", 1939 [["vfm", "v", "vvem"]]>; 1940} 1941 1942// 13.16. Vector Floating-Point Move Instruction 1943let HasMasked = false, 1944 UnMaskedPolicyScheme = HasPassthruOperand, 1945 SupportOverloading = false, 1946 MaskedPolicyScheme = NonePolicy, 1947 OverloadedName = "vfmv_v" in { 1948 defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "fd", 1949 [["f", "v", "ve"]]>; 1950 let RequiredFeatures = ["Zvfh"] in 1951 defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x", 1952 [["f", "v", "ve"]]>; 1953} 1954 1955// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions 1956let UnMaskedPolicyScheme = HasPassthruOperand in { 1957let OverloadedName = "vfcvt_rtz_xu" in { 1958 defm : RVVConvBuiltinSet<"vfcvt_rtz_xu_f_v", "fd", [["Uv", "Uvv"]]>; 1959 let RequiredFeatures = ["Zvfh"] in 1960 defm : RVVConvBuiltinSet<"vfcvt_rtz_xu_f_v", "x", [["Uv", "Uvv"]]>; 1961} 1962let OverloadedName = "vfcvt_rtz_x" in { 1963 defm : RVVConvBuiltinSet<"vfcvt_rtz_x_f_v", "fd", [["Iv", "Ivv"]]>; 1964 let RequiredFeatures = ["Zvfh"] in 1965 defm : RVVConvBuiltinSet<"vfcvt_rtz_x_f_v", "x", [["Iv", "Ivv"]]>; 1966} 1967 1968// 13.18. Widening Floating-Point/Integer Type-Convert Instructions 1969let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 1970 let OverloadedName = "vfwcvt_rtz_xu" in { 1971 defm : RVVConvBuiltinSet<"vfwcvt_rtz_xu_f_v", "f", [["Uw", "Uwv"]]>; 1972 let RequiredFeatures = ["Zvfh"] in 1973 defm : RVVConvBuiltinSet<"vfwcvt_rtz_xu_f_v", "x", [["Uw", "Uwv"]]>; 1974 } 1975 let OverloadedName = "vfwcvt_rtz_x" in { 1976 defm : RVVConvBuiltinSet<"vfwcvt_rtz_x_f_v", "f", [["Iw", "Iwv"]]>; 1977 let RequiredFeatures = ["Zvfh"] in 1978 defm : RVVConvBuiltinSet<"vfwcvt_rtz_x_f_v", "x", [["Iw", "Iwv"]]>; 1979 } 1980 let OverloadedName = "vfwcvt_f" in { 1981 defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "si", [["Fw", "FwUv"]]>; 1982 defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "si", [["Fw", "Fwv"]]>; 1983 let RequiredFeatures = ["Zvfh"] in { 1984 defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Fw", "FwUv"]]>; 1985 defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>; 1986 } 1987 } 1988 let OverloadedName = "vfwcvt_f" in { 1989 defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>; 1990 let RequiredFeatures = ["Zvfhmin"] in 1991 defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>; 1992 } 1993} 1994 1995// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions 1996let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 1997 let OverloadedName = "vfncvt_rtz_xu" in { 1998 defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>; 1999 let RequiredFeatures = ["Zvfh"] in 2000 defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>; 2001 } 2002 let OverloadedName = "vfncvt_rtz_x" in { 2003 defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>; 2004 let RequiredFeatures = ["Zvfh"] in 2005 defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>; 2006 } 2007 let OverloadedName = "vfncvt_rod_f" in { 2008 defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>; 2009 let RequiredFeatures = ["Zvfh"] in 2010 defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>; 2011 } 2012} 2013 2014// Zvfbfmin - Vector convert BF16 to FP32 2015let Log2LMUL = [-2, -1, 0, 1, 2] in 2016def vfwcvtbf16_f_f_v : RVVConvBuiltin<"Fw", "Fwv", "y", "vfwcvtbf16_f">; 2017 2018let ManualCodegen = [{ 2019 { 2020 // LLVM intrinsic 2021 // Unmasked: (passthru, op0, frm, vl) 2022 // Masked: (passthru, op0, mask, frm, vl, policy) 2023 SmallVector<llvm::Value*, 7> Operands; 2024 bool HasMaskedOff = !( 2025 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 2026 (!IsMasked && PolicyAttrs & RVV_VTA)); 2027 bool HasRoundModeOp = IsMasked ? 2028 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) : 2029 (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3); 2030 2031 unsigned Offset = IsMasked ? 2032 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 2033 2034 if (!HasMaskedOff) 2035 Operands.push_back(llvm::PoisonValue::get(ResultType)); 2036 else 2037 Operands.push_back(Ops[IsMasked ? 1 : 0]); 2038 2039 Operands.push_back(Ops[Offset]); // op0 2040 2041 if (IsMasked) 2042 Operands.push_back(Ops[0]); // mask 2043 2044 if (HasRoundModeOp) { 2045 Operands.push_back(Ops[Offset + 1]); // frm 2046 Operands.push_back(Ops[Offset + 2]); // vl 2047 } else { 2048 Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm 2049 Operands.push_back(Ops[Offset + 1]); // vl 2050 } 2051 2052 if (IsMasked) 2053 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 2054 2055 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), 2056 Operands.back()->getType()}; 2057 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 2058 return Builder.CreateCall(F, Operands, ""); 2059 } 2060}] in { 2061 let HasFRMRoundModeOp = 1 in { 2062 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions 2063 let OverloadedName = "vfcvt_x" in { 2064 defm : RVVConvBuiltinSet<"vfcvt_x_f_v", "fd", [["Iv", "Ivvu"]]>; 2065 let RequiredFeatures = ["Zvfh"] in 2066 defm : RVVConvBuiltinSet<"vfcvt_x_f_v", "x", [["Iv", "Ivvu"]]>; 2067 } 2068 let OverloadedName = "vfcvt_xu" in { 2069 defm : RVVConvBuiltinSet<"vfcvt_xu_f_v", "fd", [["Uv", "Uvvu"]]>; 2070 let RequiredFeatures = ["Zvfh"] in 2071 defm : RVVConvBuiltinSet<"vfcvt_xu_f_v", "x", [["Uv", "Uvvu"]]>; 2072 } 2073 let OverloadedName = "vfcvt_f" in { 2074 defm : RVVConvBuiltinSet<"vfcvt_f_x_v", "fd", [["v", "vIvu"]]>; 2075 defm : RVVConvBuiltinSet<"vfcvt_f_xu_v", "fd", [["v", "vUvu"]]>; 2076 let RequiredFeatures = ["Zvfh"] in { 2077 defm : RVVConvBuiltinSet<"vfcvt_f_x_v", "x", [["v", "vIvu"]]>; 2078 defm : RVVConvBuiltinSet<"vfcvt_f_xu_v", "x", [["v", "vUvu"]]>; 2079 } 2080 } 2081 2082 // 13.18. Widening Floating-Point/Integer Type-Convert Instructions 2083 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 2084 let OverloadedName = "vfwcvt_x" in { 2085 defm : RVVConvBuiltinSet<"vfwcvt_x_f_v", "f", [["Iw", "Iwvu"]]>; 2086 let RequiredFeatures = ["Zvfh"] in 2087 defm : RVVConvBuiltinSet<"vfwcvt_x_f_v", "x", [["Iw", "Iwvu"]]>; 2088 } 2089 let OverloadedName = "vfwcvt_xu" in { 2090 defm : RVVConvBuiltinSet<"vfwcvt_xu_f_v", "f", [["Uw", "Uwvu"]]>; 2091 let RequiredFeatures = ["Zvfh"] in 2092 defm : RVVConvBuiltinSet<"vfwcvt_xu_f_v", "x", [["Uw", "Uwvu"]]>; 2093 } 2094 } 2095 // 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions 2096 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 2097 let OverloadedName = "vfncvt_x" in { 2098 defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>; 2099 let RequiredFeatures = ["Zvfh"] in 2100 defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>; 2101 } 2102 let OverloadedName = "vfncvt_xu" in { 2103 defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>; 2104 let RequiredFeatures = ["Zvfh"] in 2105 defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>; 2106 } 2107 let OverloadedName = "vfncvt_f" in { 2108 defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>; 2109 defm : RVVConvBuiltinSet<"vfncvt_f_xu_w", "f", [["v", "vUwu"]]>; 2110 let RequiredFeatures = ["Zvfh"] in { 2111 defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "x", [["v", "vIwu"]]>; 2112 defm : RVVConvBuiltinSet<"vfncvt_f_xu_w", "x", [["v", "vUwu"]]>; 2113 } 2114 } 2115 let OverloadedName = "vfncvt_f" in { 2116 defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "f", [["v", "vwu"]]>; 2117 let RequiredFeatures = ["Zvfhmin"] in 2118 defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>; 2119 } 2120 } 2121 2122 // Zvfbfmin - Vector convert FP32 to BF16 2123 let Log2LMUL = [-2, -1, 0, 1, 2], 2124 OverloadedName = "vfncvtbf16_f" in 2125 defm : RVVConvBuiltinSet<"vfncvtbf16_f_f_w", "y", [["v", "vFwu"]]>; 2126 } 2127 2128 // 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions 2129 let OverloadedName = "vfcvt_x" in { 2130 defm : RVVConvBuiltinSet<"vfcvt_x_f_v", "fd", [["Iv", "Ivv"]]>; 2131 let RequiredFeatures = ["Zvfh"] in 2132 defm : RVVConvBuiltinSet<"vfcvt_x_f_v", "x", [["Iv", "Ivv"]]>; 2133 } 2134 let OverloadedName = "vfcvt_xu" in { 2135 defm : RVVConvBuiltinSet<"vfcvt_xu_f_v", "fd", [["Uv", "Uvv"]]>; 2136 let RequiredFeatures = ["Zvfh"] in 2137 defm : RVVConvBuiltinSet<"vfcvt_xu_f_v", "x", [["Uv", "Uvv"]]>; 2138 } 2139 let OverloadedName = "vfcvt_f" in { 2140 defm : RVVConvBuiltinSet<"vfcvt_f_x_v", "fd", [["v", "vIv"]]>; 2141 defm : RVVConvBuiltinSet<"vfcvt_f_xu_v", "fd", [["v", "vUv"]]>; 2142 let RequiredFeatures = ["Zvfh"] in { 2143 defm : RVVConvBuiltinSet<"vfcvt_f_x_v", "x", [["v", "vIv"]]>; 2144 defm : RVVConvBuiltinSet<"vfcvt_f_xu_v", "x", [["v", "vUv"]]>; 2145 } 2146 } 2147 2148 // 13.18. Widening Floating-Point/Integer Type-Convert Instructions 2149 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 2150 let OverloadedName = "vfwcvt_x" in { 2151 defm : RVVConvBuiltinSet<"vfwcvt_x_f_v", "f", [["Iw", "Iwv"]]>; 2152 let RequiredFeatures = ["Zvfh"] in 2153 defm : RVVConvBuiltinSet<"vfwcvt_x_f_v", "x", [["Iw", "Iwv"]]>; 2154 } 2155 let OverloadedName = "vfwcvt_xu" in { 2156 defm : RVVConvBuiltinSet<"vfwcvt_xu_f_v", "f", [["Uw", "Uwv"]]>; 2157 let RequiredFeatures = ["Zvfh"] in 2158 defm : RVVConvBuiltinSet<"vfwcvt_xu_f_v", "x", [["Uw", "Uwv"]]>; 2159 } 2160 } 2161 // 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions 2162 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { 2163 let OverloadedName = "vfncvt_x" in { 2164 defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>; 2165 let RequiredFeatures = ["Zvfh"] in 2166 defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>; 2167 } 2168 let OverloadedName = "vfncvt_xu" in { 2169 defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>; 2170 let RequiredFeatures = ["Zvfh"] in 2171 defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>; 2172 } 2173 let OverloadedName = "vfncvt_f" in { 2174 defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>; 2175 defm : RVVConvBuiltinSet<"vfncvt_f_xu_w", "f", [["v", "vUw"]]>; 2176 let RequiredFeatures = ["Zvfh"] in { 2177 defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "x", [["v", "vIw"]]>; 2178 defm : RVVConvBuiltinSet<"vfncvt_f_xu_w", "x", [["v", "vUw"]]>; 2179 } 2180 } 2181 let OverloadedName = "vfncvt_f" in { 2182 defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "f", [["v", "vw"]]>; 2183 let RequiredFeatures = ["Zvfhmin"] in 2184 defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>; 2185 } 2186 } 2187 2188 // Zvfbfmin - Vector convert FP32 to BF16 2189 let Log2LMUL = [-2, -1, 0, 1, 2], 2190 OverloadedName = "vfncvtbf16_f" in 2191 defm : RVVConvBuiltinSet<"vfncvtbf16_f_f_w", "y", [["v", "vFw"]]>; 2192} 2193} 2194 2195// 14. Vector Reduction Operations 2196// 14.1. Vector Single-Width Integer Reduction Instructions 2197let UnMaskedPolicyScheme = HasPassthruOperand, 2198 MaskedPolicyScheme = HasPassthruOperand, 2199 HasMaskPolicy = false in { 2200defm vredsum : RVVIntReductionBuiltinSet; 2201defm vredmaxu : RVVUnsignedReductionBuiltin; 2202defm vredmax : RVVSignedReductionBuiltin; 2203defm vredminu : RVVUnsignedReductionBuiltin; 2204defm vredmin : RVVSignedReductionBuiltin; 2205defm vredand : RVVIntReductionBuiltinSet; 2206defm vredor : RVVIntReductionBuiltinSet; 2207defm vredxor : RVVIntReductionBuiltinSet; 2208 2209// 14.2. Vector Widening Integer Reduction Instructions 2210// Vector Widening Integer Reduction Operations 2211let HasMaskedOffOperand = true in { 2212 defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi", 2213 [["vs", "vSw", "SwvSw"]]>; 2214 defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi", 2215 [["vs", "UvUSw", "USwUvUSw"]]>; 2216} 2217 2218// 14.3. Vector Single-Width Floating-Point Reduction Instructions 2219defm vfredmax : RVVFloatingReductionBuiltin; 2220defm vfredmin : RVVFloatingReductionBuiltin; 2221let ManualCodegen = [{ 2222 { 2223 // LLVM intrinsic 2224 // Unmasked: (passthru, op0, op1, round_mode, vl) 2225 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 2226 2227 SmallVector<llvm::Value*, 7> Operands; 2228 bool HasMaskedOff = !( 2229 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 2230 (!IsMasked && PolicyAttrs & RVV_VTA)); 2231 bool HasRoundModeOp = IsMasked ? 2232 (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) : 2233 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4); 2234 2235 unsigned Offset = IsMasked ? 2236 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 2237 2238 if (!HasMaskedOff) 2239 Operands.push_back(llvm::PoisonValue::get(ResultType)); 2240 else 2241 Operands.push_back(Ops[IsMasked ? 1 : 0]); 2242 2243 Operands.push_back(Ops[Offset]); // op0 2244 Operands.push_back(Ops[Offset + 1]); // op1 2245 2246 if (IsMasked) 2247 Operands.push_back(Ops[0]); // mask 2248 2249 if (HasRoundModeOp) { 2250 Operands.push_back(Ops[Offset + 2]); // frm 2251 Operands.push_back(Ops[Offset + 3]); // vl 2252 } else { 2253 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 2254 Operands.push_back(Ops[Offset + 2]); // vl 2255 } 2256 2257 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), 2258 Ops.back()->getType()}; 2259 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 2260 return Builder.CreateCall(F, Operands, ""); 2261 } 2262}] in { 2263 let HasFRMRoundModeOp = 1 in { 2264 // 14.3. Vector Single-Width Floating-Point Reduction Instructions 2265 defm vfredusum : RVVFloatingReductionBuiltinRoundingMode; 2266 defm vfredosum : RVVFloatingReductionBuiltinRoundingMode; 2267 2268 // 14.4. Vector Widening Floating-Point Reduction Instructions 2269 defm vfwredusum : RVVFloatingWidenReductionBuiltinRoundingMode; 2270 defm vfwredosum : RVVFloatingWidenReductionBuiltinRoundingMode; 2271 } 2272 // 14.3. Vector Single-Width Floating-Point Reduction Instructions 2273 defm vfredusum : RVVFloatingReductionBuiltin; 2274 defm vfredosum : RVVFloatingReductionBuiltin; 2275 2276 // 14.4. Vector Widening Floating-Point Reduction Instructions 2277 defm vfwredusum : RVVFloatingWidenReductionBuiltin; 2278 defm vfwredosum : RVVFloatingWidenReductionBuiltin; 2279} 2280} 2281 2282// 15. Vector Mask Instructions 2283// 15.1. Vector Mask-Register Logical Instructions 2284def vmand : RVVMaskBinBuiltin; 2285def vmnand : RVVMaskBinBuiltin; 2286def vmandn : RVVMaskBinBuiltin; 2287def vmxor : RVVMaskBinBuiltin; 2288def vmor : RVVMaskBinBuiltin; 2289def vmnor : RVVMaskBinBuiltin; 2290def vmorn : RVVMaskBinBuiltin; 2291def vmxnor : RVVMaskBinBuiltin; 2292// pseudoinstructions 2293def vmclr : RVVMaskNullaryBuiltin; 2294def vmset : RVVMaskNullaryBuiltin; 2295defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">; 2296defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">; 2297 2298let MaskedPolicyScheme = NonePolicy in { 2299// 15.2. Vector count population in mask vcpop.m 2300def vcpop : RVVMaskOp0Builtin<"um">; 2301 2302// 15.3. vfirst find-first-set mask bit 2303def vfirst : RVVMaskOp0Builtin<"lm">; 2304} 2305 2306let MaskedPolicyScheme = HasPassthruOperand, 2307 HasTailPolicy = false in { 2308// 15.4. vmsbf.m set-before-first mask bit 2309def vmsbf : RVVMaskUnaryBuiltin; 2310 2311// 15.5. vmsif.m set-including-first mask bit 2312def vmsif : RVVMaskUnaryBuiltin; 2313 2314// 15.6. vmsof.m set-only-first mask bit 2315def vmsof : RVVMaskUnaryBuiltin; 2316} 2317 2318let UnMaskedPolicyScheme = HasPassthruOperand, SupportOverloading = false in { 2319 // 15.8. Vector Iota Instruction 2320 defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>; 2321 2322 // 15.9. Vector Element Index Instruction 2323 defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"], 2324 ["v", "Uv", "Uv"]]>; 2325} 2326 2327// 16. Vector Permutation Instructions 2328// 16.1. Integer Scalar Move Instructions 2329let HasMasked = false, MaskedPolicyScheme = NonePolicy in { 2330 let HasVL = false, OverloadedName = "vmv_x" in 2331 defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil", 2332 [["s", "ve", "ev"], 2333 ["s", "UvUe", "UeUv"]]>; 2334 let OverloadedName = "vmv_s", 2335 UnMaskedPolicyScheme = HasPassthruOperand, 2336 SupportOverloading = false in 2337 defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil", 2338 [["x", "v", "ve"], 2339 ["x", "Uv", "UvUe"]]>; 2340} 2341 2342// 16.2. Floating-Point Scalar Move Instructions 2343let HasMasked = false, MaskedPolicyScheme = NonePolicy in { 2344 let HasVL = false, OverloadedName = "vfmv_f" in { 2345 defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "fd", 2346 [["s", "ve", "ev"]]>; 2347 let RequiredFeatures = ["Zvfh"] in 2348 defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x", 2349 [["s", "ve", "ev"]]>; 2350 } 2351 let OverloadedName = "vfmv_s", 2352 UnMaskedPolicyScheme = HasPassthruOperand, 2353 SupportOverloading = false in { 2354 defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "fd", 2355 [["f", "v", "ve"], 2356 ["x", "Uv", "UvUe"]]>; 2357 let RequiredFeatures = ["Zvfh"] in 2358 defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x", 2359 [["f", "v", "ve"], 2360 ["x", "Uv", "UvUe"]]>; 2361 } 2362} 2363 2364// 16.3. Vector Slide Instructions 2365// 16.3.1. Vector Slideup Instructions 2366defm vslideup : RVVSlideUpBuiltinSet; 2367// 16.3.2. Vector Slidedown Instructions 2368defm vslidedown : RVVSlideDownBuiltinSet; 2369 2370// 16.3.3. Vector Slide1up Instructions 2371let UnMaskedPolicyScheme = HasPassthruOperand in { 2372defm vslide1up : RVVSlideOneBuiltinSet; 2373defm vfslide1up : RVVFloatingBinVFBuiltinSet; 2374 2375// 16.3.4. Vector Slide1down Instruction 2376defm vslide1down : RVVSlideOneBuiltinSet; 2377defm vfslide1down : RVVFloatingBinVFBuiltinSet; 2378 2379// 16.4. Vector Register Gather Instructions 2380// signed and floating type 2381defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilfd", 2382 [["vv", "v", "vvUv"]]>; 2383defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilfd", 2384 [["vx", "v", "vvz"]]>; 2385let RequiredFeatures = ["Zvfhmin"] in { 2386 defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "x", 2387 [["vv", "v", "vvUv"]]>; 2388 defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "x", 2389 [["vx", "v", "vvz"]]>; 2390} 2391let RequiredFeatures = ["Zvfbfmin"] in { 2392 defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "y", 2393 [["vv", "v", "vvUv"]]>; 2394 defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "y", 2395 [["vx", "v", "vvz"]]>; 2396} 2397defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilfd", 2398 [["vv", "v", "vv(Log2EEW:4)Uv"]]>; 2399let RequiredFeatures = ["Zvfh"] in 2400defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "x", 2401 [["vv", "v", "vv(Log2EEW:4)Uv"]]>; 2402// unsigned type 2403defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil", 2404 [["vv", "Uv", "UvUvUv"]]>; 2405defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil", 2406 [["vx", "Uv", "UvUvz"]]>; 2407defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil", 2408 [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>; 2409} 2410 2411// 16.5. Vector Compress Instruction 2412let HasMasked = false, 2413 UnMaskedPolicyScheme = HasPassthruOperand, 2414 MaskedPolicyScheme = NonePolicy, 2415 ManualCodegen = [{ 2416 // insert poison passthru 2417 if (PolicyAttrs & RVV_VTA) 2418 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 2419 IntrinsicTypes = {ResultType, Ops.back()->getType()}; 2420 }] in { 2421 // signed and floating type 2422 defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd", 2423 [["vm", "v", "vvm"]]>; 2424 let RequiredFeatures = ["Zvfhmin"] in 2425 defm vcompress : RVVOutBuiltinSet<"vcompress", "x", 2426 [["vm", "v", "vvm"]]>; 2427 let RequiredFeatures = ["Zvfbfmin"] in 2428 defm vcompress : RVVOutBuiltinSet<"vcompress", "y", 2429 [["vm", "v", "vvm"]]>; 2430 // unsigned type 2431 defm vcompress : RVVOutBuiltinSet<"vcompress", "csil", 2432 [["vm", "Uv", "UvUvm"]]>; 2433} 2434 2435// Miscellaneous 2436let HasMasked = false, HasVL = false, IRName = "" in { 2437 let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy, 2438 ManualCodegen = [{ 2439 if (ResultType->isIntOrIntVectorTy(1) || 2440 Ops[0]->getType()->isIntOrIntVectorTy(1)) { 2441 assert(isa<ScalableVectorType>(ResultType) && 2442 isa<ScalableVectorType>(Ops[0]->getType())); 2443 2444 LLVMContext &Context = CGM.getLLVMContext(); 2445 ScalableVectorType *Boolean64Ty = 2446 ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64); 2447 2448 if (ResultType->isIntOrIntVectorTy(1)) { 2449 // Casting from m1 vector integer -> vector boolean 2450 // Ex: <vscale x 8 x i8> 2451 // --(bitcast)--------> <vscale x 64 x i1> 2452 // --(vector_extract)-> <vscale x 8 x i1> 2453 llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty); 2454 return Builder.CreateExtractVector(ResultType, BitCast, 2455 ConstantInt::get(Int64Ty, 0)); 2456 } else { 2457 // Casting from vector boolean -> m1 vector integer 2458 // Ex: <vscale x 1 x i1> 2459 // --(vector_insert)-> <vscale x 64 x i1> 2460 // --(bitcast)-------> <vscale x 8 x i8> 2461 llvm::Value *Boolean64Val = 2462 Builder.CreateInsertVector(Boolean64Ty, 2463 llvm::PoisonValue::get(Boolean64Ty), 2464 Ops[0], 2465 ConstantInt::get(Int64Ty, 0)); 2466 return Builder.CreateBitCast(Boolean64Val, ResultType); 2467 } 2468 } 2469 return Builder.CreateBitCast(Ops[0], ResultType); 2470 }] in { 2471 // Reinterpret between different type under the same SEW and LMUL 2472 def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">; 2473 def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il", "v">; 2474 def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">; 2475 def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il", "Uv">; 2476 def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il", "Fv">; 2477 def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il", "Fv">; 2478 let RequiredFeatures = ["Zvfhmin"] in { 2479 def vreinterpret_i_h : RVVBuiltin<"Fvv", "vFv", "s", "v">; 2480 def vreinterpret_u_h : RVVBuiltin<"FvUv", "UvFv", "s", "Uv">; 2481 def vreinterpret_h_i : RVVBuiltin<"vFv", "Fvv", "s", "Fv">; 2482 def vreinterpret_h_u : RVVBuiltin<"UvFv", "FvUv", "s", "Fv">; 2483 } 2484 let RequiredFeatures = ["Zvfbfmin"] in { 2485 def vreinterpret_i_bf16 : RVVBuiltin<"vIv", "Ivv", "y", "Iv">; 2486 def vreinterpret_u_bf16 : RVVBuiltin<"vUv", "Uvv", "y", "Uv">; 2487 def vreinterpret_bf16_i : RVVBuiltin<"Ivv", "vIv", "y", "v">; 2488 def vreinterpret_bf16_u : RVVBuiltin<"Uvv", "vUv", "y", "v">; 2489 } 2490 2491 // Reinterpret between different SEW under the same LMUL 2492 foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)", 2493 "(FixedSEW:64)"] in { 2494 def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v", 2495 dst_sew # "vv", "csil", dst_sew # "v">; 2496 def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv", 2497 dst_sew # "UvUv", "csil", dst_sew # "Uv">; 2498 } 2499 2500 // Existing users of FixedSEW - the reinterpretation between different SEW 2501 // and same LMUL has the implicit assumption that if FixedSEW is set to the 2502 // given element width, then the type will be identified as invalid, thus 2503 // skipping definition of reinterpret of SEW=8 to SEW=8. However this blocks 2504 // our usage here of defining all possible combinations of a fixed SEW to 2505 // any boolean. So we need to separately define SEW=8 here. 2506 // Reinterpret from LMUL=1 integer type to vector boolean type 2507 def vreintrepret_m1_b8_signed : 2508 RVVBuiltin<"Svm", 2509 "mSv", 2510 "c", "m">; 2511 def vreintrepret_m1_b8_usigned : 2512 RVVBuiltin<"USvm", 2513 "mUSv", 2514 "c", "m">; 2515 2516 // Reinterpret from vector boolean type to LMUL=1 integer type 2517 def vreintrepret_b8_m1_signed : 2518 RVVBuiltin<"mSv", 2519 "Svm", 2520 "c", "Sv">; 2521 def vreintrepret_b8_m1_usigned : 2522 RVVBuiltin<"mUSv", 2523 "USvm", 2524 "c", "USv">; 2525 2526 foreach dst_sew = ["16", "32", "64"] in { 2527 // Reinterpret from LMUL=1 integer type to vector boolean type 2528 def vreinterpret_m1_b # dst_sew # _signed: 2529 RVVBuiltin<"(FixedSEW:" # dst_sew # ")Svm", 2530 "m(FixedSEW:" # dst_sew # ")Sv", 2531 "c", "m">; 2532 def vreinterpret_m1_b # dst_sew # _unsigned: 2533 RVVBuiltin<"(FixedSEW:" # dst_sew # ")USvm", 2534 "m(FixedSEW:" # dst_sew # ")USv", 2535 "c", "m">; 2536 // Reinterpret from vector boolean type to LMUL=1 integer type 2537 def vreinterpret_b # dst_sew # _m1_signed: 2538 RVVBuiltin<"m(FixedSEW:" # dst_sew # ")Sv", 2539 "(FixedSEW:" # dst_sew # ")Svm", 2540 "c", "(FixedSEW:" # dst_sew # ")Sv">; 2541 def vreinterpret_b # dst_sew # _m1_unsigned: 2542 RVVBuiltin<"m(FixedSEW:" # dst_sew # ")USv", 2543 "(FixedSEW:" # dst_sew # ")USvm", 2544 "c", "(FixedSEW:" # dst_sew # ")USv">; 2545 } 2546 } 2547 2548 let Name = "vundefined", SupportOverloading = false, 2549 MaskedPolicyScheme = NonePolicy, 2550 ManualCodegen = [{ 2551 return llvm::PoisonValue::get(ResultType); 2552 }] in { 2553 def vundefined : RVVBuiltin<"v", "v", "csilfd">; 2554 let RequiredFeatures = ["Zvfhmin"] in 2555 def vundefined_h : RVVBuiltin<"v", "v", "x">; 2556 let RequiredFeatures = ["Zvfbfmin"] in 2557 def vundefined_bf16 : RVVBuiltin<"v", "v", "y">; 2558 def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">; 2559 2560 foreach nf = NFList in { 2561 let NF = nf in { 2562 defvar T = "(Tuple:" # nf # ")"; 2563 def : RVVBuiltin<T # "v", T # "v", "csilfd">; 2564 let RequiredFeatures = ["Zvfhmin"] in 2565 def : RVVBuiltin<T # "v", T # "v", "x">; 2566 let RequiredFeatures = ["Zvfbfmin"] in 2567 def : RVVBuiltin<T # "v", T # "v", "y">; 2568 def : RVVBuiltin<T # "Uv", T # "Uv", "csil">; 2569 } 2570 } 2571 2572 } 2573 2574 // LMUL truncation 2575 // C/C++ Operand: VecTy, IR Operand: VecTy, Index 2576 let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc", 2577 MaskedPolicyScheme = NonePolicy, 2578 ManualCodegen = [{ { 2579 return Builder.CreateExtractVector(ResultType, Ops[0], 2580 ConstantInt::get(Int64Ty, 0)); 2581 } }] in { 2582 foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)", 2583 "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in { 2584 def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", 2585 dst_lmul # "vv", "csilfd", dst_lmul # "v">; 2586 let RequiredFeatures = ["Zvfhmin"] in 2587 def vlmul_trunc_h # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", 2588 dst_lmul # "vv", "x", dst_lmul # "v">; 2589 let RequiredFeatures = ["Zvfbfmin"] in 2590 def vlmul_trunc_bf16 # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", 2591 dst_lmul # "vv", "y", dst_lmul # "v">; 2592 def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", 2593 dst_lmul # "UvUv", "csil", dst_lmul # "Uv">; 2594 } 2595 } 2596 2597 // LMUL extension 2598 // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index 2599 let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext", 2600 MaskedPolicyScheme = NonePolicy, 2601 ManualCodegen = [{ 2602 return Builder.CreateInsertVector(ResultType, 2603 llvm::PoisonValue::get(ResultType), 2604 Ops[0], ConstantInt::get(Int64Ty, 0)); 2605 }] in { 2606 foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)", 2607 "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in { 2608 def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", 2609 dst_lmul # "vv", "csilfd", dst_lmul # "v">; 2610 let RequiredFeatures = ["Zvfhmin"] in 2611 def vlmul_ext_h # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", 2612 dst_lmul # "vv", "x", dst_lmul # "v">; 2613 let RequiredFeatures = ["Zvfbfmin"] in 2614 def vlmul_ext_bf16 # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", 2615 dst_lmul # "vv", "y", dst_lmul # "v">; 2616 def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", 2617 dst_lmul # "UvUv", "csil", dst_lmul # "Uv">; 2618 } 2619 } 2620 2621 let Name = "vget_v", MaskedPolicyScheme = NonePolicy, 2622 ManualCodegen = [{ 2623 { 2624 auto *VecTy = cast<ScalableVectorType>(ResultType); 2625 if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) { 2626 unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements(); 2627 assert(isPowerOf2_32(MaxIndex)); 2628 // Mask to only valid indices. 2629 Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty()); 2630 Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1); 2631 Ops[1] = Builder.CreateMul(Ops[1], 2632 ConstantInt::get(Ops[1]->getType(), 2633 VecTy->getMinNumElements())); 2634 return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]); 2635 } 2636 2637 return Builder.CreateIntrinsic(Intrinsic::riscv_tuple_extract, 2638 {ResultType, Ops[0]->getType()}, 2639 {Ops[0], Builder.CreateTrunc(Ops[1], 2640 Builder.getInt32Ty())}); 2641 } 2642 }] in { 2643 foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in { 2644 def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd", dst_lmul # "v">; 2645 let RequiredFeatures = ["Zvfhmin"] in 2646 def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "x", dst_lmul # "v">; 2647 let RequiredFeatures = ["Zvfbfmin"] in 2648 def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "y", dst_lmul # "v">; 2649 def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">; 2650 } 2651 foreach nf = NFList in { 2652 defvar T = "(Tuple:" # nf # ")"; 2653 def : RVVBuiltin<T # "vv", "v" # T # "vKz", "csilfd", "v">; 2654 let RequiredFeatures = ["Zvfhmin"] in 2655 def : RVVBuiltin<T # "vv", "v" # T # "vKz", "x", "v">; 2656 let RequiredFeatures = ["Zvfbfmin"] in 2657 def : RVVBuiltin<T # "vv", "v" # T # "vKz", "y", "v">; 2658 def : RVVBuiltin<T # "UvUv", "Uv" # T # "UvKz", "csil", "Uv">; 2659 } 2660 } 2661 2662 let Name = "vset_v", MaskedPolicyScheme = NonePolicy, 2663 ManualCodegen = [{ 2664 { 2665 if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) { 2666 auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType()); 2667 unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements(); 2668 assert(isPowerOf2_32(MaxIndex)); 2669 // Mask to only valid indices. 2670 Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty()); 2671 Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1); 2672 Ops[1] = Builder.CreateMul(Ops[1], 2673 ConstantInt::get(Ops[1]->getType(), 2674 VecTy->getMinNumElements())); 2675 return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]); 2676 } 2677 2678 return Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert, 2679 {ResultType, Ops[2]->getType()}, 2680 {Ops[0], Ops[2], 2681 Builder.CreateTrunc(Ops[1],Builder.getInt32Ty())}); 2682 } 2683 }] in { 2684 foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in { 2685 def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilfd">; 2686 let RequiredFeatures = ["Zvfhmin"] in 2687 def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "x">; 2688 let RequiredFeatures = ["Zvfbfmin"] in 2689 def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "y">; 2690 def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul #"UvKzUv", "csil">; 2691 } 2692 foreach nf = NFList in { 2693 defvar T = "(Tuple:" # nf # ")"; 2694 def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "csilfd">; 2695 let RequiredFeatures = ["Zvfhmin"] in 2696 def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "x">; 2697 let RequiredFeatures = ["Zvfbfmin"] in 2698 def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "y">; 2699 def : RVVBuiltin<"Uv" # T # "Uv", T # "Uv" # T # "UvKzUv", "csil">; 2700 } 2701 } 2702 2703 let Name = "vcreate_v", 2704 UnMaskedPolicyScheme = NonePolicy, 2705 MaskedPolicyScheme = NonePolicy, 2706 SupportOverloading = false, 2707 ManualCodegen = [{ 2708 { 2709 llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType); 2710 auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType()); 2711 for (unsigned I = 0, N = Ops.size(); I < N; ++I) { 2712 if (isa<ScalableVectorType>(ResultType)) { 2713 llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(), 2714 VecTy->getMinNumElements() * I); 2715 ReturnVector = 2716 Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx); 2717 } else { 2718 llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I); 2719 ReturnVector = 2720 Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert, 2721 {ResultType, Ops[I]->getType()}, 2722 {ReturnVector, Ops[I], Idx}); 2723 } 2724 2725 } 2726 return ReturnVector; 2727 } 2728 }] in { 2729 2730 // Since the vcreate_v uses LFixedLog2LMUL, setting the Log2LMUL to [-3] can 2731 // avoid creating the intrinsics which contain the same name and prototype. 2732 let Log2LMUL = [-3] in { 2733 defm : RVVNonTupleVCreateBuiltin<1, [0]>; 2734 defm : RVVNonTupleVCreateBuiltin<2, [0, 1]>; 2735 defm : RVVNonTupleVCreateBuiltin<3, [0, 1, 2]>; 2736 } 2737 2738 foreach nf = NFList in { 2739 let NF = nf in { 2740 defvar T = "(Tuple:" # nf # ")"; 2741 defvar V = VString<nf, /*signed=*/true>.S; 2742 defvar UV = VString<nf, /*signed=*/false>.S; 2743 def : RVVBuiltin<T # "v", T # "v" # V, "csilfd">; 2744 let RequiredFeatures = ["Zvfhmin"] in 2745 def : RVVBuiltin<T # "v", T # "v" # V, "x">; 2746 let RequiredFeatures = ["Zvfbfmin"] in 2747 def : RVVBuiltin<T # "v", T # "v" # V, "y">; 2748 def : RVVBuiltin<T # "Uv", T # "Uv" # UV, "csil">; 2749 } 2750 } 2751 } 2752} 2753 2754multiclass RVVOutBuiltinSetZvbb { 2755 let OverloadedName = NAME in 2756 defm "" : RVVOutBuiltinSet<NAME, "csil", [["v", "v", "vv"], 2757 ["v", "Uv", "UvUv"]]>; 2758} 2759 2760multiclass RVVOutBuiltinSetZvk<bit HasVV = 1, bit HasVS = 1> { 2761 // vaesz only has 'vs' and vgmul only has 'vv' and they do not have ambiguous 2762 // prototypes like other zvkned instructions (e.g. vaesdf), so we don't 2763 // need to encode the operand mnemonics into its intrinsic function name. 2764 if HasVV then { 2765 defvar name = NAME # !if(!eq(NAME, "vgmul"), "", "_vv"); 2766 let OverloadedName = name in 2767 defm "" : RVVOutBuiltinSet<NAME # "_vv", "i", 2768 [["vv", "Uv", "UvUvUv"]]>; 2769 } 2770 2771 if HasVS then { 2772 foreach vs2_lmul = ["(SEFixedLog2LMUL:-1)", "(SEFixedLog2LMUL:0)", 2773 "(SEFixedLog2LMUL:1)", "(SEFixedLog2LMUL:2)"] in { 2774 defvar name = NAME # !if(!eq(NAME, "vaesz"), "", "_vs"); 2775 let OverloadedName = name, IRName = NAME # "_vs", Name = NAME # "_vs", 2776 IntrinsicTypes = [-1, 1] in 2777 def NAME # vs2_lmul 2778 : RVVBuiltin<vs2_lmul # "UvUv", "UvUv" # vs2_lmul # "Uv", "i">; 2779 } 2780 } 2781} 2782 2783multiclass RVVOutOp2BuiltinSetVVZvk<string type_range = "i"> 2784 : RVVOutOp2BuiltinSet<NAME, type_range, [["vv", "Uv", "UvUvUvUv"]]>; 2785 2786multiclass RVVOutOp2BuiltinSetVIZvk<string type_range = "i"> 2787 : RVVOutOp2BuiltinSet<NAME, type_range, [["vi", "Uv", "UvUvUvKz"]]>; 2788 2789multiclass RVVSignedWidenBinBuiltinSetVwsll 2790 : RVVWidenBuiltinSet<NAME, "csi", 2791 [["vv", "Uw", "UwUvUv"], 2792 ["vx", "Uw", "UwUvz"]]>; 2793 2794let UnMaskedPolicyScheme = HasPassthruOperand in { 2795 // zvkb 2796 let RequiredFeatures = ["Zvkb"] in { 2797 defm vandn : RVVUnsignedBinBuiltinSet; 2798 defm vbrev8 : RVVOutBuiltinSetZvbb; 2799 defm vrev8 : RVVOutBuiltinSetZvbb; 2800 defm vrol : RVVUnsignedShiftBuiltinSet; 2801 defm vror : RVVUnsignedShiftBuiltinSet; 2802 } 2803 2804 // zvbb 2805 let RequiredFeatures = ["Zvbb"] in { 2806 defm vbrev : RVVOutBuiltinSetZvbb; 2807 defm vclz : RVVOutBuiltinSetZvbb; 2808 defm vctz : RVVOutBuiltinSetZvbb; 2809 let IRName = "vcpopv", MaskedIRName = "vcpopv_mask" in 2810 defm vcpop : RVVOutBuiltinSetZvbb; 2811 let OverloadedName = "vwsll" in 2812 defm vwsll : RVVSignedWidenBinBuiltinSetVwsll; 2813 } 2814 2815 // zvbc 2816 let RequiredFeatures = ["Zvbc"] in { 2817 defm vclmul : RVVInt64BinBuiltinSet; 2818 defm vclmulh : RVVInt64BinBuiltinSet; 2819 } 2820} 2821 2822let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in { 2823 // zvkg 2824 let RequiredFeatures = ["Zvkg"] in { 2825 defm vghsh : RVVOutOp2BuiltinSetVVZvk; 2826 defm vgmul : RVVOutBuiltinSetZvk<HasVV=1, HasVS=0>; 2827 } 2828 2829 // zvkned 2830 let RequiredFeatures = ["Zvkned"] in { 2831 defm vaesdf : RVVOutBuiltinSetZvk; 2832 defm vaesdm : RVVOutBuiltinSetZvk; 2833 defm vaesef : RVVOutBuiltinSetZvk; 2834 defm vaesem : RVVOutBuiltinSetZvk; 2835 let UnMaskedPolicyScheme = HasPassthruOperand in 2836 defm vaeskf1 : RVVOutOp1BuiltinSet<"vaeskf1", "i", [["vi", "Uv", "UvUvKz"]]>; 2837 defm vaeskf2 : RVVOutOp2BuiltinSetVIZvk; 2838 defm vaesz : RVVOutBuiltinSetZvk<HasVV=0>; 2839 } 2840 2841 // zvknha 2842 let RequiredFeatures = ["Zvknha"] in { 2843 defm vsha2ch : RVVOutOp2BuiltinSetVVZvk<"i">; 2844 defm vsha2cl : RVVOutOp2BuiltinSetVVZvk<"i">; 2845 defm vsha2ms : RVVOutOp2BuiltinSetVVZvk<"i">; 2846 } 2847 2848 // zvknhb 2849 let RequiredFeatures = ["Zvknhb"] in { 2850 defm vsha2ch : RVVOutOp2BuiltinSetVVZvk<"il">; 2851 defm vsha2cl : RVVOutOp2BuiltinSetVVZvk<"il">; 2852 defm vsha2ms : RVVOutOp2BuiltinSetVVZvk<"il">; 2853 } 2854 2855 // zvksed 2856 let RequiredFeatures = ["Zvksed"] in { 2857 let UnMaskedPolicyScheme = HasPassthruOperand in 2858 defm vsm4k : RVVOutOp1BuiltinSet<"vsm4k", "i", [["vi", "Uv", "UvUvKz"]]>; 2859 defm vsm4r : RVVOutBuiltinSetZvk; 2860 } 2861 2862 // zvksh 2863 let RequiredFeatures = ["Zvksh"] in { 2864 defm vsm3c : RVVOutOp2BuiltinSetVIZvk; 2865 let UnMaskedPolicyScheme = HasPassthruOperand in 2866 defm vsm3me : RVVOutOp1BuiltinSet<"vsm3me", "i", [["vv", "Uv", "UvUvUv"]]>; 2867 } 2868} 2869