//===-- RISCVInstrInfoZvk.td - RISC-V 'Zvk' instructions ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'Zvk',
// Vector Cryptography Instructions extension, version Release 1.0.0.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def tuimm5 : RISCVOp, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]>;

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
multiclass VCLMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # "vv">,
          SchedBinaryMC<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # "vx">,
          SchedBinaryMC<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX">;
}

class RVInstIVI_VROR<bits<6> funct6, dag outs, dag ins, string opcodestr,
                     string argstr>
    : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
  bits<5> vs2;
  bits<6> imm;
  bits<5> vd;
  bit vm;

  let Inst{31-27} = funct6{5-1};
  let Inst{26} = imm{5};
  let Inst{25} = vm;
  let Inst{24-20} = vs2;
  let Inst{19-15} = imm{4-0};
  let Inst{14-12} = OPIVI.Value;
  let Inst{11-7} = vd;
  let Inst{6-0} = OPC_OP_V.Value;

  let Uses = [VTYPE, VL];
  let RVVConstraint = VMConstraint;
}

multiclass VROR_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V_X<opcodestr, funct6> {
  def I : RVInstIVI_VROR<funct6, (outs VR:$vd),
                         (ins VR:$vs2, uimm6:$imm, VMaskOp:$vm),
                         opcodestr # ".vi", "$vd, $vs2, $imm$vm">,
          SchedUnaryMC<"WriteVRotI", "ReadVRotV">;
}

// op vd, vs2, vs1
class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : VALUVVNoVm<funct6, opv, opcodestr> {
  let Inst{6-0} = OPC_OP_VE.Value;
}

// op vd, vs2, vs1
class PALUVVNoVmTernary<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd_wb),
               (ins VR:$vd, VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let Constraints = "$vd = $vd_wb";
  let vm = 1;
  let Inst{6-0} = OPC_OP_VE.Value;
}

// op vd, vs2, imm
class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype>
    : VALUVINoVm<funct6, opcodestr, optype> {
  let Inst{6-0} = OPC_OP_VE.Value;
  let Inst{14-12} = OPMVV.Value;
}

// op vd, vs2, imm where vd is also a source regardless of tail policy
class PALUVINoVmBinary<bits<6> funct6, string opcodestr, Operand optype>
    : RVInstIVI<funct6, (outs VR:$vd_wb),
                (ins VR:$vd, VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let Constraints = "$vd = $vd_wb";
  let vm = 1;
  let Inst{6-0} = OPC_OP_VE.Value;
  let Inst{14-12} = OPMVV.Value;
}

// op vd, vs2 (use vs1 as instruction encoding) where vd is also a source
// regardless of tail policy
class PALUVs2NoVmBinary<bits<6> funct6, bits<5> vs1,
                        RISCVVFormat opv,
                        string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd_wb), (ins VR:$vd, VR:$vs2),
              opcodestr, "$vd, $vs2"> {
  let Constraints = "$vd = $vd_wb";
  let vm = 1;
  let Inst{6-0} = OPC_OP_VE.Value;
}

multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
                       RISCVVFormat opv, string opcodestr> {
  let RVVConstraint = NoConstraint in
  def NAME # _VV : PALUVs2NoVmBinary<funct6_vv, vs1, opv, opcodestr # ".vv">,
                   SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">;
  let RVVConstraint = VS2Constraint in
  def NAME # _VS : PALUVs2NoVmBinary<funct6_vs, vs1, opv, opcodestr # ".vs">,
                   SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZvbb] in {
  def VBREV_V : VALUVs2<0b010010, 0b01010, OPMVV, "vbrev.v">;
  def VCLZ_V : VALUVs2<0b010010, 0b01100, OPMVV, "vclz.v">;
  def VCPOP_V : VALUVs2<0b010010, 0b01110, OPMVV, "vcpop.v">;
  def VCTZ_V : VALUVs2<0b010010, 0b01101, OPMVV, "vctz.v">;
  let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
      DestEEW = EEWSEWx2 in
  defm VWSLL_V : VSHT_IV_V_X_I<"vwsll", 0b110101>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbcOrZvbc32e] in {
  defm VCLMUL_V : VCLMUL_MV_V_X<"vclmul", 0b001100>;
  defm VCLMULH_V : VCLMUL_MV_V_X<"vclmulh", 0b001101>;
} // Predicates = [HasStdExtZvbcOrZvbc32e]

let Predicates = [HasStdExtZvkb] in {
  defm VANDN_V : VALU_IV_V_X<"vandn", 0b000001>;
  def VBREV8_V : VALUVs2<0b010010, 0b01000, OPMVV, "vbrev8.v">;
  def VREV8_V : VALUVs2<0b010010, 0b01001, OPMVV, "vrev8.v">;
  defm VROL_V : VALU_IV_V_X<"vrol", 0b010101>;
  defm VROR_V : VROR_IV_V_X_I<"vror", 0b010100>;
} // Predicates = [HasStdExtZvkb]

let ElementsDependOn = EltDepsVLMask in {

let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in {
  def VGHSH_VV : PALUVVNoVmTernary<0b101100, OPMVV, "vghsh.vv">,
                 SchedTernaryMC<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
                                "ReadVGHSHV">;
  def VGMUL_VV : PALUVs2NoVmBinary<0b101000, 0b10001, OPMVV, "vgmul.vv">,
                 SchedBinaryMC<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV">;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkgs], RVVConstraint = VS2Constraint in {
  def VGHSH_VS : PALUVVNoVmTernary<0b100011, OPMVV, "vghsh.vs">,
                 SchedTernaryMC<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
                                "ReadVGHSHV">;
  def VGMUL_VS : PALUVs2NoVmBinary<0b101001, 0b10001, OPMVV, "vgmul.vs">,
                 SchedBinaryMC<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV">;
} // Predicates = [HasStdExtZvkgs]

let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = Sha2Constraint in {
  def VSHA2CH_VV : PALUVVNoVmTernary<0b101110, OPMVV, "vsha2ch.vv">,
                   SchedTernaryMC<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV",
                                  "ReadVSHA2CHV">;
  def VSHA2CL_VV : PALUVVNoVmTernary<0b101111, OPMVV, "vsha2cl.vv">,
                   SchedTernaryMC<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV",
                                  "ReadVSHA2CLV">;
  def VSHA2MS_VV : PALUVVNoVmTernary<0b101101, OPMVV, "vsha2ms.vv">,
                   SchedTernaryMC<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV",
                                  "ReadVSHA2MSV">;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvkned] in {
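  // The AES .vv/.vs pairs below all share a funct6 encoding (0b101000 for .vv,
  // 0b101001 for .vs); the individual operation is selected by the fixed vs1
  // value passed to VAES_MV_V_S.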
  defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
  defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">;
  defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
  defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">;
  def VAESKF1_VI : PALUVINoVm<0b100010, "vaeskf1.vi", uimm5>,
                   SchedUnaryMC<"WriteVAESKF1V", "ReadVAESKF1V">;
  def VAESKF2_VI : PALUVINoVmBinary<0b101010, "vaeskf2.vi", uimm5>,
                   SchedBinaryMC<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V">;
  let RVVConstraint = VS2Constraint in
  def VAESZ_VS : PALUVs2NoVmBinary<0b101001, 0b00111, OPMVV, "vaesz.vs">,
                 SchedBinaryMC<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV">;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvksed] in {
  let RVVConstraint = NoConstraint in
  def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>,
                 SchedUnaryMC<"WriteVSM4KV", "ReadVSM4KV">;
  defm VSM4R : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh], RVVConstraint = VS2Constraint in {
  def VSM3C_VI : PALUVINoVmBinary<0b101011, "vsm3c.vi", uimm5>,
                 SchedBinaryMC<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV">;
  def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">,
                  SchedBinaryMC<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV">;
} // Predicates = [HasStdExtZvksh]

} // ElementsDependOn = EltDepsVLMask

//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//

defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32));
defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors,
                                      !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)));

class ZvkI32IntegerVectors<string vd_lmul> {
  list<VTypeInfo> vs2_types = !cond(!eq(vd_lmul, "M8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
                                    !eq(vd_lmul, "M4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
                                    !eq(vd_lmul, "M2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 16)),
                                    !eq(vd_lmul, "M1") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 8)),
                                    !eq(vd_lmul, "MF2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 4)),
                                    !eq(vd_lmul, "MF4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 2)),
                                    !eq(vd_lmul, "MF8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 1)));
}

class ZvkMxSet<string vd_lmul> {
  list<LMULInfo> vs2_lmuls = !cond(!eq(vd_lmul, "M8") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
                                   !eq(vd_lmul, "M4") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
                                   !eq(vd_lmul, "M2") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2],
                                   !eq(vd_lmul, "M1") : [V_MF8, V_MF4, V_MF2, V_M1],
                                   !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2],
                                   !eq(vd_lmul, "MF4") : [V_MF8, V_MF4],
                                   !eq(vd_lmul, "MF8") : [V_MF8]);
}

class VPseudoBinaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass> :
      Pseudo<(outs RetClass:$rd_wb),
             (ins RetClass:$rd, OpClass:$rs2, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd_wb = $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoTernaryNoMask_Zvk<VReg RetClass,
                               VReg Op1Class,
                               DAGOperand Op2Class> :
      Pseudo<(outs RetClass:$rd_wb),
             (ins RetClass:$rd, Op1Class:$rs2, Op2Class:$rs1,
                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd_wb = $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoBinaryNoMaskPolicy_Zvk<VReg RetClass,
                                         VReg Op1Class,
                                         DAGOperand Op2Class,
                                         LMULInfo MInfo,
                                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
                                                   Constraint>;
  }
}

multiclass VPseudoTernaryNoMask_Zvk<VReg RetClass,
                                    VReg Op1Class,
                                    DAGOperand Op2Class,
                                    LMULInfo MInfo, int sew = 0> {
  let VLMul = MInfo.value, SEW = sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoTernaryNoMask_Zvk<RetClass, Op1Class, Op2Class>;
  }
}

multiclass VPseudoBinaryV_V_NoMask_Zvk<LMULInfo m> {
  let VLMul = m.value in {
    def "_VV_" # m.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass>;
  }
}

multiclass VPseudoBinaryV_S_NoMask_Zvk<LMULInfo m> {
  let VLMul = m.value in
  foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
    def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass>;
}

multiclass VPseudoVGMUL {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
              SchedBinary<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV", mx>;
  }
}

multiclass VPseudoVAESMV {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
              SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>;
    defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
              SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>;
  }
}

multiclass VPseudoVSM4R {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
              SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>;
    defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
              SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>;
  }
}

multiclass VPseudoVGHSH {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
                            "ReadVGHSHV", mx>;
  }
}

multiclass VPseudoVSHA2CH {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV",
                            "ReadVSHA2CHV", mx>;
  }
}

multiclass VPseudoVSHA2CL {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV",
                            "ReadVSHA2CLV", mx>;
  }
}

multiclass VPseudoVSHA2MS<int sew = 0> {
  foreach m = !if(!eq(sew, 64), MxListVF8, MxListVF4) in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m, sew = sew>,
               SchedTernary<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV",
                            "ReadVSHA2MSV", mx, sew>;
  }
}

multiclass VPseudoVAESKF1 {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedBinary<"WriteVAESKF1V", "ReadVAESKF1V", "ReadVAESKF1V", mx,
                           forcePassthruRead=true>;
  }
}

multiclass VPseudoVAESKF2 {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedTernary<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V",
                            "ReadVAESKF2V", mx>;
  }
}

multiclass VPseudoVAESZ {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
              SchedBinary<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV", mx>;
  }
}

multiclass VPseudoVSM3C {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedTernary<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV",
                            "ReadVSM3CV", mx>;
  }
}

multiclass VPseudoVSM4K {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedBinary<"WriteVSM4KV", "ReadVSM4KV", "ReadVSM4KV", mx,
                           forcePassthruRead=true>;
  }
}

multiclass VPseudoVSM3ME {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedBinary<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV", mx,
                           forcePassthruRead=true>;
  }
}

multiclass VPseudoVCLMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV", mx,
                          forcePassthruRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX", mx,
                          forcePassthruRead=true>;
  }
}

multiclass VPseudoUnaryV_V<LMULInfo m> {
  let VLMul = m.value in {
    defvar suffix = "_V_" # m.MX;
    def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
    def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}

multiclass VPseudoVBREV {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVBREVV", "ReadVBREVV", mx, forcePassthruRead=true>;
  }
}

multiclass VPseudoVCLZ {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVCLZV", "ReadVCLZV", mx, forcePassthruRead=true>;
  }
}

multiclass VPseudoVCTZ {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVCTZV", "ReadVCTZV", mx, forcePassthruRead=true>;
  }
}

multiclass VPseudoVCPOP {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVCPOPV", "ReadVCPOPV", mx, forcePassthruRead=true>;
  }
}

multiclass VPseudoVWSLL {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryW_VV<m>,
              SchedBinary<"WriteVWSLLV", "ReadVWSLLV", "ReadVWSLLV", mx,
                          forcePassthruRead=true>;
    defm "" : VPseudoBinaryW_VX<m>,
              SchedBinary<"WriteVWSLLX", "ReadVWSLLV", "ReadVWSLLX", mx,
                          forcePassthruRead=true>;
    defm "" : VPseudoBinaryW_VI<uimm5, m>,
              SchedUnary<"WriteVWSLLI", "ReadVWSLLV", mx,
                         forcePassthruRead=true>;
  }
}

multiclass VPseudoVANDN {
  foreach m = MxList in {
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
                          forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>, 489 SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX, 490 forcePassthruRead=true>; 491 } 492} 493 494multiclass VPseudoVBREV8 { 495 foreach m = MxList in { 496 defvar mx = m.MX; 497 defm "" : VPseudoUnaryV_V<m>, 498 SchedUnary<"WriteVBREV8V", "ReadVBREV8V", mx, forcePassthruRead=true>; 499 } 500} 501 502multiclass VPseudoVREV8 { 503 foreach m = MxList in { 504 defvar mx = m.MX; 505 defm "" : VPseudoUnaryV_V<m>, 506 SchedUnary<"WriteVREV8V", "ReadVREV8V", mx, forcePassthruRead=true>; 507 } 508} 509 510multiclass VPseudoVROT_VV_VX { 511 foreach m = MxList in { 512 defm "" : VPseudoBinaryV_VV<m>, 513 SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", m.MX, 514 forcePassthruRead=true>; 515 defm "" : VPseudoBinaryV_VX<m>, 516 SchedBinary<"WriteVRotX", "ReadVRotV", "ReadVRotX", m.MX, 517 forcePassthruRead=true>; 518 } 519} 520 521multiclass VPseudoVROT_VV_VX_VI 522 : VPseudoVROT_VV_VX { 523 foreach m = MxList in { 524 defm "" : VPseudoBinaryV_VI<uimm6, m>, 525 SchedUnary<"WriteVRotI", "ReadVRotV", m.MX, 526 forcePassthruRead=true>; 527 } 528} 529 530let Predicates = [HasStdExtZvbb] in { 531 defm PseudoVBREV : VPseudoVBREV; 532 defm PseudoVCLZ : VPseudoVCLZ; 533 defm PseudoVCTZ : VPseudoVCTZ; 534 defm PseudoVCPOP : VPseudoVCPOP; 535 defm PseudoVWSLL : VPseudoVWSLL; 536} // Predicates = [HasStdExtZvbb] 537 538let Predicates = [HasStdExtZvbc] in { 539 defm PseudoVCLMUL : VPseudoVCLMUL_VV_VX; 540 defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX; 541} // Predicates = [HasStdExtZvbc] 542 543let Predicates = [HasStdExtZvkb] in { 544 defm PseudoVANDN : VPseudoVANDN; 545 defm PseudoVBREV8 : VPseudoVBREV8; 546 defm PseudoVREV8 : VPseudoVREV8; 547 defm PseudoVROL : VPseudoVROT_VV_VX; 548 defm PseudoVROR : VPseudoVROT_VV_VX_VI; 549} // Predicates = [HasStdExtZvkb] 550 551let Predicates = [HasStdExtZvkg] in { 552 defm PseudoVGHSH : VPseudoVGHSH; 553 defm PseudoVGMUL : VPseudoVGMUL; 554} // Predicates = [HasStdExtZvkg] 555 556let Predicates = [HasStdExtZvkned] in { 557 defm PseudoVAESDF : VPseudoVAESMV; 558 defm PseudoVAESDM : VPseudoVAESMV; 559 defm PseudoVAESEF : VPseudoVAESMV; 560 defm PseudoVAESEM : VPseudoVAESMV; 561 defm PseudoVAESKF1 : VPseudoVAESKF1; 562 defm PseudoVAESKF2 : VPseudoVAESKF2; 563 defm PseudoVAESZ : VPseudoVAESZ; 564} // Predicates = [HasStdExtZvkned] 565 566let Predicates = [HasStdExtZvknhaOrZvknhb] in { 567 defm PseudoVSHA2CH : VPseudoVSHA2CH; 568 defm PseudoVSHA2CL : VPseudoVSHA2CL; 569 defm PseudoVSHA2MS : VPseudoVSHA2MS<sew=32>; 570 let Predicates = [HasStdExtZvknhb] in 571 defm PseudoVSHA2MS : VPseudoVSHA2MS<sew=64>; 572} // Predicates = [HasStdExtZvknhaOrZvknhb] 573 574let Predicates = [HasStdExtZvksed] in { 575 defm PseudoVSM4K : VPseudoVSM4K; 576 defm PseudoVSM4R : VPseudoVSM4R; 577} // Predicates = [HasStdExtZvksed] 578 579let Predicates = [HasStdExtZvksh] in { 580 defm PseudoVSM3C : VPseudoVSM3C; 581 defm PseudoVSM3ME : VPseudoVSM3ME; 582} // Predicates = [HasStdExtZvksh] 583 584//===----------------------------------------------------------------------===// 585// SDNode patterns 586//===----------------------------------------------------------------------===// 587 588multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name, 589 Predicate predicate = HasStdExtZvbb> { 590 foreach vti = AllIntegerVectors in { 591 let Predicates = !listconcat([predicate], 592 GetVTypePredicates<vti>.Predicates) in { 593 def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))), 594 (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX) 595 
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Helpers for detecting splats since we preprocess splat_vector to vmv.v.x
// This should match the logic in RISCVDAGToDAGISel::selectVSplat
def riscv_splat_vector : PatFrag<(ops node:$rs1),
                                 (riscv_vmv_v_x_vl undef, node:$rs1, srcvalue)>;
def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1,
                                               (riscv_splat_vector -1))>;

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (and (riscv_splat_vector
                                  (not vti.ScalarRegClass:$rs1)),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (and (riscv_splat_vector invLogicImm:$rs1),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 invLogicImm:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">;
defm : VPatUnarySDNode_V<bswap, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;

defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">;

// Invert the immediate and mask it to SEW for readability.
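// A rotate left by k is equivalent to a rotate right by SEW - k, so e.g. for
// SEW=32 an immediate rotate-left amount of 5 becomes 0x1f & (64 - 5) = 27.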
def InvRot8Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x7 & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot16Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0xf & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot32Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x1f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot64Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (rotl vti.RegClass:$rs2,
                                (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}
defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;

foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, GPR:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, uimm5:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

//===----------------------------------------------------------------------===//
// VL patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
                         Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$passthru),
                                (vti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$passthru,
                   vti.RegClass:$rs1,
                   (vti.Mask V0),
                   GPR:$vl,
                   vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
    }
  }
}

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
                                           (vti.Vector vti.RegClass:$rs1),
                                           (riscv_splat_vector -1),
                                           (vti.Vector vti.RegClass:$passthru),
                                           (vti.Mask V0),
                                           VLOpFrag),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$passthru),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$passthru,
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

    def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
                                           (not vti.ScalarRegClass:$rs1)),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$passthru),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$passthru,
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

    def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector invLogicImm:$rs1),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$passthru),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$passthru,
                 vti.RegClass:$rs2,
                 invLogicImm:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
  }
}

defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">;
defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">;
defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">;

defm : VPatBinaryVL_VV_VX<riscv_rotl_vl, "PseudoVROL">;
// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
                             (vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
                             (vti.Vector vti.RegClass:$passthru),
                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$passthru,
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>;

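// The patterns below select vwsll both for riscv_shl_vl of a zero-extended
// source and for the dedicated riscv_vwsll_vl node, covering the .vv, .vx
// (low 8 bits splat) and .vi forms.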
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(riscv_shl_vl
                 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (riscv_zext_vl_oneuse
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask V0), VLOpFrag)),
                 (wti.Vector (riscv_ext_vl_oneuse
                                (vti.Vector vti.RegClass:$rs1),
                                (vti.Mask V0), VLOpFrag)),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (riscv_zext_vl_oneuse
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask V0), VLOpFrag)),
                 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (riscv_zext_vl_oneuse
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask V0), VLOpFrag)),
                 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector vti.RegClass:$rs1),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                 (wti.Vector wti.RegClass:$passthru),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//

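// The Zvk crypto instructions use vd as both source and destination, so the
// NoMask pattern classes below forward $rd into the tied destination operand
// of the unmasked pseudos.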
class VPatUnaryNoMask_Zvk<string intrinsic_name,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op2_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                      (result_type result_reg_class:$rd),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag, (XLenVT timm:$policy))),
      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
         (result_type result_reg_class:$rd),
         (op2_type op2_reg_class:$rs2),
         GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             LMULInfo vs2_lmul,
                             VReg result_reg_class,
                             VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                      (result_type result_reg_class:$rd),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag, (XLenVT timm:$policy))),
      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
         (result_type result_reg_class:$rd),
         (op2_type op2_reg_class:$rs2),
         GPR:$vl, sew, (XLenVT timm:$policy))>;

multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.RegClass, vti.RegClass>;
}

multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
                                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
      def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS",
                                   vti.Vector, vti_vs2.Vector, vti.Log2SEW,
                                   vti.LMul, vti_vs2.LMul, vti.RegClass, vti_vs2.RegClass>;
}

multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist> {
  defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
}

multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist,
                                 bit isSEWAware = false> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
                                      vti.Vector, vti.Vector, vti.Vector,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, vti.RegClass,
                                      isSEWAware = isSEWAware>;
}

multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist,
                                 Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
                                      vti.Vector, vti.Vector, XLenVT,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist,
                                   Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction,
                                  list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic,
                      !if(isSEWAware,
                          instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_"#kind#"_"#vti.LMul.MX),
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

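// Per the Zvbb/Zvkb spec, vror.vi only uses the low log2(SEW) bits of its
// 6-bit rotate amount, so inverting modulo 64 (InvRot64Imm) is sufficient to
// lower a vrol.vi intrinsic to vror.vi for every SEW.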
multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(
                      !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_VI_"#vti.LMul.MX));
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$passthru),
                                (vti.Vector vti.RegClass:$rs2),
                                (XLenVT uimm6:$rs1),
                                VLOpFrag)),
              (Pseudo (vti.Vector vti.RegClass:$passthru),
                      (vti.Vector vti.RegClass:$rs2),
                      (InvRot64Imm uimm6:$rs1),
                      GPR:$vl, vti.Log2SEW, TU_MU)>;

    defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
    defvar PseudoMask = !cast<Instruction>(
                          !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
                              instruction#"_VI_"#vti.LMul.MX#"_MASK"));
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$passthru),
                                    (vti.Vector vti.RegClass:$rs2),
                                    (XLenVT uimm6:$rs1),
                                    (vti.Mask V0),
                                    VLOpFrag, (XLenVT timm:$policy))),
              (PseudoMask (vti.Vector vti.RegClass:$passthru),
                          (vti.Vector vti.RegClass:$rs2),
                          (InvRot64Imm uimm6:$rs1),
                          (vti.Mask V0),
                          GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
}

multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
                                  string instruction2, list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;

multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, uimm6>;

multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
                                      list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in {
      defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.ScalarRegClass>;
      defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, uimm5>;
    }
  }
}

let Predicates = [HasStdExtZvbb] in {
  defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>;
  defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL", AllWidenableIntVectors>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>;
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>;
defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>; 1108 defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>; 1109 defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR", AllIntegerVectors>; 1110 defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>; 1111} // Predicates = [HasStdExtZvkb] 1112 1113let Predicates = [HasStdExtZvkg] in { 1114 defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH", I32IntegerVectors>; 1115 defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL", I32IntegerVectors>; 1116} // Predicates = [HasStdExtZvkg] 1117 1118let Predicates = [HasStdExtZvkned] in { 1119 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>; 1120 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>; 1121 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>; 1122 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>; 1123 defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors>; 1124 defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors>; 1125 defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>; 1126} // Predicates = [HasStdExtZvkned] 1127 1128let Predicates = [HasStdExtZvknha] in { 1129 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32IntegerVectors>; 1130 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CH", I32IntegerVectors>; 1131 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32IntegerVectors, isSEWAware=true>; 1132} // Predicates = [HasStdExtZvknha] 1133 1134let Predicates = [HasStdExtZvknhb] in { 1135 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>; 1136 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CH", I32I64IntegerVectors>; 1137 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors, isSEWAware=true>; 1138} // Predicates = [HasStdExtZvknhb] 1139 1140let Predicates = [HasStdExtZvksed] in { 1141 defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors>; 1142 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>; 1143} // Predicates = [HasStdExtZvksed] 1144 1145let Predicates = [HasStdExtZvksh] in { 1146 defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors>; 1147 defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>; 1148} // Predicates = [HasStdExtZvksh] 1149