//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td.
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 RegisterClass regclass,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)> {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load (XLenVT GPR:$rs1))),
            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
                        log2sew, TA_MA)>;
  // Store
  def : Pat<(store (type regclass:$rs2), (XLenVT GPR:$rs1)),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> {
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr (m.Mask (IMPLICIT_DEF)), GPR:$rs1, m.AVL,
                        m.Log2SEW, TA_MA)>;
  // Store
  def : Pat<(store (m.Mask VR:$rs2), GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}

class VPatBinarySDNode_VV<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType op_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg op_reg_class,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_XI<SDPatternOperator vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#_#suffix#_# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             xop_kind:$rs2,
             avl, log2sew, TA_MA)>;

multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                  list<VTypeInfo> vtilist = AllIntegerVectors,
                                  bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass,
                                SplatPat, GPR, isSEWAware>;
    }
  }
}

multiclass VPatBinarySDNode_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}

class VPatBinarySDNode_VF<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType vop_type,
                             ValueType xop_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg vop_reg_class,
                             DAGOperand xop_kind,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
                                    bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                vti.Vector, vti.Vector, vti.Scalar,
                                vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_VV_VF_RM<SDPatternOperator vop, string instruction_name,
                                       bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV_RM<vop, instruction_name,
                                   vti.Vector, vti.Vector, vti.Log2SEW,
                                   vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                   vti.Vector, vti.Vector, vti.Scalar,
                                   vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                   vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name,
                                   bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                 !if(isSEWAware,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_name,
                                      bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                 !if(isSEWAware,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
                                     CondCode cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_VV<instruction_name, cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc,
                                               string kind,
                                               ComplexPattern SplatPatKind,
                                               DAGOperand xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
      def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))),
                                 (vti.Vector vti.RegClass:$rs1), invcc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
    }
  }
}

multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX",
                                          SplatPat, GPR>;

multiclass VPatIntegerSetCCSDNode_VI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VI",
                                          SplatPat_simm5, simm5>;

multiclass VPatIntegerSetCCSDNode_VIPlus1_Swappable<string instruction_name,
                                                    CondCode cc, CondCode invcc,
                                                    ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector (splatpat_kind simm5:$rs2)),
                                 cc)),
                (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                             vti.AVL, vti.Log2SEW)>;
      def : Pat<(vti.Mask (setcc (vti.Vector (splatpat_kind simm5:$rs2)),
                                 (vti.Vector vti.RegClass:$rs1),
                                 invcc)),
                (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                             vti.AVL, vti.Log2SEW)>;
    }
  }
}

multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in {
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (fvti.Vector fvti.RegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                    fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  (fvti.Vector fvti.RegClass:$rs1),
                                  cc)),
                (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                    fvti.AVL, fvti.Log2SEW)>;
    }
  }
}

multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<fti>.Predicates) in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertI2FPSDNode_V_RM<SDPatternOperator vop,
                                      string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
                  (fvti.Vector (IMPLICIT_DEF)),
                  ivti.RegClass:$rs1,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertFP2ISDNode_V<SDPatternOperator vop,
                                   string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  (ivti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW)
                  (fwti.Vector (IMPLICIT_DEF)),
                  ivti.RegClass:$rs1,
                  ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  (iwti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatNConvertI2FPSDNode_W_RM<SDPatternOperator vop,
                                       string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
                  (fvti.Vector (IMPLICIT_DEF)),
                  iwti.RegClass:$rs1,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatNConvertFP2ISDNode_W<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                    wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
                                             string instruction_name>
    : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>,
      VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;

multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
          wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
          vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}
multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
          wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
          vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs1),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs1),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector (SplatFPOp (vti.Scalar vti.ScalarRegClass:$rs1))),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs1),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_E"#vti.SEW#"_TIED")
                    wti.RegClass:$rs2, vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<SDNode op,
                                                  string instruction_name>
    : VPatWidenBinaryFPSDNode_VV_VF_RM<op, instruction_name>,
      VPatWidenBinaryFPSDNode_WV_WF_RM<op, instruction_name>;

multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name,
                                            list <VTypeInfoToWide> vtiToWtis> {
  foreach vtiToWti = vtiToWtis in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates,
                                 !if(!eq(vti.Scalar, bf16),
                                     [HasStdExtZvfbfwma],
                                     [])) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                         (vti.Vector vti.RegClass:$rs1),
                         (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                         (vti.Vector vti.RegClass:$rs2),
                         (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp
                         (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                         (vti.Vector vti.RegClass:$rs2),
                         (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                               (vti.Vector vti.RegClass:$rs1),
                               (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                               (vti.Vector vti.RegClass:$rs2),
                               (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                         (vti.Vector vti.RegClass:$rs1),
                         (vti.Mask true_mask), (XLenVT srcvalue))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                               (vti.Vector vti.RegClass:$rs1),
                               (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                               (vti.Vector vti.RegClass:$rs2),
                               (vti.Mask true_mask), (XLenVT srcvalue)))),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                    vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
      // commutable.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                    vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2)),
                (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector (SplatPat (XLenVT GPR:$rs2)))),
                (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                    vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

// 7.4. Vector Unit-Stride Instructions
foreach vti = AllVectors in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.RegClass, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach mti = AllMasks in
  let Predicates = [HasVInstructions] in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands.
foreach vti = AllIntegerVectors in {
  // FIXME: The AddedComplexity here is covering up a missing matcher for
  // widening vwsub.vx which can recognize an extend folded into the
  // scalar of the splat.
  let AddedComplexity = 20 in
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                  simm5:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.2. Vector Widening Integer Add and Subtract
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;

defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

// shl (ext v, splat 1) is a special case of widening add.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (riscv_sext_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), VLOpFrag)),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(shl (wti.Vector (riscv_zext_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), VLOpFrag)),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 11.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
                 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
}

// 11.8. Vector Integer Comparison Instructions
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>;

defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;

defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction

defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;

defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSLE", SETLT, SETGT,
                                                SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSLEU", SETULT, SETUGT,
                                                SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSGT", SETGE, SETLE,
                                                SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSGTU", SETUGE, SETULE,
                                                SplatPat_simm5_plus1_nonzero>;

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;

defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", IntegerVectorsExceptI64>;

let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>;

foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
          (vti.Mask true_mask), (XLenVT srcvalue))),
      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
          (vti.Vector (IMPLICIT_DEF)),
          vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                   "PseudoVWMUL">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
                                   "PseudoVWMULSU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
                                   "PseudoVWMULSU">;

// 11.13. Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">;
defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                  vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
  }
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;

// 12.2. Vector Single-Width Averaging Add and Subtract
defm : VPatAVGADD_VV_VX_RM<avgfloors, 0b10>;
defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10, suffix = "U">;
defm : VPatAVGADD_VV_VX_RM<avgceils, 0b00>;
defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00, suffix = "U">;

// 15. Vector Mask Instructions

// 15.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMAND_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMOR_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNOR_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMANDN_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMORN_MM_"#mti.BX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
    def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
                  VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
  }
}

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fadd, "PseudoVFADD", isSEWAware=1>;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fsub, "PseudoVFSUB", isSEWAware=1>;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fsub, "PseudoVFRSUB", isSEWAware=1>;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fadd, "PseudoVFWADD">;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fsub, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fmul, "PseudoVFMUL", isSEWAware=1>;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fdiv, "PseudoVFDIV", isSEWAware=1>;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fdiv, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_RM<fmul, "PseudoVFWMUL">;

// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX # "_E" # fvti.SEW;
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
    // commutable.
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The splat might be negated.
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC",
                                        AllWidenableFloatVectors>;
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16",
                                        AllWidenableBFloatToFloatVectors>;
defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  vti.AVL, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector vti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg vti.RegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
              (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN", isSEWAware=1>;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX", isSEWAware=1>;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                  fvti.AVL, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (fvti.Scalar fpimm0)),
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
  }
}

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                (fvti.Vector (IMPLICIT_DEF)),
                fvti.RegClass:$rs2,
                (fvti.Scalar fvti.ScalarRegClass:$rs1),
                (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_sint_to_fp, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_uint_to_fp, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFWCVT_F_XU_V">;

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2ISDNode_W<any_fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_W<any_fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
                (fvti.Vector (IMPLICIT_DEF)),
                fwti.RegClass:$rs1,
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
                fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = [HasVInstructionsBF16Minimal] in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
                (fvti.Vector (IMPLICIT_DEF)),
                fwti.RegClass:$rs1,
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
                fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
foreach vti = NoGroupFloatVectors in {
  defvar vfmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                       vti.ScalarSuffix,
                                                       "_S"));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}