; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D

; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul

declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen, iXLen)

define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen, iXLen)

define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen, iXLen)

define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen, iXLen)

define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen, iXLen)

define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen, iXLen)

define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen, iXLen)

define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen, iXLen)

define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
@llvm.riscv.vsmul.mask.nxv2i32.i32( 1782 <vscale x 2 x i32> %0, 1783 <vscale x 2 x i32> %1, 1784 i32 %2, 1785 <vscale x 2 x i1> %3, 1786 iXLen 0, iXLen %4, iXLen 1) 1787 1788 ret <vscale x 2 x i32> %a 1789} 1790 1791declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32( 1792 <vscale x 4 x i32>, 1793 <vscale x 4 x i32>, 1794 i32, 1795 iXLen, iXLen) 1796 1797define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind { 1798; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: 1799; CHECK: # %bb.0: # %entry 1800; CHECK-NEXT: csrwi vxrm, 0 1801; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 1802; CHECK-NEXT: vsmul.vx v8, v8, a0 1803; CHECK-NEXT: ret 1804entry: 1805 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32( 1806 <vscale x 4 x i32> undef, 1807 <vscale x 4 x i32> %0, 1808 i32 %1, 1809 iXLen 0, iXLen %2) 1810 1811 ret <vscale x 4 x i32> %a 1812} 1813 1814declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32( 1815 <vscale x 4 x i32>, 1816 <vscale x 4 x i32>, 1817 i32, 1818 <vscale x 4 x i1>, 1819 iXLen, iXLen, iXLen) 1820 1821define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1822; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: 1823; CHECK: # %bb.0: # %entry 1824; CHECK-NEXT: csrwi vxrm, 0 1825; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu 1826; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t 1827; CHECK-NEXT: ret 1828entry: 1829 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32( 1830 <vscale x 4 x i32> %0, 1831 <vscale x 4 x i32> %1, 1832 i32 %2, 1833 <vscale x 4 x i1> %3, 1834 iXLen 0, iXLen %4, iXLen 1) 1835 1836 ret <vscale x 4 x i32> %a 1837} 1838 1839declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32( 1840 <vscale x 8 x i32>, 1841 <vscale x 8 x i32>, 1842 i32, 1843 iXLen, iXLen) 1844 1845define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind { 1846; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: 1847; CHECK: # %bb.0: # %entry 1848; CHECK-NEXT: csrwi vxrm, 0 1849; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 1850; CHECK-NEXT: vsmul.vx v8, v8, a0 1851; CHECK-NEXT: ret 1852entry: 1853 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32( 1854 <vscale x 8 x i32> undef, 1855 <vscale x 8 x i32> %0, 1856 i32 %1, 1857 iXLen 0, iXLen %2) 1858 1859 ret <vscale x 8 x i32> %a 1860} 1861 1862declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32( 1863 <vscale x 8 x i32>, 1864 <vscale x 8 x i32>, 1865 i32, 1866 <vscale x 8 x i1>, 1867 iXLen, iXLen, iXLen) 1868 1869define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1870; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: 1871; CHECK: # %bb.0: # %entry 1872; CHECK-NEXT: csrwi vxrm, 0 1873; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu 1874; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t 1875; CHECK-NEXT: ret 1876entry: 1877 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32( 1878 <vscale x 8 x i32> %0, 1879 <vscale x 8 x i32> %1, 1880 i32 %2, 1881 <vscale x 8 x i1> %3, 1882 iXLen 0, iXLen %4, iXLen 1) 1883 1884 ret <vscale x 8 x i32> %a 1885} 1886 1887declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32( 1888 <vscale x 16 x i32>, 1889 <vscale x 16 x i32>, 1890 i32, 1891 iXLen, iXLen) 1892 1893define <vscale x 16 x i32> 
@intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind { 1894; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: 1895; CHECK: # %bb.0: # %entry 1896; CHECK-NEXT: csrwi vxrm, 0 1897; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma 1898; CHECK-NEXT: vsmul.vx v8, v8, a0 1899; CHECK-NEXT: ret 1900entry: 1901 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32( 1902 <vscale x 16 x i32> undef, 1903 <vscale x 16 x i32> %0, 1904 i32 %1, 1905 iXLen 0, iXLen %2) 1906 1907 ret <vscale x 16 x i32> %a 1908} 1909 1910declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32( 1911 <vscale x 16 x i32>, 1912 <vscale x 16 x i32>, 1913 i32, 1914 <vscale x 16 x i1>, 1915 iXLen, iXLen, iXLen) 1916 1917define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1918; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: 1919; CHECK: # %bb.0: # %entry 1920; CHECK-NEXT: csrwi vxrm, 0 1921; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu 1922; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t 1923; CHECK-NEXT: ret 1924entry: 1925 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32( 1926 <vscale x 16 x i32> %0, 1927 <vscale x 16 x i32> %1, 1928 i32 %2, 1929 <vscale x 16 x i1> %3, 1930 iXLen 0, iXLen %4, iXLen 1) 1931 1932 ret <vscale x 16 x i32> %a 1933} 1934 1935declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64( 1936 <vscale x 1 x i64>, 1937 <vscale x 1 x i64>, 1938 i64, 1939 iXLen, iXLen) 1940 1941define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind { 1942; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: 1943; RV32: # %bb.0: # %entry 1944; RV32-NEXT: addi sp, sp, -16 1945; RV32-NEXT: sw a0, 8(sp) 1946; RV32-NEXT: sw a1, 12(sp) 1947; RV32-NEXT: addi a0, sp, 8 1948; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1949; RV32-NEXT: vlse64.v v9, (a0), zero 1950; RV32-NEXT: csrwi vxrm, 0 1951; RV32-NEXT: vsmul.vv v8, v8, v9 1952; RV32-NEXT: addi sp, sp, 16 1953; RV32-NEXT: ret 1954; 1955; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: 1956; RV64: # %bb.0: # %entry 1957; RV64-NEXT: csrwi vxrm, 0 1958; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma 1959; RV64-NEXT: vsmul.vx v8, v8, a0 1960; RV64-NEXT: ret 1961entry: 1962 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64( 1963 <vscale x 1 x i64> undef, 1964 <vscale x 1 x i64> %0, 1965 i64 %1, 1966 iXLen 0, iXLen %2) 1967 1968 ret <vscale x 1 x i64> %a 1969} 1970 1971declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64( 1972 <vscale x 1 x i64>, 1973 <vscale x 1 x i64>, 1974 i64, 1975 <vscale x 1 x i1>, 1976 iXLen, iXLen, iXLen) 1977 1978define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1979; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: 1980; RV32: # %bb.0: # %entry 1981; RV32-NEXT: addi sp, sp, -16 1982; RV32-NEXT: sw a0, 8(sp) 1983; RV32-NEXT: sw a1, 12(sp) 1984; RV32-NEXT: addi a0, sp, 8 1985; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu 1986; RV32-NEXT: vlse64.v v10, (a0), zero 1987; RV32-NEXT: csrwi vxrm, 0 1988; RV32-NEXT: vsmul.vv v8, v9, v10, v0.t 1989; RV32-NEXT: addi sp, sp, 16 1990; RV32-NEXT: ret 1991; 1992; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: 1993; RV64: # %bb.0: # %entry 1994; RV64-NEXT: csrwi vxrm, 0 1995; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, 
mu 1996; RV64-NEXT: vsmul.vx v8, v9, a0, v0.t 1997; RV64-NEXT: ret 1998entry: 1999 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64( 2000 <vscale x 1 x i64> %0, 2001 <vscale x 1 x i64> %1, 2002 i64 %2, 2003 <vscale x 1 x i1> %3, 2004 iXLen 0, iXLen %4, iXLen 1) 2005 2006 ret <vscale x 1 x i64> %a 2007} 2008 2009declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64( 2010 <vscale x 2 x i64>, 2011 <vscale x 2 x i64>, 2012 i64, 2013 iXLen, iXLen) 2014 2015define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind { 2016; RV32-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: 2017; RV32: # %bb.0: # %entry 2018; RV32-NEXT: addi sp, sp, -16 2019; RV32-NEXT: sw a0, 8(sp) 2020; RV32-NEXT: sw a1, 12(sp) 2021; RV32-NEXT: addi a0, sp, 8 2022; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma 2023; RV32-NEXT: vlse64.v v10, (a0), zero 2024; RV32-NEXT: csrwi vxrm, 0 2025; RV32-NEXT: vsmul.vv v8, v8, v10 2026; RV32-NEXT: addi sp, sp, 16 2027; RV32-NEXT: ret 2028; 2029; RV64-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: 2030; RV64: # %bb.0: # %entry 2031; RV64-NEXT: csrwi vxrm, 0 2032; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma 2033; RV64-NEXT: vsmul.vx v8, v8, a0 2034; RV64-NEXT: ret 2035entry: 2036 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64( 2037 <vscale x 2 x i64> undef, 2038 <vscale x 2 x i64> %0, 2039 i64 %1, 2040 iXLen 0, iXLen %2) 2041 2042 ret <vscale x 2 x i64> %a 2043} 2044 2045declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64( 2046 <vscale x 2 x i64>, 2047 <vscale x 2 x i64>, 2048 i64, 2049 <vscale x 2 x i1>, 2050 iXLen, iXLen, iXLen) 2051 2052define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 2053; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: 2054; RV32: # %bb.0: # %entry 2055; RV32-NEXT: addi sp, sp, -16 2056; RV32-NEXT: sw a0, 8(sp) 2057; RV32-NEXT: sw a1, 12(sp) 2058; RV32-NEXT: addi a0, sp, 8 2059; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu 2060; RV32-NEXT: vlse64.v v12, (a0), zero 2061; RV32-NEXT: csrwi vxrm, 0 2062; RV32-NEXT: vsmul.vv v8, v10, v12, v0.t 2063; RV32-NEXT: addi sp, sp, 16 2064; RV32-NEXT: ret 2065; 2066; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: 2067; RV64: # %bb.0: # %entry 2068; RV64-NEXT: csrwi vxrm, 0 2069; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu 2070; RV64-NEXT: vsmul.vx v8, v10, a0, v0.t 2071; RV64-NEXT: ret 2072entry: 2073 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64( 2074 <vscale x 2 x i64> %0, 2075 <vscale x 2 x i64> %1, 2076 i64 %2, 2077 <vscale x 2 x i1> %3, 2078 iXLen 0, iXLen %4, iXLen 1) 2079 2080 ret <vscale x 2 x i64> %a 2081} 2082 2083declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64( 2084 <vscale x 4 x i64>, 2085 <vscale x 4 x i64>, 2086 i64, 2087 iXLen, iXLen) 2088 2089define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind { 2090; RV32-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: 2091; RV32: # %bb.0: # %entry 2092; RV32-NEXT: addi sp, sp, -16 2093; RV32-NEXT: sw a0, 8(sp) 2094; RV32-NEXT: sw a1, 12(sp) 2095; RV32-NEXT: addi a0, sp, 8 2096; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma 2097; RV32-NEXT: vlse64.v v12, (a0), zero 2098; RV32-NEXT: csrwi vxrm, 0 2099; RV32-NEXT: vsmul.vv v8, v8, v12 2100; RV32-NEXT: addi sp, sp, 16 2101; RV32-NEXT: ret 2102; 2103; RV64-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: 2104; RV64: # 
%bb.0: # %entry 2105; RV64-NEXT: csrwi vxrm, 0 2106; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma 2107; RV64-NEXT: vsmul.vx v8, v8, a0 2108; RV64-NEXT: ret 2109entry: 2110 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64( 2111 <vscale x 4 x i64> undef, 2112 <vscale x 4 x i64> %0, 2113 i64 %1, 2114 iXLen 0, iXLen %2) 2115 2116 ret <vscale x 4 x i64> %a 2117} 2118 2119declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64( 2120 <vscale x 4 x i64>, 2121 <vscale x 4 x i64>, 2122 i64, 2123 <vscale x 4 x i1>, 2124 iXLen, iXLen, iXLen) 2125 2126define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 2127; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: 2128; RV32: # %bb.0: # %entry 2129; RV32-NEXT: addi sp, sp, -16 2130; RV32-NEXT: sw a0, 8(sp) 2131; RV32-NEXT: sw a1, 12(sp) 2132; RV32-NEXT: addi a0, sp, 8 2133; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu 2134; RV32-NEXT: vlse64.v v16, (a0), zero 2135; RV32-NEXT: csrwi vxrm, 0 2136; RV32-NEXT: vsmul.vv v8, v12, v16, v0.t 2137; RV32-NEXT: addi sp, sp, 16 2138; RV32-NEXT: ret 2139; 2140; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: 2141; RV64: # %bb.0: # %entry 2142; RV64-NEXT: csrwi vxrm, 0 2143; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu 2144; RV64-NEXT: vsmul.vx v8, v12, a0, v0.t 2145; RV64-NEXT: ret 2146entry: 2147 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64( 2148 <vscale x 4 x i64> %0, 2149 <vscale x 4 x i64> %1, 2150 i64 %2, 2151 <vscale x 4 x i1> %3, 2152 iXLen 0, iXLen %4, iXLen 1) 2153 2154 ret <vscale x 4 x i64> %a 2155} 2156 2157declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64( 2158 <vscale x 8 x i64>, 2159 <vscale x 8 x i64>, 2160 i64, 2161 iXLen, iXLen) 2162 2163define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind { 2164; RV32-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: 2165; RV32: # %bb.0: # %entry 2166; RV32-NEXT: addi sp, sp, -16 2167; RV32-NEXT: sw a0, 8(sp) 2168; RV32-NEXT: sw a1, 12(sp) 2169; RV32-NEXT: addi a0, sp, 8 2170; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma 2171; RV32-NEXT: vlse64.v v16, (a0), zero 2172; RV32-NEXT: csrwi vxrm, 0 2173; RV32-NEXT: vsmul.vv v8, v8, v16 2174; RV32-NEXT: addi sp, sp, 16 2175; RV32-NEXT: ret 2176; 2177; RV64-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: 2178; RV64: # %bb.0: # %entry 2179; RV64-NEXT: csrwi vxrm, 0 2180; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma 2181; RV64-NEXT: vsmul.vx v8, v8, a0 2182; RV64-NEXT: ret 2183entry: 2184 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64( 2185 <vscale x 8 x i64> undef, 2186 <vscale x 8 x i64> %0, 2187 i64 %1, 2188 iXLen 0, iXLen %2) 2189 2190 ret <vscale x 8 x i64> %a 2191} 2192 2193declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64( 2194 <vscale x 8 x i64>, 2195 <vscale x 8 x i64>, 2196 i64, 2197 <vscale x 8 x i1>, 2198 iXLen, iXLen, iXLen) 2199 2200define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 2201; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: 2202; RV32: # %bb.0: # %entry 2203; RV32-NEXT: addi sp, sp, -16 2204; RV32-NEXT: sw a0, 8(sp) 2205; RV32-NEXT: sw a1, 12(sp) 2206; RV32-NEXT: addi a0, sp, 8 2207; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu 2208; RV32-NEXT: vlse64.v v24, (a0), zero 2209; RV32-NEXT: csrwi vxrm, 0 2210; RV32-NEXT: vsmul.vv v8, 
v16, v24, v0.t 2211; RV32-NEXT: addi sp, sp, 16 2212; RV32-NEXT: ret 2213; 2214; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: 2215; RV64: # %bb.0: # %entry 2216; RV64-NEXT: csrwi vxrm, 0 2217; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu 2218; RV64-NEXT: vsmul.vx v8, v16, a0, v0.t 2219; RV64-NEXT: ret 2220entry: 2221 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64( 2222 <vscale x 8 x i64> %0, 2223 <vscale x 8 x i64> %1, 2224 i64 %2, 2225 <vscale x 8 x i1> %3, 2226 iXLen 0, iXLen %4, iXLen 1) 2227 2228 ret <vscale x 8 x i64> %a 2229} 2230