; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}
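
; vwmul is a signed widening multiply: each pair of SEW-bit source elements
; produces a 2*SEW-bit product, so the destination register group uses twice
; the LMUL of the sources. The unmasked intrinsics take a passthru (undef
; throughout this file), the two sources, and the AVL as an iXLen; the masked
; forms append a mask and a policy operand. The policy value iXLen 1 encodes
; tail-agnostic, mask-undisturbed, which is why the masked checks use
; "ta, mu" while the unmasked ones use "ta, ma".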

declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}
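
; Note the codegen pattern above: the unmasked tests compute the product in a
; scratch register group and copy it back to v8 (vmv1r/vmv2r/vmv4r/vmv8r as
; LMUL grows), since a widening op's wider destination group may not
; partially overlap its narrower sources under the RVV overlap rules. The
; masked tests can write v8 in place because the passthru already occupies
; the full-width destination group and the sources sit above it.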

declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwmul.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwmul.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwmul.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vwmul.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
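
; The e32 sources below widen to 64-bit elements. Both RUN lines still apply:
; element width is independent of XLEN, so the rv32 configuration handles i64
; vector elements as well, since -mattr=+v implies ELEN=64.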

declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwmul.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwmul.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
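
; The .vx variants below take the second operand as a scalar GPR that is
; splat across the vector before the widening multiply. The scalar (matching
; the source SEW) arrives in a0, so the AVL moves to a1 in the vsetvli;
; otherwise the tests mirror the .vv forms above.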

declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vwmul.vx v10, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vwmul.vx v12, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vwmul.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
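
; The remaining .vx tests repeat the same pattern with wider scalars: the
; scalar operand type tracks the source element width (i16 for the e16 forms
; here, i32 for the e32 forms further down).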

declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vwmul.vx v10, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vwmul.vx v12, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vwmul.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

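; The remaining tests exercise vwmul.vx with 32-bit source elements widening
; to 64-bit results. LMUL doubles from the source group to the destination
; group at each size: mf2 -> m1, m1 -> m2, m2 -> m4, and m4 -> m8.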
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vwmul.vx v10, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vwmul.vx v12, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

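; In the unmasked m4 -> m8 case below, the product is formed in v16-v23 and
; then copied to v8 with vmv8r.v: a widening destination may overlap a
; narrower source group only in its highest-numbered part, so v8 cannot
; serve as both the m4 source and the start of the m8 destination.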
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vwmul.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}