; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkg \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
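
; Test lowering of the llvm.riscv.vgmul.vv intrinsic (Zvkg vector GHASH
; multiply) for i32 element vectors from LMUL mf2 (nxv1i32) through m8
; (nxv16i32). Every call passes policy operand iXLen 2, so each check expects
; a tail-undisturbed, mask-agnostic "tu, ma" vsetvli before the vgmul.vv.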

declare <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vgmul_vs_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vgmul_vs_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vgmul.vv v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vgmul_vs_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vgmul_vs_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vgmul.vv v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vgmul_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vgmul_vs_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vgmul.vv v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vgmul_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vgmul_vs_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vgmul.vv v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vgmul_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vgmul_vs_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vgmul.vv v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 16 x i32> %a
}