// xref: /llvm-project/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4r.c (revision 051054e6f74303bc880221e88671745f363964cc)
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
4 // RUN:   -target-feature +zvbb \
5 // RUN:   -target-feature +zvbc \
6 // RUN:   -target-feature +zvkb \
7 // RUN:   -target-feature +zvkg \
8 // RUN:   -target-feature +zvkned \
9 // RUN:   -target-feature +zvknhb \
10 // RUN:   -target-feature +zvksed \
11 // RUN:   -target-feature +zvksh \
12 // RUN:   -disable-O0-optnone \
13 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
14 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
15 
16 #include <riscv_vector.h>
17 
18 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsm4r_vv_u32mf2_tu
19 // CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
20 // CHECK-RV64-NEXT:  entry:
21 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4r.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 2)
22 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
23 //
// Tail-undisturbed vector-vector SM4 rounds at u32mf2 (nxv1i32); the CHECK
// block above pins the lowering to @llvm.riscv.vsm4r.vv with trailing policy
// operand i64 2 (tail-undisturbed, per the _tu overload).
24 vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
25   return __riscv_vsm4r_vv_tu(vd, vs2, vl);
26 }
27 
28 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsm4r_vs_u32mf2_u32mf2_tu
29 // CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
30 // CHECK-RV64-NEXT:  entry:
31 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4r.vs.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 2)
32 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
33 //
// Tail-undisturbed vector-scalar SM4 rounds: vd and vs2 both u32mf2
// (nxv1i32/nxv1i32 intrinsic per the CHECK block above).
34 vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
35   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
36 }
37 
38 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsm4r_vs_u32mf2_u32m1_tu
39 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
40 // CHECK-RV64-NEXT:  entry:
41 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vs.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 2)
42 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
43 //
// Cross-LMUL vs form: vd is u32m1 (nxv2i32), vs2 is u32mf2 (nxv1i32);
// exercises overload resolution of __riscv_vsm4r_vs_tu across register groups.
44 vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
45   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
46 }
47 
48 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsm4r_vs_u32mf2_u32m2_tu
49 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
50 // CHECK-RV64-NEXT:  entry:
51 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vs.nxv4i32.nxv1i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 2)
52 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
53 //
// Cross-LMUL vs form: vd is u32m2 (nxv4i32), vs2 is u32mf2 (nxv1i32).
54 vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
55   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
56 }
57 
58 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsm4r_vs_u32mf2_u32m4_tu
59 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
60 // CHECK-RV64-NEXT:  entry:
61 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vs.nxv8i32.nxv1i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 2)
62 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
63 //
// Cross-LMUL vs form: vd is u32m4 (nxv8i32), vs2 is u32mf2 (nxv1i32).
64 vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
65   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
66 }
67 
68 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsm4r_vs_u32mf2_u32m8_tu
69 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
70 // CHECK-RV64-NEXT:  entry:
71 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vs.nxv16i32.nxv1i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 2)
72 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
73 //
// Cross-LMUL vs form: vd is u32m8 (nxv16i32), vs2 is u32mf2 (nxv1i32) —
// the widest destination group paired with the narrowest source.
74 vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
75   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
76 }
77 
78 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsm4r_vv_u32m1_tu
79 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
80 // CHECK-RV64-NEXT:  entry:
81 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 2)
82 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
83 //
// Tail-undisturbed vector-vector SM4 rounds at u32m1 (nxv2i32).
84 vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
85   return __riscv_vsm4r_vv_tu(vd, vs2, vl);
86 }
87 
88 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsm4r_vs_u32m1_u32m1_tu
89 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
90 // CHECK-RV64-NEXT:  entry:
91 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vs.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 2)
92 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
93 //
// Same-LMUL vs form: vd and vs2 both u32m1 (nxv2i32/nxv2i32).
94 vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
95   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
96 }
97 
98 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsm4r_vs_u32m1_u32m2_tu
99 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
100 // CHECK-RV64-NEXT:  entry:
101 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vs.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 2)
102 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
103 //
// Cross-LMUL vs form: vd is u32m2 (nxv4i32), vs2 is u32m1 (nxv2i32).
104 vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
105   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
106 }
107 
108 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsm4r_vs_u32m1_u32m4_tu
109 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
110 // CHECK-RV64-NEXT:  entry:
111 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vs.nxv8i32.nxv2i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 2)
112 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
113 //
// Cross-LMUL vs form: vd is u32m4 (nxv8i32), vs2 is u32m1 (nxv2i32).
114 vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
115   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
116 }
117 
118 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsm4r_vs_u32m1_u32m8_tu
119 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
120 // CHECK-RV64-NEXT:  entry:
121 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vs.nxv16i32.nxv2i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 2)
122 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
123 //
// Cross-LMUL vs form: vd is u32m8 (nxv16i32), vs2 is u32m1 (nxv2i32).
124 vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
125   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
126 }
127 
128 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsm4r_vv_u32m2_tu
129 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
130 // CHECK-RV64-NEXT:  entry:
131 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 2)
132 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
133 //
// Tail-undisturbed vector-vector SM4 rounds at u32m2 (nxv4i32).
134 vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
135   return __riscv_vsm4r_vv_tu(vd, vs2, vl);
136 }
137 
138 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsm4r_vs_u32m2_u32m2_tu
139 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
140 // CHECK-RV64-NEXT:  entry:
141 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vs.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 2)
142 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
143 //
// Same-LMUL vs form: vd and vs2 both u32m2 (nxv4i32/nxv4i32).
144 vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
145   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
146 }
147 
148 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsm4r_vs_u32m2_u32m4_tu
149 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
150 // CHECK-RV64-NEXT:  entry:
151 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vs.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 2)
152 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
153 //
// Cross-LMUL vs form: vd is u32m4 (nxv8i32), vs2 is u32m2 (nxv4i32).
154 vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
155   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
156 }
157 
158 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsm4r_vs_u32m2_u32m8_tu
159 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
160 // CHECK-RV64-NEXT:  entry:
161 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vs.nxv16i32.nxv4i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 2)
162 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
163 //
// Cross-LMUL vs form: vd is u32m8 (nxv16i32), vs2 is u32m2 (nxv4i32).
164 vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
165   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
166 }
167 
168 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsm4r_vv_u32m4_tu
169 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
170 // CHECK-RV64-NEXT:  entry:
171 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 2)
172 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
173 //
// Tail-undisturbed vector-vector SM4 rounds at u32m4 (nxv8i32).
174 vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
175   return __riscv_vsm4r_vv_tu(vd, vs2, vl);
176 }
177 
178 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsm4r_vs_u32m4_u32m4_tu
179 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
180 // CHECK-RV64-NEXT:  entry:
181 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vs.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 2)
182 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
183 //
// Same-LMUL vs form: vd and vs2 both u32m4 (nxv8i32/nxv8i32).
184 vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
185   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
186 }
187 
188 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsm4r_vs_u32m4_u32m8_tu
189 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
190 // CHECK-RV64-NEXT:  entry:
191 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vs.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 2)
192 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
193 //
// Cross-LMUL vs form: vd is u32m8 (nxv16i32), vs2 is u32m4 (nxv8i32).
194 vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
195   return __riscv_vsm4r_vs_tu(vd, vs2, vl);
196 }
197 
198 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsm4r_vv_u32m8_tu
199 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
200 // CHECK-RV64-NEXT:  entry:
201 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 2)
202 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
203 //
// Tail-undisturbed vector-vector SM4 rounds at u32m8 (nxv16i32) — the
// largest register group covered by this test.
204 vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
205   return __riscv_vsm4r_vv_tu(vd, vs2, vl);
206 }
207 
208