xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
3; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
4
5; Test that the prepareSREMEqFold optimization doesn't crash on scalable
6; vector types.
; (%va srem 6) == 0 is lowered via the srem-by-constant equality fold:
; multiply by the modular inverse (vmacc.vx with -85), rotate right by 1
; (vsll.vi 7 / vsrl.vi 1 / vor.vv), then an unsigned compare against the
; fold's threshold (vmsleu.vx against 42). The CHECK lines pin that
; lowering; the test's purpose is that the fold works on scalable vectors.
7define <vscale x 4 x i1> @srem_eq_fold_nxv4i8(<vscale x 4 x i8> %va) {
8; CHECK-LABEL: srem_eq_fold_nxv4i8:
9; CHECK:       # %bb.0:
10; CHECK-NEXT:    li a0, 42
11; CHECK-NEXT:    li a1, -85
12; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
13; CHECK-NEXT:    vmv.v.x v9, a0
14; CHECK-NEXT:    vmacc.vx v9, a1, v8
15; CHECK-NEXT:    vsll.vi v8, v9, 7
16; CHECK-NEXT:    vsrl.vi v9, v9, 1
17; CHECK-NEXT:    vor.vv v8, v9, v8
18; CHECK-NEXT:    vmsleu.vx v0, v8, a0
19; CHECK-NEXT:    ret
20  %rem = srem <vscale x 4 x i8> %va, splat (i8 6)
21
22  %cc = icmp eq <vscale x 4 x i8> %rem, zeroinitializer
23  ret <vscale x 4 x i1> %cc
24}
25
; Widened multiply-high pattern: sext both i32 operands to i64, multiply,
; lshr by 32, truncate back to i32. Must select a single vmulh.vv at
; SEW=32, LMUL=1/2 instead of widening.
26define <vscale x 1 x i32> @vmulh_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
27; CHECK-LABEL: vmulh_vv_nxv1i32:
28; CHECK:       # %bb.0:
29; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
30; CHECK-NEXT:    vmulh.vv v8, v9, v8
31; CHECK-NEXT:    ret
32  %vc = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
33  %vd = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
34  %ve = mul <vscale x 1 x i64> %vc, %vd
35  %vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)
36  %vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
37  ret <vscale x 1 x i32> %vg
38}
39
; Same multiply-high pattern with one operand a splat of the scalar %x:
; must fold the splat into the scalar operand of vmulh.vx.
40define <vscale x 1 x i32> @vmulh_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
41; CHECK-LABEL: vmulh_vx_nxv1i32:
42; CHECK:       # %bb.0:
43; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
44; CHECK-NEXT:    vmulh.vx v8, v8, a0
45; CHECK-NEXT:    ret
46  %head1 = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
47  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
48  %vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
49  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
50  %vd = mul <vscale x 1 x i64> %vb, %vc
51  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
52  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
53  ret <vscale x 1 x i32> %vf
54}
55
; Multiply-high by a constant splat (-7): the CHECK lines show the
; constant is materialized with li and fed to vmulh.vx (no immediate
; form is used).
56define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
57; CHECK-LABEL: vmulh_vi_nxv1i32_0:
58; CHECK:       # %bb.0:
59; CHECK-NEXT:    li a0, -7
60; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
61; CHECK-NEXT:    vmulh.vx v8, v8, a0
62; CHECK-NEXT:    ret
63  %vb = sext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
64  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
65  %vd = mul <vscale x 1 x i64> %vb, %vc
66  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
67  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
68  ret <vscale x 1 x i32> %vf
69}
70
; Multiply-high by a constant splat (16, a power of two): still expected
; to materialize the constant with li and select vmulh.vx rather than a
; shift-based lowering.
71define <vscale x 1 x i32> @vmulh_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
72; CHECK-LABEL: vmulh_vi_nxv1i32_1:
73; CHECK:       # %bb.0:
74; CHECK-NEXT:    li a0, 16
75; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
76; CHECK-NEXT:    vmulh.vx v8, v8, a0
77; CHECK-NEXT:    ret
78  %vb = sext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
79  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
80  %vd = mul <vscale x 1 x i64> %vb, %vc
81  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
82  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
83  ret <vscale x 1 x i32> %vf
84}
85
; Same vmulh.vv pattern as nxv1i32, at LMUL=1 (e32, m1).
86define <vscale x 2 x i32> @vmulh_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
87; CHECK-LABEL: vmulh_vv_nxv2i32:
88; CHECK:       # %bb.0:
89; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
90; CHECK-NEXT:    vmulh.vv v8, v9, v8
91; CHECK-NEXT:    ret
92  %vc = sext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
93  %vd = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
94  %ve = mul <vscale x 2 x i64> %vc, %vd
95  %vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)
96  %vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
97  ret <vscale x 2 x i32> %vg
98}
99
; Scalar-splat multiply-high at LMUL=1: splat of %x folds into vmulh.vx.
100define <vscale x 2 x i32> @vmulh_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
101; CHECK-LABEL: vmulh_vx_nxv2i32:
102; CHECK:       # %bb.0:
103; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
104; CHECK-NEXT:    vmulh.vx v8, v8, a0
105; CHECK-NEXT:    ret
106  %head1 = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
107  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
108  %vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
109  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
110  %vd = mul <vscale x 2 x i64> %vb, %vc
111  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
112  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
113  ret <vscale x 2 x i32> %vf
114}
115
; Constant-splat (-7) multiply-high at LMUL=1: constant goes through li,
; then vmulh.vx.
116define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
117; CHECK-LABEL: vmulh_vi_nxv2i32_0:
118; CHECK:       # %bb.0:
119; CHECK-NEXT:    li a0, -7
120; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
121; CHECK-NEXT:    vmulh.vx v8, v8, a0
122; CHECK-NEXT:    ret
123  %vb = sext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
124  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
125  %vd = mul <vscale x 2 x i64> %vb, %vc
126  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
127  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
128  ret <vscale x 2 x i32> %vf
129}
130
; Constant-splat (16) multiply-high at LMUL=1: li + vmulh.vx.
131define <vscale x 2 x i32> @vmulh_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
132; CHECK-LABEL: vmulh_vi_nxv2i32_1:
133; CHECK:       # %bb.0:
134; CHECK-NEXT:    li a0, 16
135; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
136; CHECK-NEXT:    vmulh.vx v8, v8, a0
137; CHECK-NEXT:    ret
138  %vb = sext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
139  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
140  %vd = mul <vscale x 2 x i64> %vb, %vc
141  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
142  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
143  ret <vscale x 2 x i32> %vf
144}
145
; Same vmulh.vv pattern at LMUL=2 (e32, m2); second source is in v10
; because operands now occupy register pairs.
146define <vscale x 4 x i32> @vmulh_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
147; CHECK-LABEL: vmulh_vv_nxv4i32:
148; CHECK:       # %bb.0:
149; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
150; CHECK-NEXT:    vmulh.vv v8, v10, v8
151; CHECK-NEXT:    ret
152  %vc = sext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
153  %vd = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
154  %ve = mul <vscale x 4 x i64> %vc, %vd
155  %vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)
156  %vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
157  ret <vscale x 4 x i32> %vg
158}
159
; Scalar-splat multiply-high at LMUL=2: splat of %x folds into vmulh.vx.
160define <vscale x 4 x i32> @vmulh_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
161; CHECK-LABEL: vmulh_vx_nxv4i32:
162; CHECK:       # %bb.0:
163; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
164; CHECK-NEXT:    vmulh.vx v8, v8, a0
165; CHECK-NEXT:    ret
166  %head1 = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
167  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
168  %vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
169  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
170  %vd = mul <vscale x 4 x i64> %vb, %vc
171  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
172  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
173  ret <vscale x 4 x i32> %vf
174}
175
; Constant-splat (-7) multiply-high at LMUL=2: li + vmulh.vx.
176define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
177; CHECK-LABEL: vmulh_vi_nxv4i32_0:
178; CHECK:       # %bb.0:
179; CHECK-NEXT:    li a0, -7
180; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
181; CHECK-NEXT:    vmulh.vx v8, v8, a0
182; CHECK-NEXT:    ret
183  %vb = sext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
184  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
185  %vd = mul <vscale x 4 x i64> %vb, %vc
186  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
187  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
188  ret <vscale x 4 x i32> %vf
189}
190
; Constant-splat (16) multiply-high at LMUL=2: li + vmulh.vx.
191define <vscale x 4 x i32> @vmulh_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
192; CHECK-LABEL: vmulh_vi_nxv4i32_1:
193; CHECK:       # %bb.0:
194; CHECK-NEXT:    li a0, 16
195; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
196; CHECK-NEXT:    vmulh.vx v8, v8, a0
197; CHECK-NEXT:    ret
198  %vb = sext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
199  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
200  %vd = mul <vscale x 4 x i64> %vb, %vc
201  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
202  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
203  ret <vscale x 4 x i32> %vf
204}
205
; Same vmulh.vv pattern at LMUL=4 (e32, m4); second source is in v12
; because operands occupy groups of four registers.
206define <vscale x 8 x i32> @vmulh_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
207; CHECK-LABEL: vmulh_vv_nxv8i32:
208; CHECK:       # %bb.0:
209; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
210; CHECK-NEXT:    vmulh.vv v8, v12, v8
211; CHECK-NEXT:    ret
212  %vc = sext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
213  %vd = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
214  %ve = mul <vscale x 8 x i64> %vc, %vd
215  %vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)
216  %vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
217  ret <vscale x 8 x i32> %vg
218}
219
; Scalar-splat multiply-high at LMUL=4: splat of %x folds into vmulh.vx.
220define <vscale x 8 x i32> @vmulh_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
221; CHECK-LABEL: vmulh_vx_nxv8i32:
222; CHECK:       # %bb.0:
223; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
224; CHECK-NEXT:    vmulh.vx v8, v8, a0
225; CHECK-NEXT:    ret
226  %head1 = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
227  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
228  %vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
229  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
230  %vd = mul <vscale x 8 x i64> %vb, %vc
231  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
232  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
233  ret <vscale x 8 x i32> %vf
234}
235
; Constant-splat (-7) multiply-high at LMUL=4: li + vmulh.vx.
236define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
237; CHECK-LABEL: vmulh_vi_nxv8i32_0:
238; CHECK:       # %bb.0:
239; CHECK-NEXT:    li a0, -7
240; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
241; CHECK-NEXT:    vmulh.vx v8, v8, a0
242; CHECK-NEXT:    ret
243  %vb = sext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
244  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
245  %vd = mul <vscale x 8 x i64> %vb, %vc
246  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
247  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
248  ret <vscale x 8 x i32> %vf
249}
250
; Constant-splat (16) multiply-high at LMUL=4: li + vmulh.vx.
251define <vscale x 8 x i32> @vmulh_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
252; CHECK-LABEL: vmulh_vi_nxv8i32_1:
253; CHECK:       # %bb.0:
254; CHECK-NEXT:    li a0, 16
255; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
256; CHECK-NEXT:    vmulh.vx v8, v8, a0
257; CHECK-NEXT:    ret
258  %vb = sext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
259  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
260  %vd = mul <vscale x 8 x i64> %vb, %vc
261  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
262  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
263  ret <vscale x 8 x i32> %vf
264}
265