; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; umul.with.overflow on <vscale x 1 x i8>: lanes that overflow (vmulhu != 0)
; are zeroed in the product via the select (lowered to vmerge.vim).
declare { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)

define <vscale x 1 x i8> @umulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: umulo_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
  %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8> %b
  ret <vscale x 1 x i8> %d
}

; umul.with.overflow on <vscale x 2 x i8>: overflowing lanes are zeroed via the select.
declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)

define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
; CHECK-LABEL: umulo_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
  ret <vscale x 2 x i8> %d
}

; umul.with.overflow on <vscale x 4 x i8>: overflowing lanes are zeroed via the select.
declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)

define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
; CHECK-LABEL: umulo_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
  ret <vscale x 4 x i8> %d
}

; umul.with.overflow on <vscale x 8 x i8>: overflowing lanes are zeroed via the select.
declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)

define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: umulo_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
  ret <vscale x 8 x i8> %d
}

; umul.with.overflow on <vscale x 16 x i8> (LMUL=2 operands): overflowing lanes zeroed via the select.
declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)

define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: umulo_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
  ret <vscale x 16 x i8> %d
}

; umul.with.overflow on <vscale x 32 x i8> (LMUL=4 operands): overflowing lanes zeroed via the select.
declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)

define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
; CHECK-LABEL: umulo_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
  ret <vscale x 32 x i8> %d
}

; umul.with.overflow on <vscale x 64 x i8> (LMUL=8 operands): overflowing lanes zeroed via the select.
declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)

define <vscale x 64 x i8> @umulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
; CHECK-LABEL: umulo_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
  ret <vscale x 64 x i8> %d
}

; umul.with.overflow on <vscale x 1 x i16>: overflowing lanes are zeroed via the select.
declare { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)

define <vscale x 1 x i16> @umulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: umulo_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
  %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> %b
  ret <vscale x 1 x i16> %d
}

; umul.with.overflow on <vscale x 2 x i16>: overflowing lanes are zeroed via the select.
declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)

define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
; CHECK-LABEL: umulo_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
  ret <vscale x 2 x i16> %d
}

; umul.with.overflow on <vscale x 4 x i16>: overflowing lanes are zeroed via the select.
declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)

define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
; CHECK-LABEL: umulo_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
  ret <vscale x 4 x i16> %d
}

; umul.with.overflow on <vscale x 8 x i16> (LMUL=2 operands): overflowing lanes zeroed via the select.
declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)

define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
; CHECK-LABEL: umulo_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
  ret <vscale x 8 x i16> %d
}

; umul.with.overflow on <vscale x 16 x i16> (LMUL=4 operands): overflowing lanes zeroed via the select.
declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)

define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
; CHECK-LABEL: umulo_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
  ret <vscale x 16 x i16> %d
}

; umul.with.overflow on <vscale x 32 x i16> (LMUL=8 operands): overflowing lanes zeroed via the select.
declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)

define <vscale x 32 x i16> @umulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
; CHECK-LABEL: umulo_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
  ret <vscale x 32 x i16> %d
}

; umul.with.overflow on <vscale x 1 x i32>: overflowing lanes are zeroed via the select.
declare { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)

define <vscale x 1 x i32> @umulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
; CHECK-LABEL: umulo_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
  %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i32> zeroinitializer, <vscale x 1 x i32> %b
  ret <vscale x 1 x i32> %d
}

; umul.with.overflow on <vscale x 2 x i32>: overflowing lanes are zeroed via the select.
declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)

define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: umulo_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
  ret <vscale x 2 x i32> %d
}

; umul.with.overflow on <vscale x 4 x i32> (LMUL=2 operands): overflowing lanes zeroed via the select.
declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)

define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
; CHECK-LABEL: umulo_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
  ret <vscale x 4 x i32> %d
}

; umul.with.overflow on <vscale x 8 x i32> (LMUL=4 operands): overflowing lanes zeroed via the select.
declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)

define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: umulo_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
  ret <vscale x 8 x i32> %d
}

; umul.with.overflow on <vscale x 16 x i32> (LMUL=8 operands): overflowing lanes zeroed via the select.
declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)

define <vscale x 16 x i32> @umulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
; CHECK-LABEL: umulo_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
  ret <vscale x 16 x i32> %d
}

; umul.with.overflow on <vscale x 1 x i64>: overflowing lanes are zeroed via the select.
declare { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)

define <vscale x 1 x i64> @umulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
; CHECK-LABEL: umulo_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
  %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 1
  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i64> zeroinitializer, <vscale x 1 x i64> %b
  ret <vscale x 1 x i64> %d
}

; umul.with.overflow on <vscale x 2 x i64> (LMUL=2 operands): overflowing lanes zeroed via the select.
declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
; CHECK-LABEL: umulo_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v12, v8, v10
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
  ret <vscale x 2 x i64> %d
}

; umul.with.overflow on <vscale x 4 x i64> (LMUL=4 operands): overflowing lanes zeroed via the select.
declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)

define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
; CHECK-LABEL: umulo_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v16, v8, v12
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
  ret <vscale x 4 x i64> %d
}

; umul.with.overflow on <vscale x 8 x i64> (LMUL=8 operands): overflowing lanes zeroed via the select.
declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

define <vscale x 8 x i64> @umulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: umulo_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmulhu.vv v24, v8, v16
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
  ret <vscale x 8 x i64> %d
}