; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
; RUN:     -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64

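; The IR below uses the placeholder type iXLen; each RUN line substitutes it
; with i32 or i64 via sed before llc runs, so one set of functions covers
; both result widths. RV32 and RV64-i32 check the i32-result lowering,
; RV64-i64 the i64-result lowering.
;
; For f32 sources, an i32 result is a same-width conversion (an in-place
; vfcvt.x.f.v), while an i64 result widens: vfwcvt.x.f.v writes a doubled
; register group, which is then copied back into v8 to satisfy the return
; convention.
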
define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
; RV32-LABEL: lrint_nxv1f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv1f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv1f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-i64-NEXT:    vfwcvt.x.f.v v9, v8
; RV64-i64-NEXT:    vmv1r.v v8, v9
; RV64-i64-NEXT:    ret
  %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float> %x)
  ret <vscale x 1 x iXLen> %a
}
declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float>)

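; The nxv2/nxv4/nxv8 f32 tests below repeat the same pattern at successively
; larger LMUL (m1, m2, m4); only the register-group sizes change.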
define <vscale x 2 x iXLen> @lrint_nxv2f32(<vscale x 2 x float> %x) {
; RV32-LABEL: lrint_nxv2f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv2f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv2f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV64-i64-NEXT:    vfwcvt.x.f.v v10, v8
; RV64-i64-NEXT:    vmv2r.v v8, v10
; RV64-i64-NEXT:    ret
  %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f32(<vscale x 2 x float> %x)
  ret <vscale x 2 x iXLen> %a
}
declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f32(<vscale x 2 x float>)

define <vscale x 4 x iXLen> @lrint_nxv4f32(<vscale x 4 x float> %x) {
; RV32-LABEL: lrint_nxv4f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv4f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv4f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-i64-NEXT:    vfwcvt.x.f.v v12, v8
; RV64-i64-NEXT:    vmv4r.v v8, v12
; RV64-i64-NEXT:    ret
  %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f32(<vscale x 4 x float> %x)
  ret <vscale x 4 x iXLen> %a
}
declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f32(<vscale x 4 x float>)

define <vscale x 8 x iXLen> @lrint_nxv8f32(<vscale x 8 x float> %x) {
; RV32-LABEL: lrint_nxv8f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv8f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv8f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v8
; RV64-i64-NEXT:    vmv8r.v v8, v16
; RV64-i64-NEXT:    ret
  %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float> %x)
  ret <vscale x 8 x iXLen> %a
}
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float>)

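; A <vscale x 16 x i64> result would need LMUL 16, which exceeds the RVV
; maximum of 8, so the widening conversion is split: each vfwcvt.x.f.v below
; converts an m4 half of the f32 source into an m8 i64 register group.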
define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x) {
; RV32-LABEL: lrint_nxv16f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv16f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv16f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV64-i64-NEXT:    vfwcvt.x.f.v v24, v8
; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v12
; RV64-i64-NEXT:    vmv8r.v v8, v24
; RV64-i64-NEXT:    ret
  %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float> %x)
  ret <vscale x 16 x iXLen> %a
}
declare <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float>)

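; For f64 sources the relationship flips: an i32 result narrows via
; vfncvt.x.f.w into a smaller register group (then copied back into v8),
; while an i64 result is a same-width, in-place vfcvt.x.f.v.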
define <vscale x 1 x iXLen> @lrint_nxv1f64(<vscale x 1 x double> %x) {
; RV32-LABEL: lrint_nxv1f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv1f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-i32-NEXT:    vfncvt.x.f.w v9, v8
; RV64-i32-NEXT:    vmv1r.v v8, v9
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv1f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i64-NEXT:    ret
  %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f64(<vscale x 1 x double> %x)
  ret <vscale x 1 x iXLen> %a
}
declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f64(<vscale x 1 x double>)

define <vscale x 2 x iXLen> @lrint_nxv2f64(<vscale x 2 x double> %x) {
; RV32-LABEL: lrint_nxv2f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v10, v8
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv2f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV64-i32-NEXT:    vfncvt.x.f.w v10, v8
; RV64-i32-NEXT:    vmv.v.v v8, v10
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv2f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i64-NEXT:    ret
  %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f64(<vscale x 2 x double> %x)
  ret <vscale x 2 x iXLen> %a
}
declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f64(<vscale x 2 x double>)

define <vscale x 4 x iXLen> @lrint_nxv4f64(<vscale x 4 x double> %x) {
; RV32-LABEL: lrint_nxv4f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv4f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-i32-NEXT:    vfncvt.x.f.w v12, v8
; RV64-i32-NEXT:    vmv.v.v v8, v12
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv4f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i64-NEXT:    ret
  %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f64(<vscale x 4 x double> %x)
  ret <vscale x 4 x iXLen> %a
}
declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f64(<vscale x 4 x double>)

define <vscale x 8 x iXLen> @lrint_nxv8f64(<vscale x 8 x double> %x) {
; RV32-LABEL: lrint_nxv8f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v16, v8
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_nxv8f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV64-i32-NEXT:    vfncvt.x.f.w v16, v8
; RV64-i32-NEXT:    vmv.v.v v8, v16
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_nxv8f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-i64-NEXT:    ret
  %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double> %x)
  ret <vscale x 8 x iXLen> %a
}
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double>)