; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

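; This file checks VP (vector-predicated) lowering of llvm.vp.llrint to RVV:
; f32 sources use a masked widening convert (vfwcvt.x.f.v) to produce the i64
; result, while f64 sources are converted in place with a masked vfcvt.x.f.v.
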
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f32(<vscale x 1 x float> %x, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv1i64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.vp.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.vp.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 2 x i64> @llrint_nxv2i64_nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv2i64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.vp.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.vp.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 4 x i64> @llrint_nxv4i64_nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv4i64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.vp.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.vp.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f32(<vscale x 8 x float> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv8i64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)

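; The nxv16f32 -> nxv16i64 case below cannot hold the widened result in a
; single register group, so the conversion is split at vlenb elements: the
; high half runs under the adjusted EVL with the mask slid down, and the low
; half under min(evl, vlenb) with the original mask.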
define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv16i64_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v16, v12, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB4_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB4_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x i64> @llvm.vp.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i64> %a
}
declare <vscale x 16 x i64> @llvm.vp.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)

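; For f64 sources the element width already matches the i64 result, so a
; single masked vfcvt.x.f.v converts in place and no widening is needed.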
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv1i64_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.vp.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.vp.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 2 x i64> @llrint_nxv2i64_nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv2i64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.vp.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.vp.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 4 x i64> @llrint_nxv4i64_nxv4f64(<vscale x 4 x double> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv4i64_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.vp.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.vp.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f64(<vscale x 8 x double> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv8i64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)