; xref: /llvm-project/llvm/test/CodeGen/AArch64/sve-ptest.ll (revision 370ff43a15c90eca61dfa5715c7da82f1a4709f8)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve %s -o - | FileCheck %s


; Ensure that the inactive lanes of p1 aren't zeroed, since the FP compare should do that for free.

; Predicated FP equality compare followed by an any-active ptest under the
; same governing predicate %pg.
define i32 @fcmpeq_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcmpeq_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ptest p0, p1.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpeq.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv4i1(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %1)
  %conv = zext i1 %2 to i32
  ret i32 %conv
}

; Predicated FP inequality compare followed by an any-active ptest under the
; same governing predicate %pg.
define i32 @fcmpne_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcmpne_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmne p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ptest p0, p1.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpne.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv4i1(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %1)
  %conv = zext i1 %2 to i32
  ret i32 %conv
}

; Predicated FP greater-than-or-equal compare followed by an any-active ptest
; under the same governing predicate %pg.
define i32 @fcmpge_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcmpge_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ptest p0, p1.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpge.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv4i1(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %1)
  %conv = zext i1 %2 to i32
  ret i32 %conv
}

; Predicated FP greater-than compare followed by an any-active ptest under the
; same governing predicate %pg.
define i32 @fcmpgt_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcmpgt_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmgt p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ptest p0, p1.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpgt.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv4i1(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %1)
  %conv = zext i1 %2 to i32
  ret i32 %conv
}

; Predicated FP unordered compare followed by an any-active ptest under the
; same governing predicate %pg.
define i32 @fcmpuo_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcmpuo_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmuo p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ptest p0, p1.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpuo.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv4i1(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %1)
  %conv = zext i1 %2 to i32
  ret i32 %conv
}

declare <vscale x 4 x i1> @llvm.aarch64.sve.fcmpeq.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.fcmpne.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.fcmpge.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.fcmpgt.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.fcmpuo.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)

declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)

declare i1 @llvm.aarch64.sve.ptest.any.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)

; NOTE(review): the convert.to/from.svbool signatures below look off versus the
; documented intrinsics (convert.to.svbool normally returns <vscale x 16 x i1>);
; they are unused by the tests above — confirm before reuse.
declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 4 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 4 x i1>)
