; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple aarch64-unknown-linux-gnu -mattr=+sve2 < %s | FileCheck %s

; SABA from ADD(ABS(SUB NSW))
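; Each test below builds add(%a, abs(sub nsw %b, %c)); the "i1 true" on
; llvm.abs makes an INT_MIN input poison. ISel is expected to fold the whole
; sequence into a single SVE2 SABA (signed absolute difference and
; accumulate) instruction, as the CHECK lines verify.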
define <vscale x 2 x i64> @saba_abs_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_abs_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 2 x i64> %b, %c
  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
  %add = add <vscale x 2 x i64> %a, %abs
  ret <vscale x 2 x i64> %add
}

define <vscale x 4 x i32> @saba_abs_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_abs_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 4 x i32> %b, %c
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  %add = add <vscale x 4 x i32> %a, %abs
  ret <vscale x 4 x i32> %add
}

define <vscale x 8 x i16> @saba_abs_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_abs_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 8 x i16> %b, %c
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  %add = add <vscale x 8 x i16> %a, %abs
  ret <vscale x 8 x i16> %add
}

define <vscale x 16 x i8> @saba_abs_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_abs_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 16 x i8> %b, %c
  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
  %add = add <vscale x 16 x i8> %a, %abs
  ret <vscale x 16 x i8> %add
}

; SABA from ADD(SABD)
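; Here the absolute difference comes straight from the llvm.aarch64.sve.sabd.u
; intrinsic, predicated by ptrue with pattern 31 (SV_ALL, all lanes active).
; With an all-active predicate, add(sabd(%b, %c), %a) is expected to fold into
; the same single SABA instruction.
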
define <vscale x 2 x i64> @saba_sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_sabd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %true = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %sabd = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1> %true, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  %add = add <vscale x 2 x i64> %sabd, %a
  ret <vscale x 2 x i64> %add
}

define <vscale x 4 x i32> @saba_sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_sabd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %true = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %sabd = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1> %true, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  %add = add <vscale x 4 x i32> %sabd, %a
  ret <vscale x 4 x i32> %add
}

define <vscale x 8 x i16> @saba_sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_sabd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %true = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %sabd = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1> %true, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %add = add <vscale x 8 x i16> %sabd, %a
  ret <vscale x 8 x i16> %add
}

define <vscale x 16 x i8> @saba_sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_sabd_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %true = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %sabd = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1> %true, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %add = add <vscale x 16 x i8> %sabd, %a
  ret <vscale x 16 x i8> %add
}

declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)

declare <vscale x  2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
declare <vscale x  4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x  8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)

declare <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x  2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x  4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x  8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)

; Assumed definition of attribute group #0, which the functions above
; reference and without which the module would not parse; "+sve2" matches the
; feature already enabled on the RUN line.
attributes #0 = { "target-features"="+sve2" }