; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s

;
; SXTB
;

; Merging byte sign-extend: z0 keeps %a's inactive lanes, active lanes take
; sign-extended %b (z1) under %pg (p0).
define <vscale x 8 x i16> @sxtb_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sxtb_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> %a,
                                                                <vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sxtb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sxtb_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sxtb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sxtb_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; SXTH
;

; Merging halfword sign-extend under %pg.
define <vscale x 4 x i32> @sxth_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sxth_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sxth_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sxth_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; SXTW
;

; Merging word sign-extend under %pg (only legal for the .d element size).
define <vscale x 2 x i64> @sxtw_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sxtw_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UXTB
;

; Merging byte zero-extend under %pg.
define <vscale x 8 x i16> @uxtb_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uxtb_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uxtb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> %a,
                                                                <vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uxtb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uxtb_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uxtb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uxtb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uxtb_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uxtb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UXTH
;

; Merging halfword zero-extend under %pg.
define <vscale x 4 x i32> @uxth_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uxth_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uxth z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uxth_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uxth_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uxth z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UXTW
;

; Merging word zero-extend under %pg (only legal for the .d element size).
define <vscale x 2 x i64> @uxtw_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uxtw_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uxtw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

; Intrinsic declarations: (passthru, predicate, operand) -> result.
declare <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)