; xref: /llvm-project/llvm/test/CodeGen/AArch64/replace-with-veclib-armpl.ll (revision 56c091ea7106507b36015297ee9005c9d5fab0bf)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals --version 2
; RUN: opt -S -vector-library=ArmPL -replace-with-veclib < %s | FileCheck %s

; Module-level target: AArch64 Linux, required for the ArmPL veclib mappings.
target triple = "aarch64-unknown-linux-gnu"

;
; Check that the replace-with-veclib pass replaces vector math intrinsics
; with ArmPL veclib calls for both fixed-width and scalable vector types.
; Scalable mappings use the predicated `_x` variants with an all-true mask.
;

; llvm.cos.*: fixed-width vectors map to armpl_vcosq_{f64,f32}; scalable
; vectors map to the predicated armpl_svcos_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.cos.v2f64(<2 x double>)
declare <4 x float> @llvm.cos.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float>)


;.
; CHECK: @llvm.compiler.used = appending global [68 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_svcos_f64_x, ptr @armpl_svcos_f32_x, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_svexp_f64_x, ptr @armpl_svexp_f32_x, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_svexp10_f64_x, ptr @armpl_svexp10_f32_x, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_svexp2_f64_x, ptr @armpl_svexp2_f32_x, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_svlog_f64_x, ptr @armpl_svlog_f32_x, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32, ptr @armpl_svlog10_f64_x, ptr @armpl_svlog10_f32_x, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_svlog2_f64_x, ptr @armpl_svlog2_f32_x, ptr @armpl_vpowq_f64, ptr @armpl_vpowq_f32, ptr @armpl_svpow_f64_x, ptr @armpl_svpow_f32_x, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_svsin_f64_x, ptr @armpl_svsin_f32_x, ptr @armpl_vtanq_f64, ptr @armpl_vtanq_f32, ptr @armpl_svtan_f64_x, ptr @armpl_svtan_f32_x, ptr @armpl_vacosq_f64, ptr @armpl_vacosq_f32, ptr @armpl_svacos_f64_x, ptr @armpl_svacos_f32_x, ptr @armpl_vasinq_f64, ptr @armpl_vasinq_f32, ptr @armpl_svasin_f64_x, ptr @armpl_svasin_f32_x, ptr @armpl_vatanq_f64, ptr @armpl_vatanq_f32, ptr @armpl_svatan_f64_x, ptr @armpl_svatan_f32_x, ptr @armpl_vatan2q_f64, ptr @armpl_vatan2q_f32, ptr @armpl_svatan2_f64_x, ptr @armpl_svatan2_f32_x, ptr @armpl_vcoshq_f64, ptr @armpl_vcoshq_f32, ptr @armpl_svcosh_f64_x, ptr @armpl_svcosh_f32_x, ptr @armpl_vsinhq_f64, ptr @armpl_vsinhq_f32, ptr @armpl_svsinh_f64_x, ptr @armpl_svsinh_f32_x, ptr @armpl_vtanhq_f64, ptr @armpl_vtanhq_f32, ptr @armpl_svtanh_f64_x, ptr @armpl_svtanh_f32_x], section "llvm.metadata"
;.
define <2 x double> @llvm_cos_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_cos_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vcosq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.cos.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_cos_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_cos_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vcosq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.cos.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_cos_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_cos_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_cos_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_cos_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.exp.*: fixed-width vectors map to armpl_vexpq_{f64,f32}; scalable
; vectors map to the predicated armpl_svexp_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.exp.v2f64(<2 x double>)
declare <4 x float> @llvm.exp.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.exp.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.exp.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_exp_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_exp_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vexpq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.exp.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_exp_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_exp_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vexpq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.exp.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_exp_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svexp_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.exp.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_exp_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_exp_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svexp_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.exp.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.exp10.*: fixed-width vectors map to armpl_vexp10q_{f64,f32}; scalable
; vectors map to the predicated armpl_svexp10_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.exp10.v2f64(<2 x double>)
declare <4 x float> @llvm.exp10.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.exp10.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.exp10.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_exp10_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_exp10_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vexp10q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.exp10.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_exp10_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_exp10_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vexp10q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.exp10.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_exp10_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp10_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.exp10.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_exp10_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_exp10_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.exp10.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.exp2.*: fixed-width vectors map to armpl_vexp2q_{f64,f32}; scalable
; vectors map to the predicated armpl_svexp2_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.exp2.v2f64(<2 x double>)
declare <4 x float> @llvm.exp2.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.exp2.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.exp2.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_exp2_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_exp2_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vexp2q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.exp2.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_exp2_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_exp2_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vexp2q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.exp2.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_exp2_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp2_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.exp2.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_exp2_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_exp2_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.exp2.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.log.*: fixed-width vectors map to armpl_vlogq_{f64,f32}; scalable
; vectors map to the predicated armpl_svlog_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.log.v2f64(<2 x double>)
declare <4 x float> @llvm.log.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.log.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.log.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_log_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_log_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vlogq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.log.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_log_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_log_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vlogq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.log.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_log_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_log_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.log.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_log_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_log_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.log.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.log10.*: fixed-width vectors map to armpl_vlog10q_{f64,f32}; scalable
; vectors map to the predicated armpl_svlog10_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.log10.v2f64(<2 x double>)
declare <4 x float> @llvm.log10.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.log10.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.log10.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_log10_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_log10_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vlog10q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.log10.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_log10_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_log10_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vlog10q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.log10.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_log10_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_log10_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.log10.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_log10_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_log10_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.log10.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.log2.*: fixed-width vectors map to armpl_vlog2q_{f64,f32}; scalable
; vectors map to the predicated armpl_svlog2_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.log2.v2f64(<2 x double>)
declare <4 x float> @llvm.log2.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.log2.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.log2.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_log2_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_log2_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vlog2q_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.log2.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_log2_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_log2_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vlog2q_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.log2.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_log2_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_log2_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.log2.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_log2_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_log2_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.log2.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.pow.* (two operands): fixed-width vectors map to armpl_vpowq_{f64,f32};
; scalable vectors map to the predicated armpl_svpow_{f64,f32}_x (mask last).
declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
declare <vscale x 2 x double> @llvm.pow.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)

define <2 x double> @llvm_pow_f64(<2 x double> %in, <2 x double> %power) {
; CHECK-LABEL: define <2 x double> @llvm_pow_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]], <2 x double> [[POWER:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vpowq_f64(<2 x double> [[IN]], <2 x double> [[POWER]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %in, <2 x double> %power)
  ret <2 x double> %1
}

define <4 x float> @llvm_pow_f32(<4 x float> %in, <4 x float> %power) {
; CHECK-LABEL: define <4 x float> @llvm_pow_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]], <4 x float> [[POWER:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vpowq_f32(<4 x float> [[IN]], <4 x float> [[POWER]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %in, <4 x float> %power)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_pow_vscale_f64(<vscale x 2 x double> %in, <vscale x 2 x double> %power) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_pow_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]], <vscale x 2 x double> [[POWER:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x double> [[POWER]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.pow.nxv2f64(<vscale x 2 x double> %in, <vscale x 2 x double> %power)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_pow_vscale_f32(<vscale x 4 x float> %in, <vscale x 4 x float> %power) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_pow_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]], <vscale x 4 x float> [[POWER:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x float> [[POWER]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float> %in, <vscale x 4 x float> %power)
  ret <vscale x 4 x float> %1
}

; llvm.sin.*: fixed-width vectors map to armpl_vsinq_{f64,f32}; scalable
; vectors map to the predicated armpl_svsin_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.sin.v2f64(<2 x double>)
declare <4 x float> @llvm.sin.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.sin.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.sin.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_sin_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_sin_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vsinq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.sin.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_sin_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_sin_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vsinq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.sin.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_sin_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_sin_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.sin.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_sin_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_sin_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.sin.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.tan.*: fixed-width vectors map to armpl_vtanq_{f64,f32}; scalable
; vectors map to the predicated armpl_svtan_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.tan.v2f64(<2 x double>)
declare <4 x float> @llvm.tan.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.tan.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.tan.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_tan_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_tan_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vtanq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.tan.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_tan_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_tan_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vtanq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.tan.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_tan_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_tan_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svtan_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.tan.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_tan_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_tan_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svtan_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.tan.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.acos.*: fixed-width vectors map to armpl_vacosq_{f64,f32}; scalable
; vectors map to the predicated armpl_svacos_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.acos.v2f64(<2 x double>)
declare <4 x float> @llvm.acos.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.acos.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.acos.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_acos_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_acos_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vacosq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.acos.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_acos_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_acos_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vacosq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.acos.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_acos_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_acos_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svacos_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.acos.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_acos_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_acos_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svacos_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.acos.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.asin.*: fixed-width vectors map to armpl_vasinq_{f64,f32}; scalable
; vectors map to the predicated armpl_svasin_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.asin.v2f64(<2 x double>)
declare <4 x float> @llvm.asin.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.asin.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.asin.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_asin_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_asin_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vasinq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.asin.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_asin_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_asin_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vasinq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.asin.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_asin_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_asin_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svasin_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.asin.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_asin_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_asin_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svasin_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.asin.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

; llvm.atan.*: fixed-width vectors map to armpl_vatanq_{f64,f32}; scalable
; vectors map to the predicated armpl_svatan_{f64,f32}_x (all-true mask).
declare <2 x double> @llvm.atan.v2f64(<2 x double>)
declare <4 x float> @llvm.atan.v4f32(<4 x float>)
declare <vscale x 2 x double> @llvm.atan.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.atan.nxv4f32(<vscale x 4 x float>)

define <2 x double> @llvm_atan_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_atan_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vatanq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.atan.v2f64(<2 x double> %in)
  ret <2 x double> %1
}

define <4 x float> @llvm_atan_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_atan_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vatanq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.atan.v4f32(<4 x float> %in)
  ret <4 x float> %1
}

define <vscale x 2 x double> @llvm_atan_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_atan_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svatan_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.atan.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}

define <vscale x 4 x float> @llvm_atan_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_atan_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svatan_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.atan.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}

601declare <2 x double> @llvm.atan2.v2f64(<2 x double>, <2 x double>)
602declare <4 x float> @llvm.atan2.v4f32(<4 x float>, <4 x float>)
603declare <vscale x 2 x double> @llvm.atan2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
604declare <vscale x 4 x float> @llvm.atan2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
605
606define <2 x double> @llvm_atan2_f64(<2 x double> %in1, <2 x double> %in2) {
607; CHECK-LABEL: define <2 x double> @llvm_atan2_f64
608; CHECK-SAME: (<2 x double> [[IN1:%.*]], <2 x double> [[IN2:%.*]]) {
609; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vatan2q_f64(<2 x double> [[IN1]], <2 x double> [[IN2]])
610; CHECK-NEXT:    ret <2 x double> [[TMP1]]
611;
612  %1 = call fast <2 x double> @llvm.atan2.v2f64(<2 x double> %in1, <2 x double> %in2)
613  ret <2 x double> %1
614}
615
; Fixed-width atan2 on <4 x float> is replaced by the NEON ArmPL routine
; @armpl_vatan2q_f32; both vector operands are forwarded unchanged.
define <4 x float> @llvm_atan2_f32(<4 x float> %in1, <4 x float> %in2) {
; CHECK-LABEL: define <4 x float> @llvm_atan2_f32
; CHECK-SAME: (<4 x float> [[IN1:%.*]], <4 x float> [[IN2:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vatan2q_f32(<4 x float> [[IN1]], <4 x float> [[IN2]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.atan2.v4f32(<4 x float> %in1, <4 x float> %in2)
  ret <4 x float> %1
}
625
; Scalable atan2 maps to the SVE routine @armpl_svatan2_f64_x; an all-active
; predicate (splat (i1 true)) is appended after the two vector operands.
define <vscale x 2 x double> @llvm_atan2_vscale_f64(<vscale x 2 x double> %in1, <vscale x 2 x double> %in2) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_atan2_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN1:%.*]], <vscale x 2 x double> [[IN2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svatan2_f64_x(<vscale x 2 x double> [[IN1]], <vscale x 2 x double> [[IN2]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.atan2.nxv2f64(<vscale x 2 x double> %in1, <vscale x 2 x double> %in2)
  ret <vscale x 2 x double> %1
}
635
; Scalable atan2 maps to the SVE routine @armpl_svatan2_f32_x; an all-active
; predicate (splat (i1 true)) is appended after the two vector operands.
define <vscale x 4 x float> @llvm_atan2_vscale_f32(<vscale x 4 x float> %in1, <vscale x 4 x float> %in2) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_atan2_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN1:%.*]], <vscale x 4 x float> [[IN2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svatan2_f32_x(<vscale x 4 x float> [[IN1]], <vscale x 4 x float> [[IN2]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.atan2.nxv4f32(<vscale x 4 x float> %in1, <vscale x 4 x float> %in2)
  ret <vscale x 4 x float> %1
}
645
646declare <2 x double> @llvm.cosh.v2f64(<2 x double>)
647declare <4 x float> @llvm.cosh.v4f32(<4 x float>)
648declare <vscale x 2 x double> @llvm.cosh.nxv2f64(<vscale x 2 x double>)
649declare <vscale x 4 x float> @llvm.cosh.nxv4f32(<vscale x 4 x float>)
650
; Fixed-width cosh on <2 x double> is replaced by the NEON ArmPL routine
; @armpl_vcoshq_f64.
define <2 x double> @llvm_cosh_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_cosh_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vcoshq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.cosh.v2f64(<2 x double> %in)
  ret <2 x double> %1
}
660
; Fixed-width cosh on <4 x float> is replaced by the NEON ArmPL routine
; @armpl_vcoshq_f32.
define <4 x float> @llvm_cosh_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_cosh_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vcoshq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.cosh.v4f32(<4 x float> %in)
  ret <4 x float> %1
}
670
; Scalable cosh maps to the SVE routine @armpl_svcosh_f64_x with an
; all-active predicate (splat (i1 true)) as a trailing operand.
define <vscale x 2 x double> @llvm_cosh_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_cosh_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svcosh_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.cosh.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}
680
; Scalable cosh maps to the SVE routine @armpl_svcosh_f32_x with an
; all-active predicate (splat (i1 true)) as a trailing operand.
define <vscale x 4 x float> @llvm_cosh_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_cosh_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svcosh_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.cosh.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
690
691declare <2 x double> @llvm.sinh.v2f64(<2 x double>)
692declare <4 x float> @llvm.sinh.v4f32(<4 x float>)
693declare <vscale x 2 x double> @llvm.sinh.nxv2f64(<vscale x 2 x double>)
694declare <vscale x 4 x float> @llvm.sinh.nxv4f32(<vscale x 4 x float>)
695
; Fixed-width sinh on <2 x double> is replaced by the NEON ArmPL routine
; @armpl_vsinhq_f64.
define <2 x double> @llvm_sinh_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_sinh_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vsinhq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.sinh.v2f64(<2 x double> %in)
  ret <2 x double> %1
}
705
; Fixed-width sinh on <4 x float> is replaced by the NEON ArmPL routine
; @armpl_vsinhq_f32.
define <4 x float> @llvm_sinh_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_sinh_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vsinhq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.sinh.v4f32(<4 x float> %in)
  ret <4 x float> %1
}
715
; Scalable sinh maps to the SVE routine @armpl_svsinh_f64_x with an
; all-active predicate (splat (i1 true)) as a trailing operand.
define <vscale x 2 x double> @llvm_sinh_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_sinh_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svsinh_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.sinh.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}
725
; Scalable sinh maps to the SVE routine @armpl_svsinh_f32_x with an
; all-active predicate (splat (i1 true)) as a trailing operand.
define <vscale x 4 x float> @llvm_sinh_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_sinh_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svsinh_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.sinh.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
735
736declare <2 x double> @llvm.tanh.v2f64(<2 x double>)
737declare <4 x float> @llvm.tanh.v4f32(<4 x float>)
738declare <vscale x 2 x double> @llvm.tanh.nxv2f64(<vscale x 2 x double>)
739declare <vscale x 4 x float> @llvm.tanh.nxv4f32(<vscale x 4 x float>)
740
; Fixed-width tanh on <2 x double> is replaced by the NEON ArmPL routine
; @armpl_vtanhq_f64.
define <2 x double> @llvm_tanh_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_tanh_f64
; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <2 x double> @armpl_vtanhq_f64(<2 x double> [[IN]])
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %1 = call fast <2 x double> @llvm.tanh.v2f64(<2 x double> %in)
  ret <2 x double> %1
}
750
; Fixed-width tanh on <4 x float> is replaced by the NEON ArmPL routine
; @armpl_vtanhq_f32.
define <4 x float> @llvm_tanh_f32(<4 x float> %in) {
; CHECK-LABEL: define <4 x float> @llvm_tanh_f32
; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <4 x float> @armpl_vtanhq_f32(<4 x float> [[IN]])
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %1 = call fast <4 x float> @llvm.tanh.v4f32(<4 x float> %in)
  ret <4 x float> %1
}
760
; Scalable tanh maps to the SVE routine @armpl_svtanh_f64_x with an
; all-active predicate (splat (i1 true)) as a trailing operand.
define <vscale x 2 x double> @llvm_tanh_vscale_f64(<vscale x 2 x double> %in) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_tanh_vscale_f64
; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 2 x double> @armpl_svtanh_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call fast <vscale x 2 x double> @llvm.tanh.nxv2f64(<vscale x 2 x double> %in)
  ret <vscale x 2 x double> %1
}
770
; Scalable tanh maps to the SVE routine @armpl_svtanh_f32_x with an
; all-active predicate (splat (i1 true)) as a trailing operand.
define <vscale x 4 x float> @llvm_tanh_vscale_f32(<vscale x 4 x float> %in) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @llvm_tanh_vscale_f32
; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call fast <vscale x 4 x float> @armpl_svtanh_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call fast <vscale x 4 x float> @llvm.tanh.nxv4f32(<vscale x 4 x float> %in)
  ret <vscale x 4 x float> %1
}
780
; Functions tagged #0 require SVE; these are the ones checked against the
; armpl_sv*_x scalable-vector mappings (matched as #[[ATTR1]] above).
attributes #0 = { "target-features"="+sve" }
782;.
783; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
784; CHECK: attributes #[[ATTR1]] = { "target-features"="+sve" }
785;.
786