; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-enable-simd-scalar | FileCheck %s

define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
; CHECK-LABEL: add8xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  %tmp3 = add <8 x i8> %A, %B
  ret <8 x i8> %tmp3
}

define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
; CHECK-LABEL: add16xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %tmp3 = add <16 x i8> %A, %B
  ret <16 x i8> %tmp3
}

define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
; CHECK-LABEL: add4xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ret
  %tmp3 = add <4 x i16> %A, %B
  ret <4 x i16> %tmp3
}

define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
; CHECK-LABEL: add8xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
; CHECK-NEXT:    ret
  %tmp3 = add <8 x i16> %A, %B
  ret <8 x i16> %tmp3
}

define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: add2xi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = add <2 x i32> %A, %B
  ret <2 x i32> %tmp3
}

define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: add4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = add <4 x i32> %A, %B
  ret <4 x i32> %tmp3
}

define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
; CHECK-LABEL: add2xi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = add <2 x i64> %A, %B
  ret <2 x i64> %tmp3
}

define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK-LABEL: add2xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = fadd <2 x float> %A, %B
  ret <2 x float> %tmp3
}

define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-LABEL: add4xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = fadd <4 x float> %A, %B
  ret <4 x float> %tmp3
}

define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-LABEL: add2xdouble:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = fadd <2 x double> %A, %B
  ret <2 x double> %tmp3
}

define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
; CHECK-LABEL: sub8xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  %tmp3 = sub <8 x i8> %A, %B
  ret <8 x i8> %tmp3
}

define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
; CHECK-LABEL: sub16xi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %tmp3 = sub <16 x i8> %A, %B
  ret <16 x i8> %tmp3
}

define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
; CHECK-LABEL: sub4xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ret
  %tmp3 = sub <4 x i16> %A, %B
  ret <4 x i16> %tmp3
}

define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
; CHECK-LABEL: sub8xi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.8h, v0.8h, v1.8h
; CHECK-NEXT:    ret
  %tmp3 = sub <8 x i16> %A, %B
  ret <8 x i16> %tmp3
}

define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: sub2xi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = sub <2 x i32> %A, %B
  ret <2 x i32> %tmp3
}

define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: sub4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = sub <4 x i32> %A, %B
  ret <4 x i32> %tmp3
}

define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
; CHECK-LABEL: sub2xi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = sub <2 x i64> %A, %B
  ret <2 x i64> %tmp3
}

define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK-LABEL: sub2xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %tmp3 = fsub <2 x float> %A, %B
  ret <2 x float> %tmp3
}

define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-LABEL: sub4xfloat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %tmp3 = fsub <4 x float> %A, %B
  ret <4 x float> %tmp3
}

define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-LABEL: sub2xdouble:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %tmp3 = fsub <2 x double> %A, %B
  ret <2 x double> %tmp3
}

define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vadd_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fadd <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmul_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fmul <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vdiv_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fdiv <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmla_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d1, d1, d2
; CHECK-NEXT:    fadd d0, d1, d0
; CHECK-NEXT:    ret
  %1 = fmul <1 x double> %b, %c
  %2 = fadd <1 x double> %1, %a
  ret <1 x double> %2
}

define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmls_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d1, d1, d2
; CHECK-NEXT:    fsub d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fmul <1 x double> %b, %c
  %2 = fsub <1 x double> %a, %1
  ret <1 x double> %2
}

define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfms_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsub d0, d1, d2, d0
; CHECK-NEXT:    ret
  %1 = fsub <1 x double> <double -0.000000e+00>, %b
  %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
  ret <1 x double> %2
}

define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfma_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmadd d0, d1, d2, d0
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
  ret <1 x double> %1
}

define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vsub_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub d0, d0, d1
; CHECK-NEXT:    ret
  %1 = fsub <1 x double> %a, %b
  ret <1 x double> %1
}

define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vabd_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmax_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmin_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmaxnm_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vminnm_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm d0, d0, d1
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %1
}

define <1 x double> @test_vabs_f64(<1 x double> %a) {
; CHECK-LABEL: test_vabs_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs d0, d0
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
  ret <1 x double> %1
}

define <1 x double> @test_vneg_f64(<1 x double> %a) {
; CHECK-LABEL: test_vneg_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fneg d0, d0
; CHECK-NEXT:    ret
  %1 = fsub <1 x double> <double -0.000000e+00>, %a
  ret <1 x double> %1
}

declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
declare <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)