; xref: /llvm-project/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll (revision 0e520300580a77f1d7c01ada9a047a7fadb5eb1f)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-sve-vector-bits-min=512 -aarch64-sve-vector-bits-max=512  < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
5
; Integer add of <64 x i8> lowered to SVE at VL=512; the checks require a
; single ptrue.b predicate shared by both loads, the add, and the store.
define void @add_v64i8(ptr %a, ptr %b) #0 {
; CHECK-LABEL: add_v64i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT:    add z0.b, z0.b, z1.b
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <64 x i8>, ptr %a
  %op2 = load <64 x i8>, ptr %b
  %res = add <64 x i8> %op1, %op2
  store <64 x i8> %res, ptr %a
  ret void
}
21
; Integer add of <32 x i16> lowered to SVE at VL=512; the checks require a
; single ptrue.h predicate shared by both loads, the add, and the store.
; (%c is intentionally unused — it only varies the test's signature.)
define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-LABEL: add_v32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i16>, ptr %a
  %op2 = load <32 x i16>, ptr %b
  %res = add <32 x i16> %op1, %op2
  store <32 x i16> %res, ptr %a
  ret void
}
37
; llvm.abs on <16 x i32> lowered to SVE at VL=512; the checks require a
; single ptrue.s predicate shared by the load, the predicated abs, and the store.
define void @abs_v16i32(ptr %a) #0 {
; CHECK-LABEL: abs_v16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    abs z0.s, p0/m, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i32>, ptr %a
  %res = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %op1, i1 false)
  store <16 x i32> %res, ptr %a
  ret void
}
51
; llvm.abs on <8 x i64> lowered to SVE at VL=512; the checks require a
; single ptrue.d predicate shared by the load, the predicated abs, and the store.
define void @abs_v8i64(ptr %a) #0 {
; CHECK-LABEL: abs_v8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    abs z0.d, p0/m, z0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x i64>, ptr %a
  %res = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %op1, i1 false)
  store <8 x i64> %res, ptr %a
  ret void
}
65
; FP add of <32 x half> lowered to SVE at VL=512; the checks require a
; single ptrue.h predicate shared by both loads, the fadd, and the store.
define void @fadd_v32f16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v32f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x half>, ptr %a
  %op2 = load <32 x half>, ptr %b
  %res = fadd <32 x half> %op1, %op2
  store <32 x half> %res, ptr %a
  ret void
}
81
; FP add of <16 x float> lowered to SVE at VL=512; the checks require a
; single ptrue.s predicate shared by both loads, the fadd, and the store.
define void @fadd_v16f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v16f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x float>, ptr %a
  %op2 = load <16 x float>, ptr %b
  %res = fadd <16 x float> %op1, %op2
  store <16 x float> %res, ptr %a
  ret void
}
97
; FP add of <8 x double> lowered to SVE at VL=512; the checks require a
; single ptrue.d predicate shared by both loads, the fadd, and the store.
define void @fadd_v8f64(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v8f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x double>, ptr %a
  %op2 = load <8 x double>, ptr %b
  %res = fadd <8 x double> %op1, %op2
  store <8 x double> %res, ptr %a
  ret void
}
113
; Intrinsic declarations used by the abs tests, and the SVE target attribute
; (#0) applied to every function above.
declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)

attributes #0 = { "target-features"="+sve" }