; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

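; RVV has no single integer absolute-value instruction, so @llvm.abs is
; expected to lower to a negate (vrsub.vi vd, vs, 0 computes 0 - x) followed
; by vmax.vv, which selects max(x, -x) == |x|. The "i1 false" argument means
; INT_MIN is not poison and simply wraps to itself.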
define void @abs_v16i8(ptr %x) {
; CHECK-LABEL: abs_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
  store <16 x i8> %b, ptr %x
  ret void
}
declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)

define void @abs_v8i16(ptr %x) {
; CHECK-LABEL: abs_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
  store <8 x i16> %b, ptr %x
  ret void
}
declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)

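; <6 x i16> has a non-power-of-two element count; it still fits vsetivli's
; immediate, so VL is set directly to 6 with no widening of the operation.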
define void @abs_v6i16(ptr %x) {
; CHECK-LABEL: abs_v6i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <6 x i16>, ptr %x
  %b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
  store <6 x i16> %b, ptr %x
  ret void
}
declare <6 x i16> @llvm.abs.v6i16(<6 x i16>, i1)

define void @abs_v4i32(ptr %x) {
; CHECK-LABEL: abs_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
  store <4 x i32> %b, ptr %x
  ret void
}
declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)

define void @abs_v2i64(ptr %x) {
; CHECK-LABEL: abs_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <2 x i64>, ptr %x
  %b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
  store <2 x i64> %b, ptr %x
  ret void
}
declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)

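; A VL of 32 does not fit vsetivli's 5-bit unsigned immediate, so it is
; materialized with li and consumed by vsetvli. The 256-bit types below also
; need an LMUL of m2.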
define void @abs_v32i8(ptr %x) {
; CHECK-LABEL: abs_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
  store <32 x i8> %b, ptr %x
  ret void
}
declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)

define void @abs_v16i16(ptr %x) {
; CHECK-LABEL: abs_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i16>, ptr %x
  %b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
  store <16 x i16> %b, ptr %x
  ret void
}
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)

define void @abs_v8i32(ptr %x) {
; CHECK-LABEL: abs_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
  store <8 x i32> %b, ptr %x
  ret void
}
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)

define void @abs_v4i64(ptr %x) {
; CHECK-LABEL: abs_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i64>, ptr %x
  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
  store <4 x i64> %b, ptr %x
  ret void
}
declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)

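; abs(sext(x)) is narrowed to a zext of abs(x) on the source element type:
; the negate/max sequence runs at the original element width and the now
; non-negative result is zero-extended, avoiding the wider e64 arithmetic.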
define void @abs_v4i64_of_sext_v4i8(ptr %x) {
; CHECK-LABEL: abs_v4i64_of_sext_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf8 v10, v8
; CHECK-NEXT:    vse64.v v10, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %a.ext = sext <4 x i8> %a to <4 x i64>
  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
  store <4 x i64> %b, ptr %x
  ret void
}

define void @abs_v4i64_of_sext_v4i16(ptr %x) {
; CHECK-LABEL: abs_v4i64_of_sext_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vse64.v v10, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i16>, ptr %x
  %a.ext = sext <4 x i16> %a to <4 x i64>
  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
  store <4 x i64> %b, ptr %x
  ret void
}

define void @abs_v4i64_of_sext_v4i32(ptr %x) {
; CHECK-LABEL: abs_v4i64_of_sext_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vse64.v v10, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %a.ext = sext <4 x i32> %a to <4 x i64>
  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
  store <4 x i64> %b, ptr %x
  ret void
}