; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64ZBB

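; Test lowering of llvm.abs.* and of the equivalent icmp/sub/select idiom for
; i8, i16, i32, i64 and i128 on RV32 and RV64, with and without the Zbb
; extension (which provides sext.b, sext.h and max).
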
declare i8 @llvm.abs.i8(i8, i1 immarg)
declare i16 @llvm.abs.i16(i16, i1 immarg)
declare i32 @llvm.abs.i32(i32, i1 immarg)
declare i64 @llvm.abs.i64(i64, i1 immarg)
declare i128 @llvm.abs.i128(i128, i1 immarg)

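; Without Zbb, abs is expanded with a sign mask: the sign bit is broadcast
; with slli+srai and abs(x) = (x ^ m) - m. With Zbb, abs(x) = max(x, -x) on
; the sign-extended value (sext.b/sext.h for the narrow types).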
define i8 @abs8(i8 %x) {
; RV32I-LABEL: abs8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 24
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: abs8:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.b a0, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: abs8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 56
; RV64I-NEXT:    srai a1, a1, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: abs8:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.b a0, a0
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %abs = tail call i8 @llvm.abs.i8(i8 %x, i1 true)
  ret i8 %abs
}

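; The icmp slt 0 / sub / select idiom is recognized as abs and lowered exactly
; like the intrinsic above.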
define i8 @select_abs8(i8 %x) {
; RV32I-LABEL: select_abs8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 24
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_abs8:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.b a0, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_abs8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 56
; RV64I-NEXT:    srai a1, a1, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_abs8:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.b a0, a0
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i8 %x, 0
  %2 = sub nsw i8 0, %x
  %3 = select i1 %1, i8 %2, i8 %x
  ret i8 %3
}

define i16 @abs16(i16 %x) {
; RV32I-LABEL: abs16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: abs16:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.h a0, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: abs16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 48
; RV64I-NEXT:    srai a1, a1, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: abs16:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.h a0, a0
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
  ret i16 %abs
}

define i16 @select_abs16(i16 %x) {
; RV32I-LABEL: select_abs16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    srai a1, a1, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_abs16:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.h a0, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_abs16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 48
; RV64I-NEXT:    srai a1, a1, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_abs16:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.h a0, a0
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i16 %x, 0
  %2 = sub nsw i16 0, %x
  %3 = select i1 %1, i16 %2, i16 %x
  ret i16 %3
}

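; For i32 no pre-shift is needed: RV32 forms the mask with a plain srai, while
; RV64 uses the W-form instructions (sraiw, subw, negw) so the result stays
; properly sign-extended in the 64-bit register.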
define i32 @abs32(i32 %x) {
; RV32I-LABEL: abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a0, a0
; RV64ZBB-NEXT:    negw a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
  ret i32 %abs
}

define i32 @select_abs32(i32 %x) {
; RV32I-LABEL: select_abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a0, a0
; RV64ZBB-NEXT:    negw a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i32 %x, 0
  %2 = sub nsw i32 0, %x
  %3 = select i1 %1, i32 %2, i32 %x
  ret i32 %3
}

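; On RV32 an i64 lives in a register pair, so abs becomes a branch on the sign
; of the high word followed by a two-word negation (snez supplies the borrow).
; On RV64 it is the same scalar srai/xor/sub (or neg+max) pattern as i32 on
; RV32.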
define i64 @abs64(i64 %x) {
; RV32I-LABEL: abs64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    bgez a1, .LBB6_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    snez a2, a0
; RV32I-NEXT:    neg a0, a0
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    sub a1, a1, a2
; RV32I-NEXT:  .LBB6_2:
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: abs64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    bgez a1, .LBB6_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    snez a2, a0
; RV32ZBB-NEXT:    neg a0, a0
; RV32ZBB-NEXT:    neg a1, a1
; RV32ZBB-NEXT:    sub a1, a1, a2
; RV32ZBB-NEXT:  .LBB6_2:
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: abs64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: abs64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
  ret i64 %abs
}

define i64 @select_abs64(i64 %x) {
; RV32I-LABEL: select_abs64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    bgez a1, .LBB7_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    snez a2, a0
; RV32I-NEXT:    neg a0, a0
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    sub a1, a1, a2
; RV32I-NEXT:  .LBB7_2:
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_abs64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    bgez a1, .LBB7_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    snez a2, a0
; RV32ZBB-NEXT:    neg a0, a0
; RV32ZBB-NEXT:    neg a1, a1
; RV32ZBB-NEXT:    sub a1, a1, a2
; RV32ZBB-NEXT:  .LBB7_2:
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_abs64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_abs64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i64 %x, 0
  %2 = sub nsw i64 0, %x
  %3 = select i1 %1, i64 %2, i64 %x
  ret i64 %3
}

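; On RV32 an i128 is passed and returned indirectly (a0 holds the result
; pointer, a1 the argument pointer), so the value is loaded, conditionally
; negated word by word, and stored back. On RV64 it is split across the a0/a1
; register pair, exactly like i64 on RV32.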
define i128 @abs128(i128 %x) {
; RV32I-LABEL: abs128:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a3, 12(a1)
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    lw a4, 4(a1)
; RV32I-NEXT:    lw a1, 8(a1)
; RV32I-NEXT:    bgez a3, .LBB8_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    neg a5, a1
; RV32I-NEXT:    snez a6, a4
; RV32I-NEXT:    snez a7, a2
; RV32I-NEXT:    snez a1, a1
; RV32I-NEXT:    neg a4, a4
; RV32I-NEXT:    or a6, a7, a6
; RV32I-NEXT:    add a1, a3, a1
; RV32I-NEXT:    sub a4, a4, a7
; RV32I-NEXT:    sltu a3, a5, a6
; RV32I-NEXT:    neg a7, a1
; RV32I-NEXT:    sub a1, a5, a6
; RV32I-NEXT:    sub a3, a7, a3
; RV32I-NEXT:    neg a2, a2
; RV32I-NEXT:  .LBB8_2:
; RV32I-NEXT:    sw a2, 0(a0)
; RV32I-NEXT:    sw a4, 4(a0)
; RV32I-NEXT:    sw a1, 8(a0)
; RV32I-NEXT:    sw a3, 12(a0)
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: abs128:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a3, 12(a1)
; RV32ZBB-NEXT:    lw a2, 0(a1)
; RV32ZBB-NEXT:    lw a4, 4(a1)
; RV32ZBB-NEXT:    lw a1, 8(a1)
; RV32ZBB-NEXT:    bgez a3, .LBB8_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    neg a5, a1
; RV32ZBB-NEXT:    snez a6, a4
; RV32ZBB-NEXT:    snez a7, a2
; RV32ZBB-NEXT:    snez a1, a1
; RV32ZBB-NEXT:    neg a4, a4
; RV32ZBB-NEXT:    or a6, a7, a6
; RV32ZBB-NEXT:    add a1, a3, a1
; RV32ZBB-NEXT:    sub a4, a4, a7
; RV32ZBB-NEXT:    sltu a3, a5, a6
; RV32ZBB-NEXT:    neg a7, a1
; RV32ZBB-NEXT:    sub a1, a5, a6
; RV32ZBB-NEXT:    sub a3, a7, a3
; RV32ZBB-NEXT:    neg a2, a2
; RV32ZBB-NEXT:  .LBB8_2:
; RV32ZBB-NEXT:    sw a2, 0(a0)
; RV32ZBB-NEXT:    sw a4, 4(a0)
; RV32ZBB-NEXT:    sw a1, 8(a0)
; RV32ZBB-NEXT:    sw a3, 12(a0)
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: abs128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    bgez a1, .LBB8_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    snez a2, a0
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    neg a1, a1
; RV64I-NEXT:    sub a1, a1, a2
; RV64I-NEXT:  .LBB8_2:
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: abs128:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    bgez a1, .LBB8_2
; RV64ZBB-NEXT:  # %bb.1:
; RV64ZBB-NEXT:    snez a2, a0
; RV64ZBB-NEXT:    neg a0, a0
; RV64ZBB-NEXT:    neg a1, a1
; RV64ZBB-NEXT:    sub a1, a1, a2
; RV64ZBB-NEXT:  .LBB8_2:
; RV64ZBB-NEXT:    ret
  %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
  ret i128 %abs
}

define i128 @select_abs128(i128 %x) {
; RV32I-LABEL: select_abs128:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a3, 12(a1)
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    lw a4, 4(a1)
; RV32I-NEXT:    lw a1, 8(a1)
; RV32I-NEXT:    bgez a3, .LBB9_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    neg a5, a1
; RV32I-NEXT:    snez a6, a4
; RV32I-NEXT:    snez a7, a2
; RV32I-NEXT:    snez a1, a1
; RV32I-NEXT:    neg a4, a4
; RV32I-NEXT:    or a6, a7, a6
; RV32I-NEXT:    add a1, a3, a1
; RV32I-NEXT:    sub a4, a4, a7
; RV32I-NEXT:    sltu a3, a5, a6
; RV32I-NEXT:    neg a7, a1
; RV32I-NEXT:    sub a1, a5, a6
; RV32I-NEXT:    sub a3, a7, a3
; RV32I-NEXT:    neg a2, a2
; RV32I-NEXT:  .LBB9_2:
; RV32I-NEXT:    sw a2, 0(a0)
; RV32I-NEXT:    sw a4, 4(a0)
; RV32I-NEXT:    sw a1, 8(a0)
; RV32I-NEXT:    sw a3, 12(a0)
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_abs128:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    lw a3, 12(a1)
; RV32ZBB-NEXT:    lw a2, 0(a1)
; RV32ZBB-NEXT:    lw a4, 4(a1)
; RV32ZBB-NEXT:    lw a1, 8(a1)
; RV32ZBB-NEXT:    bgez a3, .LBB9_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    neg a5, a1
; RV32ZBB-NEXT:    snez a6, a4
; RV32ZBB-NEXT:    snez a7, a2
; RV32ZBB-NEXT:    snez a1, a1
; RV32ZBB-NEXT:    neg a4, a4
; RV32ZBB-NEXT:    or a6, a7, a6
; RV32ZBB-NEXT:    add a1, a3, a1
; RV32ZBB-NEXT:    sub a4, a4, a7
; RV32ZBB-NEXT:    sltu a3, a5, a6
; RV32ZBB-NEXT:    neg a7, a1
; RV32ZBB-NEXT:    sub a1, a5, a6
; RV32ZBB-NEXT:    sub a3, a7, a3
; RV32ZBB-NEXT:    neg a2, a2
; RV32ZBB-NEXT:  .LBB9_2:
; RV32ZBB-NEXT:    sw a2, 0(a0)
; RV32ZBB-NEXT:    sw a4, 4(a0)
; RV32ZBB-NEXT:    sw a1, 8(a0)
; RV32ZBB-NEXT:    sw a3, 12(a0)
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_abs128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    bgez a1, .LBB9_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    snez a2, a0
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    neg a1, a1
; RV64I-NEXT:    sub a1, a1, a2
; RV64I-NEXT:  .LBB9_2:
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_abs128:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    bgez a1, .LBB9_2
; RV64ZBB-NEXT:  # %bb.1:
; RV64ZBB-NEXT:    snez a2, a0
; RV64ZBB-NEXT:    neg a0, a0
; RV64ZBB-NEXT:    neg a1, a1
; RV64ZBB-NEXT:    sub a1, a1, a2
; RV64ZBB-NEXT:  .LBB9_2:
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i128 %x, 0
  %2 = sub nsw i128 0, %x
  %3 = select i1 %1, i128 %2, i128 %x
  ret i128 %3
}

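; Because the abs result is known non-negative, zero-extending it to i64 only
; needs an explicit "li a1, 0" for the high word on RV32; on RV64 the
; sign-extended 32-bit result is already the correct 64-bit value.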
define i64 @zext_abs32(i32 %x) {
; RV32I-LABEL: zext_abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zext_abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: zext_abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: zext_abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a0, a0
; RV64ZBB-NEXT:    negw a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
  %zext = zext nneg i32 %abs to i64
  ret i64 %zext
}

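; With a signext i8/i16 argument, no explicit sign extension is needed before
; the expansion (a plain srai forms the mask, or neg+max with Zbb), and the
; non-negative result can be returned directly as a signext i32.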
define signext i32 @zext_abs8(i8 signext %x) {
; RV32I-LABEL: zext_abs8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zext_abs8:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: zext_abs8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: zext_abs8:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = call i8 @llvm.abs.i8(i8 %x, i1 false)
  %b = zext i8 %a to i32
  ret i32 %b
}

define signext i32 @zext_abs16(i16 signext %x) {
; RV32I-LABEL: zext_abs16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zext_abs16:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: zext_abs16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: zext_abs16:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = call i16 @llvm.abs.i16(i16 %x, i1 false)
  %b = zext i16 %a to i32
  ret i32 %b
}

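; Same as the two tests above, but zero-extending the result to i64: RV32
; additionally clears the high word with "li a1, 0", while RV64 needs no
; extra instruction.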
define i64 @zext64_abs8(i8 signext %x) {
; RV32I-LABEL: zext64_abs8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zext64_abs8:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: zext64_abs8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: zext64_abs8:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = call i8 @llvm.abs.i8(i8 %x, i1 false)
  %b = zext i8 %a to i64
  ret i64 %b
}

define i64 @zext64_abs16(i16 signext %x) {
; RV32I-LABEL: zext64_abs16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zext64_abs16:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    li a1, 0
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: zext64_abs16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: zext64_abs16:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = call i16 @llvm.abs.i16(i16 %x, i1 false)
  %b = zext i16 %a to i64
  ret i64 %b
}

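; Here the i8 argument carries no extension attribute, so the base ISA
; sign-extends it with slli+srai; since the i16 result is stored with sh,
; no explicit zero-extension of the upper bits is needed.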
define void @zext16_abs8(i8 %x, ptr %p) {
; RV32I-LABEL: zext16_abs8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a2, a0, 31
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    xor a0, a0, a2
; RV32I-NEXT:    sub a0, a0, a2
; RV32I-NEXT:    sh a0, 0(a1)
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: zext16_abs8:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sext.b a0, a0
; RV32ZBB-NEXT:    neg a2, a0
; RV32ZBB-NEXT:    max a0, a0, a2
; RV32ZBB-NEXT:    sh a0, 0(a1)
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: zext16_abs8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a2, a0, 63
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    xor a0, a0, a2
; RV64I-NEXT:    subw a0, a0, a2
; RV64I-NEXT:    sh a0, 0(a1)
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: zext16_abs8:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.b a0, a0
; RV64ZBB-NEXT:    neg a2, a0
; RV64ZBB-NEXT:    max a0, a0, a2
; RV64ZBB-NEXT:    sh a0, 0(a1)
; RV64ZBB-NEXT:    ret
  %a = call i8 @llvm.abs.i8(i8 %x, i1 false)
  %b = zext i8 %a to i16
  store i16 %b, ptr %p
  ret void
}