; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64ZBB

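; Check lowering of (neg (abs X)) and its select/smax/umax/smin/umin
; expansions: with Zbb the combined operation is expected to fold to a single
; min/max (or minu/maxu), while base RV32I/RV64I use branchless
; shift/xor/sub sequences or compare-and-branch expansions.
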
declare i32 @llvm.abs.i32(i32, i1 immarg)
declare i64 @llvm.abs.i64(i64, i1 immarg)

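; neg(abs(x)) is expected to fold to min(x, -x): Zbb emits neg+min, while base
; RV32I/RV64I reuse the srai/xor/sub abs idiom with the subtraction operands
; swapped to produce the negated result directly.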
define i32 @neg_abs32(i32 %x) {
; RV32I-LABEL: neg_abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: neg_abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    min a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: neg_abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    subw a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: neg_abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sraiw a1, a0, 31
; RV64ZBB-NEXT:    xor a0, a0, a1
; RV64ZBB-NEXT:    subw a0, a1, a0
; RV64ZBB-NEXT:    ret
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
  %neg = sub nsw i32 0, %abs
  ret i32 %neg
}

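; The select-based spelling of neg(abs(x)) should produce the same code as the
; intrinsic form above.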
define i32 @select_neg_abs32(i32 %x) {
; RV32I-LABEL: select_neg_abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_neg_abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    min a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_neg_abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    subw a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_neg_abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sraiw a1, a0, 31
; RV64ZBB-NEXT:    xor a0, a0, a1
; RV64ZBB-NEXT:    subw a0, a1, a0
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i32 %x, 0
  %2 = sub nsw i32 0, %x
  %3 = select i1 %1, i32 %x, i32 %2
  ret i32 %3
}

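; On RV32 the i64 operation is split across a register pair, with the borrow
; into the high word computed via sltu. RV64 handles i64 natively and mirrors
; the 32-bit lowering above.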
define i64 @neg_abs64(i64 %x) {
; RV32I-LABEL: neg_abs64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a2, a1, 31
; RV32I-NEXT:    xor a0, a0, a2
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    sltu a3, a2, a0
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: neg_abs64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    srai a2, a1, 31
; RV32ZBB-NEXT:    xor a0, a0, a2
; RV32ZBB-NEXT:    xor a1, a1, a2
; RV32ZBB-NEXT:    sltu a3, a2, a0
; RV32ZBB-NEXT:    sub a1, a2, a1
; RV32ZBB-NEXT:    sub a1, a1, a3
; RV32ZBB-NEXT:    sub a0, a2, a0
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: neg_abs64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: neg_abs64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    min a0, a0, a1
; RV64ZBB-NEXT:    ret
  %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
  %neg = sub nsw i64 0, %abs
  ret i64 %neg
}

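; Select-based i64 variant; expected output matches neg_abs64.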
define i64 @select_neg_abs64(i64 %x) {
; RV32I-LABEL: select_neg_abs64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a2, a1, 31
; RV32I-NEXT:    xor a0, a0, a2
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    sltu a3, a2, a0
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: select_neg_abs64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    srai a2, a1, 31
; RV32ZBB-NEXT:    xor a0, a0, a2
; RV32ZBB-NEXT:    xor a1, a1, a2
; RV32ZBB-NEXT:    sltu a3, a2, a0
; RV32ZBB-NEXT:    sub a1, a2, a1
; RV32ZBB-NEXT:    sub a1, a1, a3
; RV32ZBB-NEXT:    sub a0, a2, a0
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: select_neg_abs64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a1, a0, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    sub a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: select_neg_abs64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    min a0, a0, a1
; RV64ZBB-NEXT:    ret
  %1 = icmp slt i64 %x, 0
  %2 = sub nsw i64 0, %x
  %3 = select i1 %1, i64 %x, i64 %2
  ret i64 %3
}

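; With a second use, abs(x) must be materialized (max with Zbb) before being
; negated and stored.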
define i32 @neg_abs32_multiuse(i32 %x, ptr %y) {
; RV32I-LABEL: neg_abs32_multiuse:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a2, a0, 31
; RV32I-NEXT:    xor a0, a0, a2
; RV32I-NEXT:    sub a2, a0, a2
; RV32I-NEXT:    neg a0, a2
; RV32I-NEXT:    sw a2, 0(a1)
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: neg_abs32_multiuse:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a2, a0
; RV32ZBB-NEXT:    max a2, a0, a2
; RV32ZBB-NEXT:    neg a0, a2
; RV32ZBB-NEXT:    sw a2, 0(a1)
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: neg_abs32_multiuse:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a2, a0, 31
; RV64I-NEXT:    xor a0, a0, a2
; RV64I-NEXT:    subw a2, a0, a2
; RV64I-NEXT:    negw a0, a2
; RV64I-NEXT:    sw a2, 0(a1)
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: neg_abs32_multiuse:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a0, a0
; RV64ZBB-NEXT:    negw a2, a0
; RV64ZBB-NEXT:    max a2, a0, a2
; RV64ZBB-NEXT:    negw a0, a2
; RV64ZBB-NEXT:    sw a2, 0(a1)
; RV64ZBB-NEXT:    ret
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
  store i32 %abs, ptr %y
  %neg = sub nsw i32 0, %abs
  ret i32 %neg
}

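; i64 multiuse case: RV32 currently lowers the abs through a compare-and-branch
; sequence rather than a branchless one.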
define i64 @neg_abs64_multiuse(i64 %x, ptr %y) {
; RV32I-LABEL: neg_abs64_multiuse:
; RV32I:       # %bb.0:
; RV32I-NEXT:    bgez a1, .LBB5_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    snez a3, a0
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    neg a0, a0
; RV32I-NEXT:  .LBB5_2:
; RV32I-NEXT:    snez a3, a0
; RV32I-NEXT:    neg a4, a1
; RV32I-NEXT:    sub a3, a4, a3
; RV32I-NEXT:    neg a4, a0
; RV32I-NEXT:    sw a0, 0(a2)
; RV32I-NEXT:    sw a1, 4(a2)
; RV32I-NEXT:    mv a0, a4
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: neg_abs64_multiuse:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    bgez a1, .LBB5_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    snez a3, a0
; RV32ZBB-NEXT:    neg a1, a1
; RV32ZBB-NEXT:    sub a1, a1, a3
; RV32ZBB-NEXT:    neg a0, a0
; RV32ZBB-NEXT:  .LBB5_2:
; RV32ZBB-NEXT:    snez a3, a0
; RV32ZBB-NEXT:    neg a4, a1
; RV32ZBB-NEXT:    sub a3, a4, a3
; RV32ZBB-NEXT:    neg a4, a0
; RV32ZBB-NEXT:    sw a0, 0(a2)
; RV32ZBB-NEXT:    sw a1, 4(a2)
; RV32ZBB-NEXT:    mv a0, a4
; RV32ZBB-NEXT:    mv a1, a3
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: neg_abs64_multiuse:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srai a2, a0, 63
; RV64I-NEXT:    xor a0, a0, a2
; RV64I-NEXT:    sub a2, a0, a2
; RV64I-NEXT:    neg a0, a2
; RV64I-NEXT:    sd a2, 0(a1)
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: neg_abs64_multiuse:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a2, a0
; RV64ZBB-NEXT:    max a2, a0, a2
; RV64ZBB-NEXT:    neg a0, a2
; RV64ZBB-NEXT:    sd a2, 0(a1)
; RV64ZBB-NEXT:    ret
  %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
  store i64 %abs, ptr %y
  %neg = sub nsw i64 0, %abs
  ret i64 %neg
}

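; The manually expanded form neg(smax(-x, x)) should fold the same way: Zbb
; emits neg+min on RV32, while RV64 must sign-extend before the max.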
define i32 @expanded_neg_abs32(i32 %x) {
; RV32I-LABEL: expanded_neg_abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    blt a0, a1, .LBB6_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:  .LBB6_2:
; RV32I-NEXT:    neg a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    min a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    blt a1, a0, .LBB6_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB6_2:
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a1, a0
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    ret
  %n = sub i32 0, %x
  %t = call i32 @llvm.smax.i32(i32 %n, i32 %x)
  %r = sub i32 0, %t
  ret i32 %r
}

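; Unsigned variant: neg(umax(-x, x)) is expected to fold to minu with Zbb.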
define i32 @expanded_neg_abs32_unsigned(i32 %x) {
; RV32I-LABEL: expanded_neg_abs32_unsigned:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    bltu a0, a1, .LBB7_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:  .LBB7_2:
; RV32I-NEXT:    neg a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_abs32_unsigned:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    minu a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_abs32_unsigned:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    bltu a1, a0, .LBB7_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB7_2:
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_abs32_unsigned:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a1, a0
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    maxu a0, a0, a1
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    ret
  %n = sub i32 0, %x
  %t = call i32 @llvm.umax.i32(i32 %n, i32 %x)
  %r = sub i32 0, %t
  ret i32 %r
}

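; i64 expanded form: RV32 has no 64-bit smax, so it compares the halves of the
; register pair explicitly; RV64 with Zbb still folds to neg+min.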
define i64 @expanded_neg_abs64(i64 %x) {
; RV32I-LABEL: expanded_neg_abs64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    snez a2, a0
; RV32I-NEXT:    neg a3, a1
; RV32I-NEXT:    sub a2, a3, a2
; RV32I-NEXT:    neg a3, a0
; RV32I-NEXT:    beq a2, a1, .LBB8_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    slt a4, a1, a2
; RV32I-NEXT:    beqz a4, .LBB8_3
; RV32I-NEXT:    j .LBB8_4
; RV32I-NEXT:  .LBB8_2:
; RV32I-NEXT:    sltu a4, a0, a3
; RV32I-NEXT:    bnez a4, .LBB8_4
; RV32I-NEXT:  .LBB8_3:
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    mv a3, a0
; RV32I-NEXT:  .LBB8_4:
; RV32I-NEXT:    snez a0, a3
; RV32I-NEXT:    add a0, a2, a0
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    neg a0, a3
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_abs64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    snez a2, a0
; RV32ZBB-NEXT:    neg a3, a1
; RV32ZBB-NEXT:    sub a2, a3, a2
; RV32ZBB-NEXT:    neg a3, a0
; RV32ZBB-NEXT:    beq a2, a1, .LBB8_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    slt a4, a1, a2
; RV32ZBB-NEXT:    beqz a4, .LBB8_3
; RV32ZBB-NEXT:    j .LBB8_4
; RV32ZBB-NEXT:  .LBB8_2:
; RV32ZBB-NEXT:    sltu a4, a0, a3
; RV32ZBB-NEXT:    bnez a4, .LBB8_4
; RV32ZBB-NEXT:  .LBB8_3:
; RV32ZBB-NEXT:    mv a2, a1
; RV32ZBB-NEXT:    mv a3, a0
; RV32ZBB-NEXT:  .LBB8_4:
; RV32ZBB-NEXT:    snez a0, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    neg a0, a3
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_abs64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    neg a1, a0
; RV64I-NEXT:    blt a0, a1, .LBB8_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:  .LBB8_2:
; RV64I-NEXT:    neg a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_abs64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    min a0, a0, a1
; RV64ZBB-NEXT:    ret
  %n = sub i64 0, %x
  %t = call i64 @llvm.smax.i64(i64 %n, i64 %x)
  %r = sub i64 0, %t
  ret i64 %r
}

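; Unsigned i64 variant; RV64 with Zbb folds to neg+minu.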
define i64 @expanded_neg_abs64_unsigned(i64 %x) {
; RV32I-LABEL: expanded_neg_abs64_unsigned:
; RV32I:       # %bb.0:
; RV32I-NEXT:    snez a2, a0
; RV32I-NEXT:    neg a3, a1
; RV32I-NEXT:    sub a2, a3, a2
; RV32I-NEXT:    neg a3, a0
; RV32I-NEXT:    beq a2, a1, .LBB9_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sltu a4, a1, a2
; RV32I-NEXT:    beqz a4, .LBB9_3
; RV32I-NEXT:    j .LBB9_4
; RV32I-NEXT:  .LBB9_2:
; RV32I-NEXT:    sltu a4, a0, a3
; RV32I-NEXT:    bnez a4, .LBB9_4
; RV32I-NEXT:  .LBB9_3:
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    mv a3, a0
; RV32I-NEXT:  .LBB9_4:
; RV32I-NEXT:    snez a0, a3
; RV32I-NEXT:    add a0, a2, a0
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    neg a0, a3
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_abs64_unsigned:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    snez a2, a0
; RV32ZBB-NEXT:    neg a3, a1
; RV32ZBB-NEXT:    sub a2, a3, a2
; RV32ZBB-NEXT:    neg a3, a0
; RV32ZBB-NEXT:    beq a2, a1, .LBB9_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    sltu a4, a1, a2
; RV32ZBB-NEXT:    beqz a4, .LBB9_3
; RV32ZBB-NEXT:    j .LBB9_4
; RV32ZBB-NEXT:  .LBB9_2:
; RV32ZBB-NEXT:    sltu a4, a0, a3
; RV32ZBB-NEXT:    bnez a4, .LBB9_4
; RV32ZBB-NEXT:  .LBB9_3:
; RV32ZBB-NEXT:    mv a2, a1
; RV32ZBB-NEXT:    mv a3, a0
; RV32ZBB-NEXT:  .LBB9_4:
; RV32ZBB-NEXT:    snez a0, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    neg a0, a3
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_abs64_unsigned:
; RV64I:       # %bb.0:
; RV64I-NEXT:    neg a1, a0
; RV64I-NEXT:    bltu a0, a1, .LBB9_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:  .LBB9_2:
; RV64I-NEXT:    neg a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_abs64_unsigned:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    minu a0, a0, a1
; RV64ZBB-NEXT:    ret
  %n = sub i64 0, %x
  %t = call i64 @llvm.umax.i64(i64 %n, i64 %x)
  %r = sub i64 0, %t
  ret i64 %r
}

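; Inverse form neg(smin(-x, x)) is expected to fold to max with Zbb.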
define i32 @expanded_neg_inv_abs32(i32 %x) {
; RV32I-LABEL: expanded_neg_inv_abs32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    blt a1, a0, .LBB10_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:  .LBB10_2:
; RV32I-NEXT:    neg a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    max a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_inv_abs32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    blt a0, a1, .LBB10_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB10_2:
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a1, a0
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    min a0, a0, a1
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    ret
  %n = sub i32 0, %x
  %t = call i32 @llvm.smin.i32(i32 %n, i32 %x)
  %r = sub i32 0, %t
  ret i32 %r
}

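; Unsigned inverse form: neg(umin(-x, x)) is expected to fold to maxu with Zbb.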
define i32 @expanded_neg_inv_abs32_unsigned(i32 %x) {
; RV32I-LABEL: expanded_neg_inv_abs32_unsigned:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    bltu a1, a0, .LBB11_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:  .LBB11_2:
; RV32I-NEXT:    neg a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs32_unsigned:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    maxu a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_inv_abs32_unsigned:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    bltu a0, a1, .LBB11_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB11_2:
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs32_unsigned:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    sext.w a1, a0
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    minu a0, a0, a1
; RV64ZBB-NEXT:    negw a0, a0
; RV64ZBB-NEXT:    ret
  %n = sub i32 0, %x
  %t = call i32 @llvm.umin.i32(i32 %n, i32 %x)
  %r = sub i32 0, %t
  ret i32 %r
}

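; i64 inverse form; RV64 with Zbb folds to neg+max.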
define i64 @expanded_neg_inv_abs64(i64 %x) {
; RV32I-LABEL: expanded_neg_inv_abs64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    snez a2, a0
; RV32I-NEXT:    neg a3, a1
; RV32I-NEXT:    sub a2, a3, a2
; RV32I-NEXT:    neg a3, a0
; RV32I-NEXT:    beq a2, a1, .LBB12_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    slt a4, a2, a1
; RV32I-NEXT:    beqz a4, .LBB12_3
; RV32I-NEXT:    j .LBB12_4
; RV32I-NEXT:  .LBB12_2:
; RV32I-NEXT:    sltu a4, a3, a0
; RV32I-NEXT:    bnez a4, .LBB12_4
; RV32I-NEXT:  .LBB12_3:
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    mv a3, a0
; RV32I-NEXT:  .LBB12_4:
; RV32I-NEXT:    snez a0, a3
; RV32I-NEXT:    add a0, a2, a0
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    neg a0, a3
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    snez a2, a0
; RV32ZBB-NEXT:    neg a3, a1
; RV32ZBB-NEXT:    sub a2, a3, a2
; RV32ZBB-NEXT:    neg a3, a0
; RV32ZBB-NEXT:    beq a2, a1, .LBB12_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    slt a4, a2, a1
; RV32ZBB-NEXT:    beqz a4, .LBB12_3
; RV32ZBB-NEXT:    j .LBB12_4
; RV32ZBB-NEXT:  .LBB12_2:
; RV32ZBB-NEXT:    sltu a4, a3, a0
; RV32ZBB-NEXT:    bnez a4, .LBB12_4
; RV32ZBB-NEXT:  .LBB12_3:
; RV32ZBB-NEXT:    mv a2, a1
; RV32ZBB-NEXT:    mv a3, a0
; RV32ZBB-NEXT:  .LBB12_4:
; RV32ZBB-NEXT:    snez a0, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    neg a0, a3
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_inv_abs64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    neg a1, a0
; RV64I-NEXT:    blt a1, a0, .LBB12_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:  .LBB12_2:
; RV64I-NEXT:    neg a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    max a0, a0, a1
; RV64ZBB-NEXT:    ret
  %n = sub i64 0, %x
  %t = call i64 @llvm.smin.i64(i64 %n, i64 %x)
  %r = sub i64 0, %t
  ret i64 %r
}

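; Unsigned i64 inverse form; RV64 with Zbb folds to neg+maxu.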
define i64 @expanded_neg_inv_abs64_unsigned(i64 %x) {
; RV32I-LABEL: expanded_neg_inv_abs64_unsigned:
; RV32I:       # %bb.0:
; RV32I-NEXT:    snez a2, a0
; RV32I-NEXT:    neg a3, a1
; RV32I-NEXT:    sub a2, a3, a2
; RV32I-NEXT:    neg a3, a0
; RV32I-NEXT:    beq a2, a1, .LBB13_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sltu a4, a2, a1
; RV32I-NEXT:    beqz a4, .LBB13_3
; RV32I-NEXT:    j .LBB13_4
; RV32I-NEXT:  .LBB13_2:
; RV32I-NEXT:    sltu a4, a3, a0
; RV32I-NEXT:    bnez a4, .LBB13_4
; RV32I-NEXT:  .LBB13_3:
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    mv a3, a0
; RV32I-NEXT:  .LBB13_4:
; RV32I-NEXT:    snez a0, a3
; RV32I-NEXT:    add a0, a2, a0
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    neg a0, a3
; RV32I-NEXT:    ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs64_unsigned:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    snez a2, a0
; RV32ZBB-NEXT:    neg a3, a1
; RV32ZBB-NEXT:    sub a2, a3, a2
; RV32ZBB-NEXT:    neg a3, a0
; RV32ZBB-NEXT:    beq a2, a1, .LBB13_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    sltu a4, a2, a1
; RV32ZBB-NEXT:    beqz a4, .LBB13_3
; RV32ZBB-NEXT:    j .LBB13_4
; RV32ZBB-NEXT:  .LBB13_2:
; RV32ZBB-NEXT:    sltu a4, a3, a0
; RV32ZBB-NEXT:    bnez a4, .LBB13_4
; RV32ZBB-NEXT:  .LBB13_3:
; RV32ZBB-NEXT:    mv a2, a1
; RV32ZBB-NEXT:    mv a3, a0
; RV32ZBB-NEXT:  .LBB13_4:
; RV32ZBB-NEXT:    snez a0, a3
; RV32ZBB-NEXT:    add a0, a2, a0
; RV32ZBB-NEXT:    neg a1, a0
; RV32ZBB-NEXT:    neg a0, a3
; RV32ZBB-NEXT:    ret
;
; RV64I-LABEL: expanded_neg_inv_abs64_unsigned:
; RV64I:       # %bb.0:
; RV64I-NEXT:    neg a1, a0
; RV64I-NEXT:    bltu a1, a0, .LBB13_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:  .LBB13_2:
; RV64I-NEXT:    neg a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs64_unsigned:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    neg a1, a0
; RV64ZBB-NEXT:    maxu a0, a0, a1
; RV64ZBB-NEXT:    ret
  %n = sub i64 0, %x
  %t = call i64 @llvm.umin.i64(i64 %n, i64 %x)
  %r = sub i64 0, %t
  ret i64 %r
}