; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA %s

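; Check that "atomicrmw sub" with a constant operand of 1 is lowered to an
; amoadd of -1 when the A extension is available, and to a call to the
; __atomic_fetch_sub_4 libcall otherwise.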
define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
; RV32I-LABEL: atomicrmw_sub_i32_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 1
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    call __atomic_fetch_sub_4
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomicrmw_sub_i32_constant:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    li a1, -1
; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomicrmw_sub_i32_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_fetch_sub_4
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomicrmw_sub_i32_constant:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    li a1, -1
; RV64IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
; RV64IA-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i32 1 seq_cst
  ret i32 %1
}

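; The i64 variant can use amoadd.d only on RV64 with the A extension; RV32,
; with or without A, has no 64-bit AMO and falls back to the
; __atomic_fetch_sub_8 libcall.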
define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
; RV32I-LABEL: atomicrmw_sub_i64_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 1
; RV32I-NEXT:    li a3, 5
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_fetch_sub_8
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomicrmw_sub_i64_constant:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 1
; RV32IA-NEXT:    li a3, 5
; RV32IA-NEXT:    li a2, 0
; RV32IA-NEXT:    call __atomic_fetch_sub_8
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomicrmw_sub_i64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_fetch_sub_8
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomicrmw_sub_i64_constant:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    li a1, -1
; RV64IA-NEXT:    amoadd.d.aqrl a0, a1, (a0)
; RV64IA-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i64 1 seq_cst
  ret i64 %1
}

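; When the subtrahend is itself a subtraction, the negation folds away:
; a - (x - y) == a + (y - x), so with the A extension the operands of the
; sub are swapped and amoadd is used directly.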
define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
; RV32I-LABEL: atomicrmw_sub_i32_neg:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sub a1, a1, a2
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    call __atomic_fetch_sub_4
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomicrmw_sub_i32_neg:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sub a2, a2, a1
; RV32IA-NEXT:    amoadd.w.aqrl a0, a2, (a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomicrmw_sub_i32_neg:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    subw a1, a1, a2
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_fetch_sub_4
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomicrmw_sub_i32_neg:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sub a2, a2, a1
; RV64IA-NEXT:    amoadd.w.aqrl a0, a2, (a0)
; RV64IA-NEXT:    ret
  %b = sub i32 %x, %y
  %1 = atomicrmw sub ptr %a, i32 %b seq_cst
  ret i32 %1
}

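; Same fold for i64: RV64 with the A extension uses amoadd.d of (y - x),
; while RV32 and plain RV64I call the __atomic_fetch_sub_8 libcall.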
define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
; RV32I-LABEL: atomicrmw_sub_i64_neg:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sltu a5, a1, a3
; RV32I-NEXT:    sub a2, a2, a4
; RV32I-NEXT:    sub a2, a2, a5
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    li a3, 5
; RV32I-NEXT:    call __atomic_fetch_sub_8
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomicrmw_sub_i64_neg:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    sltu a5, a1, a3
; RV32IA-NEXT:    sub a2, a2, a4
; RV32IA-NEXT:    sub a2, a2, a5
; RV32IA-NEXT:    sub a1, a1, a3
; RV32IA-NEXT:    li a3, 5
; RV32IA-NEXT:    call __atomic_fetch_sub_8
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomicrmw_sub_i64_neg:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sub a1, a1, a2
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_fetch_sub_8
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomicrmw_sub_i64_neg:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sub a2, a2, a1
; RV64IA-NEXT:    amoadd.d.aqrl a0, a2, (a0)
; RV64IA-NEXT:    ret
  %b = sub i64 %x, %y
  %1 = atomicrmw sub ptr %a, i64 %b seq_cst
  ret i64 %1
}