xref: /llvm-project/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -O3 -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
3; RUN:   | FileCheck -check-prefixes=RV32 %s
4; RUN: llc -O3 -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
5; RUN:   | FileCheck -check-prefixes=RV64 %s
6
; i32 xchg whose result is unused: the dead result is folded into rd=zero,
; so both targets emit a single amoswap.w (seq_cst selects the .aqrl form).
7define void @amoswap_w_discard(ptr %a, i32 %b) nounwind {
8; RV32-LABEL: amoswap_w_discard:
9; RV32:       # %bb.0:
10; RV32-NEXT:    amoswap.w.aqrl zero, a1, (a0)
11; RV32-NEXT:    ret
12;
13; RV64-LABEL: amoswap_w_discard:
14; RV64:       # %bb.0:
15; RV64-NEXT:    amoswap.w.aqrl zero, a1, (a0)
16; RV64-NEXT:    ret
17  %1 = atomicrmw xchg ptr %a, i32 %b seq_cst
18  ret void
19}
20
; i64 xchg: RV32 has no 64-bit AMO, so it falls back to the
; __atomic_exchange_8 libcall (a3 = 5, presumably __ATOMIC_SEQ_CST — matches
; the seq_cst ordering in the IR). RV64 emits a single amoswap.d to rd=zero.
21define void @amoswap_d_discard(ptr %a, i64 %b) nounwind {
22; RV32-LABEL: amoswap_d_discard:
23; RV32:       # %bb.0:
24; RV32-NEXT:    addi sp, sp, -16
25; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
26; RV32-NEXT:    li a3, 5
27; RV32-NEXT:    call __atomic_exchange_8
28; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
29; RV32-NEXT:    addi sp, sp, 16
30; RV32-NEXT:    ret
31;
32; RV64-LABEL: amoswap_d_discard:
33; RV64:       # %bb.0:
34; RV64-NEXT:    amoswap.d.aqrl zero, a1, (a0)
35; RV64-NEXT:    ret
36  %1 = atomicrmw xchg ptr %a, i64 %b seq_cst
37  ret void
38}
39
; i32 add with discarded result: single amoadd.w.aqrl writing rd=zero on both
; targets.
40define void @amoadd_w_discard(ptr %a, i32 %b) nounwind {
41; RV32-LABEL: amoadd_w_discard:
42; RV32:       # %bb.0:
43; RV32-NEXT:    amoadd.w.aqrl zero, a1, (a0)
44; RV32-NEXT:    ret
45;
46; RV64-LABEL: amoadd_w_discard:
47; RV64:       # %bb.0:
48; RV64-NEXT:    amoadd.w.aqrl zero, a1, (a0)
49; RV64-NEXT:    ret
50  %1 = atomicrmw add ptr %a, i32 %b seq_cst
51  ret void
52}
53
; i64 add: RV32 libcall fallback (__atomic_fetch_add_8, a3 = 5 ordering);
; RV64 single amoadd.d to rd=zero.
54define void @amoadd_d_discard(ptr %a, i64 %b) nounwind {
55; RV32-LABEL: amoadd_d_discard:
56; RV32:       # %bb.0:
57; RV32-NEXT:    addi sp, sp, -16
58; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
59; RV32-NEXT:    li a3, 5
60; RV32-NEXT:    call __atomic_fetch_add_8
61; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
62; RV32-NEXT:    addi sp, sp, 16
63; RV32-NEXT:    ret
64;
65; RV64-LABEL: amoadd_d_discard:
66; RV64:       # %bb.0:
67; RV64-NEXT:    amoadd.d.aqrl zero, a1, (a0)
68; RV64-NEXT:    ret
69  %1 = atomicrmw add ptr %a, i64 %b seq_cst
70  ret void
71}
72
; i32 and with discarded result: single amoand.w.aqrl writing rd=zero on both
; targets.
73define void @amoand_w_discard(ptr %a, i32 %b) nounwind {
74; RV32-LABEL: amoand_w_discard:
75; RV32:       # %bb.0:
76; RV32-NEXT:    amoand.w.aqrl zero, a1, (a0)
77; RV32-NEXT:    ret
78;
79; RV64-LABEL: amoand_w_discard:
80; RV64:       # %bb.0:
81; RV64-NEXT:    amoand.w.aqrl zero, a1, (a0)
82; RV64-NEXT:    ret
83  %1 = atomicrmw and ptr %a, i32 %b seq_cst
84  ret void
85}
86
; i64 and: RV32 libcall fallback (__atomic_fetch_and_8, a3 = 5 ordering);
; RV64 single amoand.d to rd=zero.
87define void @amoand_d_discard(ptr %a, i64 %b) nounwind {
88; RV32-LABEL: amoand_d_discard:
89; RV32:       # %bb.0:
90; RV32-NEXT:    addi sp, sp, -16
91; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
92; RV32-NEXT:    li a3, 5
93; RV32-NEXT:    call __atomic_fetch_and_8
94; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
95; RV32-NEXT:    addi sp, sp, 16
96; RV32-NEXT:    ret
97;
98; RV64-LABEL: amoand_d_discard:
99; RV64:       # %bb.0:
100; RV64-NEXT:    amoand.d.aqrl zero, a1, (a0)
101; RV64-NEXT:    ret
102  %1 = atomicrmw and ptr %a, i64 %b seq_cst
103  ret void
104}
105
; i32 or with discarded result: single amoor.w.aqrl writing rd=zero on both
; targets.
106define void @amoor_w_discard(ptr %a, i32 %b) nounwind {
107; RV32-LABEL: amoor_w_discard:
108; RV32:       # %bb.0:
109; RV32-NEXT:    amoor.w.aqrl zero, a1, (a0)
110; RV32-NEXT:    ret
111;
112; RV64-LABEL: amoor_w_discard:
113; RV64:       # %bb.0:
114; RV64-NEXT:    amoor.w.aqrl zero, a1, (a0)
115; RV64-NEXT:    ret
116  %1 = atomicrmw or ptr %a, i32 %b seq_cst
117  ret void
118}
119
; i64 or: RV32 libcall fallback (__atomic_fetch_or_8, a3 = 5 ordering);
; RV64 single amoor.d to rd=zero.
120define void @amoor_d_discard(ptr %a, i64 %b) nounwind {
121; RV32-LABEL: amoor_d_discard:
122; RV32:       # %bb.0:
123; RV32-NEXT:    addi sp, sp, -16
124; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
125; RV32-NEXT:    li a3, 5
126; RV32-NEXT:    call __atomic_fetch_or_8
127; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
128; RV32-NEXT:    addi sp, sp, 16
129; RV32-NEXT:    ret
130;
131; RV64-LABEL: amoor_d_discard:
132; RV64:       # %bb.0:
133; RV64-NEXT:    amoor.d.aqrl zero, a1, (a0)
134; RV64-NEXT:    ret
135  %1 = atomicrmw or ptr %a, i64 %b seq_cst
136  ret void
137}
138
; i32 xor with discarded result: single amoxor.w.aqrl writing rd=zero on both
; targets.
; NOTE: this test previously exercised `atomicrmw or` (copy-paste from the
; amoor case), leaving the xor path untested; fixed to use xor/amoxor.
139define void @amoxor_w_discard(ptr %a, i32 %b) nounwind {
140; RV32-LABEL: amoxor_w_discard:
141; RV32:       # %bb.0:
142; RV32-NEXT:    amoxor.w.aqrl zero, a1, (a0)
143; RV32-NEXT:    ret
144;
145; RV64-LABEL: amoxor_w_discard:
146; RV64:       # %bb.0:
147; RV64-NEXT:    amoxor.w.aqrl zero, a1, (a0)
148; RV64-NEXT:    ret
149  %1 = atomicrmw xor ptr %a, i32 %b seq_cst
150  ret void
151}
152
; i64 xor: RV32 libcall fallback (__atomic_fetch_xor_8, a3 = 5 ordering);
; RV64 single amoxor.d to rd=zero.
; NOTE: this test previously exercised `atomicrmw or` / __atomic_fetch_or_8 /
; amoor.d (copy-paste from the amoor case); fixed to use xor.
153define void @amoxor_d_discard(ptr %a, i64 %b) nounwind {
154; RV32-LABEL: amoxor_d_discard:
155; RV32:       # %bb.0:
156; RV32-NEXT:    addi sp, sp, -16
157; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
158; RV32-NEXT:    li a3, 5
159; RV32-NEXT:    call __atomic_fetch_xor_8
160; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
161; RV32-NEXT:    addi sp, sp, 16
162; RV32-NEXT:    ret
163;
164; RV64-LABEL: amoxor_d_discard:
165; RV64:       # %bb.0:
166; RV64-NEXT:    amoxor.d.aqrl zero, a1, (a0)
167; RV64-NEXT:    ret
168  %1 = atomicrmw xor ptr %a, i64 %b seq_cst
169  ret void
170}
171
; i32 signed max with discarded result: single amomax.w.aqrl writing rd=zero
; on both targets.
172define void @amomax_w_discard(ptr %a, i32 %b) nounwind {
173; RV32-LABEL: amomax_w_discard:
174; RV32:       # %bb.0:
175; RV32-NEXT:    amomax.w.aqrl zero, a1, (a0)
176; RV32-NEXT:    ret
177;
178; RV64-LABEL: amomax_w_discard:
179; RV64:       # %bb.0:
180; RV64-NEXT:    amomax.w.aqrl zero, a1, (a0)
181; RV64-NEXT:    ret
182  %1 = atomicrmw max ptr %a, i32 %b seq_cst
183  ret void
184}
185
; i64 signed max: RV32 has no 64-bit AMO, so the expansion is a CAS loop —
; the high words are compared with slt (signed) and, on equal highs, the low
; words with sltu (unsigned), then __atomic_compare_exchange_8 retries until
; it succeeds (both `li 5` appear to be the success/failure orderings,
; matching the seq_cst IR). RV64 folds the dead result into rd=zero with a
; single amomax.d.
186define void @amomax_d_discard(ptr %a, i64 %b) nounwind {
187; RV32-LABEL: amomax_d_discard:
188; RV32:       # %bb.0:
189; RV32-NEXT:    addi sp, sp, -32
190; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
191; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
192; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
193; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
194; RV32-NEXT:    mv s0, a2
195; RV32-NEXT:    mv s1, a0
196; RV32-NEXT:    lw a4, 0(a0)
197; RV32-NEXT:    lw a5, 4(a0)
198; RV32-NEXT:    mv s2, a1
199; RV32-NEXT:    j .LBB11_2
200; RV32-NEXT:  .LBB11_1: # %atomicrmw.start
201; RV32-NEXT:    # in Loop: Header=BB11_2 Depth=1
202; RV32-NEXT:    sw a4, 8(sp)
203; RV32-NEXT:    sw a5, 12(sp)
204; RV32-NEXT:    addi a1, sp, 8
205; RV32-NEXT:    li a4, 5
206; RV32-NEXT:    li a5, 5
207; RV32-NEXT:    mv a0, s1
208; RV32-NEXT:    call __atomic_compare_exchange_8
209; RV32-NEXT:    lw a4, 8(sp)
210; RV32-NEXT:    lw a5, 12(sp)
211; RV32-NEXT:    bnez a0, .LBB11_6
212; RV32-NEXT:  .LBB11_2: # %atomicrmw.start
213; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
214; RV32-NEXT:    beq a5, s0, .LBB11_4
215; RV32-NEXT:  # %bb.3: # %atomicrmw.start
216; RV32-NEXT:    # in Loop: Header=BB11_2 Depth=1
217; RV32-NEXT:    slt a0, s0, a5
218; RV32-NEXT:    mv a2, a4
219; RV32-NEXT:    mv a3, a5
220; RV32-NEXT:    bnez a0, .LBB11_1
221; RV32-NEXT:    j .LBB11_5
222; RV32-NEXT:  .LBB11_4: # in Loop: Header=BB11_2 Depth=1
223; RV32-NEXT:    sltu a0, s2, a4
224; RV32-NEXT:    mv a2, a4
225; RV32-NEXT:    mv a3, a5
226; RV32-NEXT:    bnez a0, .LBB11_1
227; RV32-NEXT:  .LBB11_5: # %atomicrmw.start
228; RV32-NEXT:    # in Loop: Header=BB11_2 Depth=1
229; RV32-NEXT:    mv a2, s2
230; RV32-NEXT:    mv a3, s0
231; RV32-NEXT:    j .LBB11_1
232; RV32-NEXT:  .LBB11_6: # %atomicrmw.end
233; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
234; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
235; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
236; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
237; RV32-NEXT:    addi sp, sp, 32
238; RV32-NEXT:    ret
239;
240; RV64-LABEL: amomax_d_discard:
241; RV64:       # %bb.0:
242; RV64-NEXT:    amomax.d.aqrl zero, a1, (a0)
243; RV64-NEXT:    ret
244  %1 = atomicrmw max ptr %a, i64 %b seq_cst
245  ret void
246}
247
; i32 unsigned max with discarded result: single amomaxu.w.aqrl writing
; rd=zero on both targets.
248define void @amomaxu_w_discard(ptr %a, i32 %b) nounwind {
249; RV32-LABEL: amomaxu_w_discard:
250; RV32:       # %bb.0:
251; RV32-NEXT:    amomaxu.w.aqrl zero, a1, (a0)
252; RV32-NEXT:    ret
253;
254; RV64-LABEL: amomaxu_w_discard:
255; RV64:       # %bb.0:
256; RV64-NEXT:    amomaxu.w.aqrl zero, a1, (a0)
257; RV64-NEXT:    ret
258  %1 = atomicrmw umax ptr %a, i32 %b seq_cst
259  ret void
260}
261
; i64 unsigned max: RV32 CAS-loop expansion — like amomax_d but both the
; high-word and low-word compares use sltu (unsigned). RV64 emits a single
; amomaxu.d to rd=zero.
262define void @amomaxu_d_discard(ptr %a, i64 %b) nounwind {
263; RV32-LABEL: amomaxu_d_discard:
264; RV32:       # %bb.0:
265; RV32-NEXT:    addi sp, sp, -32
266; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
267; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
268; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
269; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
270; RV32-NEXT:    mv s0, a2
271; RV32-NEXT:    mv s1, a0
272; RV32-NEXT:    lw a4, 0(a0)
273; RV32-NEXT:    lw a5, 4(a0)
274; RV32-NEXT:    mv s2, a1
275; RV32-NEXT:    j .LBB13_2
276; RV32-NEXT:  .LBB13_1: # %atomicrmw.start
277; RV32-NEXT:    # in Loop: Header=BB13_2 Depth=1
278; RV32-NEXT:    sw a4, 8(sp)
279; RV32-NEXT:    sw a5, 12(sp)
280; RV32-NEXT:    addi a1, sp, 8
281; RV32-NEXT:    li a4, 5
282; RV32-NEXT:    li a5, 5
283; RV32-NEXT:    mv a0, s1
284; RV32-NEXT:    call __atomic_compare_exchange_8
285; RV32-NEXT:    lw a4, 8(sp)
286; RV32-NEXT:    lw a5, 12(sp)
287; RV32-NEXT:    bnez a0, .LBB13_6
288; RV32-NEXT:  .LBB13_2: # %atomicrmw.start
289; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
290; RV32-NEXT:    beq a5, s0, .LBB13_4
291; RV32-NEXT:  # %bb.3: # %atomicrmw.start
292; RV32-NEXT:    # in Loop: Header=BB13_2 Depth=1
293; RV32-NEXT:    sltu a0, s0, a5
294; RV32-NEXT:    mv a2, a4
295; RV32-NEXT:    mv a3, a5
296; RV32-NEXT:    bnez a0, .LBB13_1
297; RV32-NEXT:    j .LBB13_5
298; RV32-NEXT:  .LBB13_4: # in Loop: Header=BB13_2 Depth=1
299; RV32-NEXT:    sltu a0, s2, a4
300; RV32-NEXT:    mv a2, a4
301; RV32-NEXT:    mv a3, a5
302; RV32-NEXT:    bnez a0, .LBB13_1
303; RV32-NEXT:  .LBB13_5: # %atomicrmw.start
304; RV32-NEXT:    # in Loop: Header=BB13_2 Depth=1
305; RV32-NEXT:    mv a2, s2
306; RV32-NEXT:    mv a3, s0
307; RV32-NEXT:    j .LBB13_1
308; RV32-NEXT:  .LBB13_6: # %atomicrmw.end
309; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
310; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
311; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
312; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
313; RV32-NEXT:    addi sp, sp, 32
314; RV32-NEXT:    ret
315;
316; RV64-LABEL: amomaxu_d_discard:
317; RV64:       # %bb.0:
318; RV64-NEXT:    amomaxu.d.aqrl zero, a1, (a0)
319; RV64-NEXT:    ret
320  %1 = atomicrmw umax ptr %a, i64 %b seq_cst
321  ret void
322}
323
; i32 signed min with discarded result: single amomin.w.aqrl writing rd=zero
; on both targets.
324define void @amomin_w_discard(ptr %a, i32 %b) nounwind {
325; RV32-LABEL: amomin_w_discard:
326; RV32:       # %bb.0:
327; RV32-NEXT:    amomin.w.aqrl zero, a1, (a0)
328; RV32-NEXT:    ret
329;
330; RV64-LABEL: amomin_w_discard:
331; RV64:       # %bb.0:
332; RV64-NEXT:    amomin.w.aqrl zero, a1, (a0)
333; RV64-NEXT:    ret
334  %1 = atomicrmw min ptr %a, i32 %b seq_cst
335  ret void
336}
337
; i64 signed min: RV32 CAS-loop expansion — same shape as amomax_d but the
; branch sense is inverted (beqz instead of bnez after the slt/sltu
; compares), so the old value is kept when it is the smaller one. RV64 emits
; a single amomin.d to rd=zero.
338define void @amomin_d_discard(ptr %a, i64 %b) nounwind {
339; RV32-LABEL: amomin_d_discard:
340; RV32:       # %bb.0:
341; RV32-NEXT:    addi sp, sp, -32
342; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
343; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
344; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
345; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
346; RV32-NEXT:    mv s0, a2
347; RV32-NEXT:    mv s1, a0
348; RV32-NEXT:    lw a4, 0(a0)
349; RV32-NEXT:    lw a5, 4(a0)
350; RV32-NEXT:    mv s2, a1
351; RV32-NEXT:    j .LBB15_2
352; RV32-NEXT:  .LBB15_1: # %atomicrmw.start
353; RV32-NEXT:    # in Loop: Header=BB15_2 Depth=1
354; RV32-NEXT:    sw a4, 8(sp)
355; RV32-NEXT:    sw a5, 12(sp)
356; RV32-NEXT:    addi a1, sp, 8
357; RV32-NEXT:    li a4, 5
358; RV32-NEXT:    li a5, 5
359; RV32-NEXT:    mv a0, s1
360; RV32-NEXT:    call __atomic_compare_exchange_8
361; RV32-NEXT:    lw a4, 8(sp)
362; RV32-NEXT:    lw a5, 12(sp)
363; RV32-NEXT:    bnez a0, .LBB15_6
364; RV32-NEXT:  .LBB15_2: # %atomicrmw.start
365; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
366; RV32-NEXT:    beq a5, s0, .LBB15_4
367; RV32-NEXT:  # %bb.3: # %atomicrmw.start
368; RV32-NEXT:    # in Loop: Header=BB15_2 Depth=1
369; RV32-NEXT:    slt a0, s0, a5
370; RV32-NEXT:    mv a2, a4
371; RV32-NEXT:    mv a3, a5
372; RV32-NEXT:    beqz a0, .LBB15_1
373; RV32-NEXT:    j .LBB15_5
374; RV32-NEXT:  .LBB15_4: # in Loop: Header=BB15_2 Depth=1
375; RV32-NEXT:    sltu a0, s2, a4
376; RV32-NEXT:    mv a2, a4
377; RV32-NEXT:    mv a3, a5
378; RV32-NEXT:    beqz a0, .LBB15_1
379; RV32-NEXT:  .LBB15_5: # %atomicrmw.start
380; RV32-NEXT:    # in Loop: Header=BB15_2 Depth=1
381; RV32-NEXT:    mv a2, s2
382; RV32-NEXT:    mv a3, s0
383; RV32-NEXT:    j .LBB15_1
384; RV32-NEXT:  .LBB15_6: # %atomicrmw.end
385; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
386; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
387; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
388; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
389; RV32-NEXT:    addi sp, sp, 32
390; RV32-NEXT:    ret
391;
392; RV64-LABEL: amomin_d_discard:
393; RV64:       # %bb.0:
394; RV64-NEXT:    amomin.d.aqrl zero, a1, (a0)
395; RV64-NEXT:    ret
396  %1 = atomicrmw min ptr %a, i64 %b seq_cst
397  ret void
398}
399
; i32 unsigned min with discarded result: single amominu.w.aqrl writing
; rd=zero on both targets.
400define void @amominu_w_discard(ptr %a, i32 %b) nounwind {
401; RV32-LABEL: amominu_w_discard:
402; RV32:       # %bb.0:
403; RV32-NEXT:    amominu.w.aqrl zero, a1, (a0)
404; RV32-NEXT:    ret
405;
406; RV64-LABEL: amominu_w_discard:
407; RV64:       # %bb.0:
408; RV64-NEXT:    amominu.w.aqrl zero, a1, (a0)
409; RV64-NEXT:    ret
410  %1 = atomicrmw umin ptr %a, i32 %b seq_cst
411  ret void
412}
413
; i64 unsigned min: RV32 CAS-loop expansion — unsigned compares (sltu) on
; both words with the inverted (beqz) branch sense of the min case. RV64
; emits a single amominu.d to rd=zero.
414define void @amominu_d_discard(ptr %a, i64 %b) nounwind {
415; RV32-LABEL: amominu_d_discard:
416; RV32:       # %bb.0:
417; RV32-NEXT:    addi sp, sp, -32
418; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
419; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
420; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
421; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
422; RV32-NEXT:    mv s0, a2
423; RV32-NEXT:    mv s1, a0
424; RV32-NEXT:    lw a4, 0(a0)
425; RV32-NEXT:    lw a5, 4(a0)
426; RV32-NEXT:    mv s2, a1
427; RV32-NEXT:    j .LBB17_2
428; RV32-NEXT:  .LBB17_1: # %atomicrmw.start
429; RV32-NEXT:    # in Loop: Header=BB17_2 Depth=1
430; RV32-NEXT:    sw a4, 8(sp)
431; RV32-NEXT:    sw a5, 12(sp)
432; RV32-NEXT:    addi a1, sp, 8
433; RV32-NEXT:    li a4, 5
434; RV32-NEXT:    li a5, 5
435; RV32-NEXT:    mv a0, s1
436; RV32-NEXT:    call __atomic_compare_exchange_8
437; RV32-NEXT:    lw a4, 8(sp)
438; RV32-NEXT:    lw a5, 12(sp)
439; RV32-NEXT:    bnez a0, .LBB17_6
440; RV32-NEXT:  .LBB17_2: # %atomicrmw.start
441; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
442; RV32-NEXT:    beq a5, s0, .LBB17_4
443; RV32-NEXT:  # %bb.3: # %atomicrmw.start
444; RV32-NEXT:    # in Loop: Header=BB17_2 Depth=1
445; RV32-NEXT:    sltu a0, s0, a5
446; RV32-NEXT:    mv a2, a4
447; RV32-NEXT:    mv a3, a5
448; RV32-NEXT:    beqz a0, .LBB17_1
449; RV32-NEXT:    j .LBB17_5
450; RV32-NEXT:  .LBB17_4: # in Loop: Header=BB17_2 Depth=1
451; RV32-NEXT:    sltu a0, s2, a4
452; RV32-NEXT:    mv a2, a4
453; RV32-NEXT:    mv a3, a5
454; RV32-NEXT:    beqz a0, .LBB17_1
455; RV32-NEXT:  .LBB17_5: # %atomicrmw.start
456; RV32-NEXT:    # in Loop: Header=BB17_2 Depth=1
457; RV32-NEXT:    mv a2, s2
458; RV32-NEXT:    mv a3, s0
459; RV32-NEXT:    j .LBB17_1
460; RV32-NEXT:  .LBB17_6: # %atomicrmw.end
461; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
462; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
463; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
464; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
465; RV32-NEXT:    addi sp, sp, 32
466; RV32-NEXT:    ret
467;
468; RV64-LABEL: amominu_d_discard:
469; RV64:       # %bb.0:
470; RV64-NEXT:    amominu.d.aqrl zero, a1, (a0)
471; RV64-NEXT:    ret
472  %1 = atomicrmw umin ptr %a, i64 %b seq_cst
473  ret void
474}
475