; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 -mattr=+d --verify-machineinstrs < %s | \
; RUN:   FileCheck %s --check-prefix=LA64

;; TODO: Testing for LA32 architecture will be added later

define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB0_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB0_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB1_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB1_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB4_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB4_3: # in Loop: Header=BB4_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB4_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB5_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB5_3: # in Loop: Header=BB5_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB5_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB8_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB8_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB8_3: # in Loop: Header=BB8_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB8_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB9_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB9_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB9_3: # in Loop: Header=BB9_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB9_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB12_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB12_3: # in Loop: Header=BB12_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB12_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB13_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB13_3: # in Loop: Header=BB13_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB13_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB16_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB16_3: # in Loop: Header=BB16_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB16_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB17_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB17_3: # in Loop: Header=BB17_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB17_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB20_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB20_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB20_3: # in Loop: Header=BB20_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB20_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB21_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB21_3: # in Loop: Header=BB21_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB21_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB24_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB24_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB24_3: # in Loop: Header=BB24_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB24_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB25_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB25_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB25_3: # in Loop: Header=BB25_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB25_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB28_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB28_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB28_3: # in Loop: Header=BB28_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB28_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB29_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB29_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB29_3: # in Loop: Header=BB29_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB29_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB32_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB32_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB32_3: # in Loop: Header=BB32_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB32_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB33_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB33_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB33_3: # in Loop: Header=BB33_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB33_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB36_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB36_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB36_3: # in Loop: Header=BB36_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB36_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB37_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB37_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB37_3: # in Loop: Header=BB37_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB37_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB40_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB40_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB41_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB41_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB44_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB44_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB45_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB45_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB48_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB48_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB49_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB49_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB52_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB52_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB53_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB53_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB56_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB56_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB56_3: # in Loop: Header=BB56_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB56_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB57_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB57_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB57_3: # in Loop: Header=BB57_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB57_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB60_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB60_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB60_3: # in Loop: Header=BB60_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB60_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB61_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB61_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB61_3: # in Loop: Header=BB61_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB61_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB64_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB64_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB64_3: # in Loop: Header=BB64_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB64_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB65_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB65_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB65_3: # in Loop: Header=BB65_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB65_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b monotonic
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB68_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB68_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB68_3: # in Loop: Header=BB68_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB68_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB69_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB69_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB69_3: # in Loop: Header=BB69_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB69_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b monotonic
  ret i64 %1
}

define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB72_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB72_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB72_3: # in Loop: Header=BB72_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB72_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB73_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB73_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB73_3: # in Loop: Header=BB73_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB73_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b monotonic
  ret i64 %1
}

define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 255
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    xori $a3, $a3, 56
; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB76_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB76_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB76_3: # in Loop: Header=BB76_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB76_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    ori $a5, $zero, 48
; LA64-NEXT:    sub.d $a3, $a5, $a3
; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB77_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB77_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB77_3: # in Loop: Header=BB77_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB77_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b monotonic
  ret i64 %1
}
1636