; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    addi.w $a5, $a1, 0
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB0_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    addi.w $a5, $a1, 0
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB0_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i8 %b acquire
  ret i8 %1
}

define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_0_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a2, $zero, 255
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:    nor $a2, $a2, $zero
; LA32-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    and $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB1_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_0_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a2, $zero, 255
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    nor $a2, $a2, $zero
; LA64-NEXT:    amand_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i8 0 acquire
  ret i8 %1
}

define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a2, $zero, 255
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB2_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a2, $zero, 255
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    amor_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i8 -1 acquire
  ret i8 %1
}

define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    addi.w $a5, $a1, 0
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB3_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    addi.w $a5, $a1, 0
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB3_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i16 %b acquire
  ret i16 %1
}

define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_0_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a2, 15
; LA32-NEXT:    ori $a2, $a2, 4095
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:    nor $a2, $a2, $zero
; LA32-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    and $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB4_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_0_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    nor $a2, $a2, $zero
; LA64-NEXT:    amand_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i16 0 acquire
  ret i16 %1
}

define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a2, 15
; LA32-NEXT:    ori $a2, $a2, 4095
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB5_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    amor_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i16 -1 acquire
  ret i16 %1
}

define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    move $a3, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB6_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_exchange_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    add.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB8_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    add.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB8_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    add.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB9_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    add.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB9_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    add.w $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB10_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    sub.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB12_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_sub_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    sub.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB12_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    sub.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB13_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_sub_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    sub.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB13_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    sub.w $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB14_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_sub_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    sub.w $a2, $zero, $a1
; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
; LA64-NEXT:    move $a0, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_sub_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    sub.d $a2, $zero, $a1
; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
; LA64-NEXT:    move $a0, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    and $a5, $a4, $a1
; LA32-NEXT:    nor $a5, $a5, $zero
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB16_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_nand_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a5, $a4, $a1
; LA64-NEXT:    nor $a5, $a5, $zero
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB16_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw nand ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    and $a5, $a4, $a1
; LA32-NEXT:    nor $a5, $a5, $zero
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB17_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_nand_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a5, $a4, $a1
; LA64-NEXT:    nor $a5, $a5, $zero
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB17_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw nand ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    and $a3, $a2, $a1
; LA32-NEXT:    nor $a3, $a3, $zero
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB18_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_nand_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a2, $a0, 0
; LA64-NEXT:    and $a3, $a2, $a1
; LA64-NEXT:    nor $a3, $a3, $zero
; LA64-NEXT:    sc.w $a3, $a0, 0
; LA64-NEXT:    beqz $a3, .LBB18_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw nand ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_nand_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:  .LBB19_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.d $a2, $a0, 0
; LA64-NEXT:    and $a3, $a2, $a1
; LA64-NEXT:    nor $a3, $a3, $zero
; LA64-NEXT:    sc.d $a3, $a0, 0
; LA64-NEXT:    beqz $a3, .LBB19_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw nand ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:    orn $a1, $a1, $a3
; LA32-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    and $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB20_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_and_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    orn $a1, $a1, $a3
; LA64-NEXT:    amand_db.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw and ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:    orn $a1, $a1, $a3
; LA32-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    and $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB21_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_and_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    orn $a1, $a1, $a3
; LA64-NEXT:    amand_db.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw and ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    and $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB22_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_and_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amand_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw and ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_and_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amand_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw and ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB24_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amor_db.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB25_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amor_db.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB26_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    or $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB26_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amor_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amor_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i8_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    xor $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB28_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i16_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    xor $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB29_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i32_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB30_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    xor $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB30_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i64_acquire:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i8_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    addi.w $a5, $a1, 0
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB32_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    addi.w $a5, $a1, 0
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB32_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i8 %b release
  ret i8 %1
}

define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_0_i8_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a2, $zero, 255
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:    nor $a2, $a2, $zero
; LA32-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    and $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB33_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_0_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a2, $zero, 255
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    nor $a2, $a2, $zero
; LA64-NEXT:    amand_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i8 0 release
  ret i8 %1
}

define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_minus_1_i8_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a2, $zero, 255
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:  .LBB34_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB34_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_minus_1_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a2, $zero, 255
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    amor_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i8 -1 release
  ret i8 %1
}

define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i16_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    addi.w $a5, $a1, 0
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB35_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    addi.w $a5, $a1, 0
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB35_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i16 %b release
  ret i16 %1
}

define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_0_i16_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a2, 15
; LA32-NEXT:    ori $a2, $a2, 4095
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:    nor $a2, $a2, $zero
; LA32-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    and $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB36_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_0_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    nor $a2, $a2, $zero
; LA64-NEXT:    amand_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i16 0 release
  ret i16 %1
}

define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
; LA32-LABEL: atomicrmw_xchg_minus_1_i16_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a1, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a2, 15
; LA32-NEXT:    ori $a2, $a2, 4095
; LA32-NEXT:    sll.w $a2, $a2, $a1
; LA32-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a2
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB37_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_minus_1_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a1, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    sll.w $a2, $a2, $a1
; LA64-NEXT:    amor_db.w $a3, $a2, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a1
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i16 -1 release
  ret i16 %1
}

define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i32_release:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    move $a3, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB38_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i64_release:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 3
; LA32-NEXT:    bl %plt(__atomic_exchange_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xchg_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xchg ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i8_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    add.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB40_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    add.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB40_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i16_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    lu12i.w $a3, 15
; LA32-NEXT:    ori $a3, $a3, 4095
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    add.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB41_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4095
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    add.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB41_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i32_release:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    add.w $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB42_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i64_release:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    ori $a3, $zero, 3
; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_add_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw add ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i8_release:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    sll.w $a3, $a3, $a2
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a4, $a0, 0
; LA32-NEXT:    sub.w $a5, $a4, $a1
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    and $a5, $a5, $a3
; LA32-NEXT:    xor $a5, $a4, $a5
; LA32-NEXT:    sc.w $a5, $a0, 0
; LA32-NEXT:    beqz $a5, .LBB44_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a4, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_sub_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    sub.w $a5, $a4, $a1
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB44_1
; LA64-NEXT:  # %bb.2:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw sub ptr %a, i8 %b release
  ret i8 %1
}

1413define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
1414; LA32-LABEL: atomicrmw_sub_i16_release:
1415; LA32:       # %bb.0:
1416; LA32-NEXT:    slli.w $a2, $a0, 3
1417; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1418; LA32-NEXT:    lu12i.w $a3, 15
1419; LA32-NEXT:    ori $a3, $a3, 4095
1420; LA32-NEXT:    sll.w $a3, $a3, $a2
1421; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
1422; LA32-NEXT:    sll.w $a1, $a1, $a2
1423; LA32-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
1424; LA32-NEXT:    ll.w $a4, $a0, 0
1425; LA32-NEXT:    sub.w $a5, $a4, $a1
1426; LA32-NEXT:    xor $a5, $a4, $a5
1427; LA32-NEXT:    and $a5, $a5, $a3
1428; LA32-NEXT:    xor $a5, $a4, $a5
1429; LA32-NEXT:    sc.w $a5, $a0, 0
1430; LA32-NEXT:    beqz $a5, .LBB45_1
1431; LA32-NEXT:  # %bb.2:
1432; LA32-NEXT:    srl.w $a0, $a4, $a2
1433; LA32-NEXT:    ret
1434;
1435; LA64-LABEL: atomicrmw_sub_i16_release:
1436; LA64:       # %bb.0:
1437; LA64-NEXT:    slli.d $a2, $a0, 3
1438; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1439; LA64-NEXT:    lu12i.w $a3, 15
1440; LA64-NEXT:    ori $a3, $a3, 4095
1441; LA64-NEXT:    sll.w $a3, $a3, $a2
1442; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
1443; LA64-NEXT:    sll.w $a1, $a1, $a2
1444; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
1445; LA64-NEXT:    ll.w $a4, $a0, 0
1446; LA64-NEXT:    sub.w $a5, $a4, $a1
1447; LA64-NEXT:    xor $a5, $a4, $a5
1448; LA64-NEXT:    and $a5, $a5, $a3
1449; LA64-NEXT:    xor $a5, $a4, $a5
1450; LA64-NEXT:    sc.w $a5, $a0, 0
1451; LA64-NEXT:    beqz $a5, .LBB45_1
1452; LA64-NEXT:  # %bb.2:
1453; LA64-NEXT:    srl.w $a0, $a4, $a2
1454; LA64-NEXT:    ret
1455  %1 = atomicrmw sub ptr %a, i16 %b release
1456  ret i16 %1
1457}
1458
1459define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
1460; LA32-LABEL: atomicrmw_sub_i32_release:
1461; LA32:       # %bb.0:
1462; LA32-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
1463; LA32-NEXT:    ll.w $a2, $a0, 0
1464; LA32-NEXT:    sub.w $a3, $a2, $a1
1465; LA32-NEXT:    sc.w $a3, $a0, 0
1466; LA32-NEXT:    beqz $a3, .LBB46_1
1467; LA32-NEXT:  # %bb.2:
1468; LA32-NEXT:    move $a0, $a2
1469; LA32-NEXT:    ret
1470;
1471; LA64-LABEL: atomicrmw_sub_i32_release:
1472; LA64:       # %bb.0:
1473; LA64-NEXT:    sub.w $a2, $zero, $a1
1474; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
1475; LA64-NEXT:    move $a0, $a1
1476; LA64-NEXT:    ret
1477  %1 = atomicrmw sub ptr %a, i32 %b release
1478  ret i32 %1
1479}
1480
1481define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
1482; LA32-LABEL: atomicrmw_sub_i64_release:
1483; LA32:       # %bb.0:
1484; LA32-NEXT:    addi.w $sp, $sp, -16
1485; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
1486; LA32-NEXT:    ori $a3, $zero, 3
1487; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
1488; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
1489; LA32-NEXT:    addi.w $sp, $sp, 16
1490; LA32-NEXT:    ret
1491;
1492; LA64-LABEL: atomicrmw_sub_i64_release:
1493; LA64:       # %bb.0:
1494; LA64-NEXT:    sub.d $a2, $zero, $a1
1495; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
1496; LA64-NEXT:    move $a0, $a1
1497; LA64-NEXT:    ret
1498  %1 = atomicrmw sub ptr %a, i64 %b release
1499  ret i64 %1
1500}
1501
1502define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
1503; LA32-LABEL: atomicrmw_nand_i8_release:
1504; LA32:       # %bb.0:
1505; LA32-NEXT:    slli.w $a2, $a0, 3
1506; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1507; LA32-NEXT:    ori $a3, $zero, 255
1508; LA32-NEXT:    sll.w $a3, $a3, $a2
1509; LA32-NEXT:    andi $a1, $a1, 255
1510; LA32-NEXT:    sll.w $a1, $a1, $a2
1511; LA32-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
1512; LA32-NEXT:    ll.w $a4, $a0, 0
1513; LA32-NEXT:    and $a5, $a4, $a1
1514; LA32-NEXT:    nor $a5, $a5, $zero
1515; LA32-NEXT:    xor $a5, $a4, $a5
1516; LA32-NEXT:    and $a5, $a5, $a3
1517; LA32-NEXT:    xor $a5, $a4, $a5
1518; LA32-NEXT:    sc.w $a5, $a0, 0
1519; LA32-NEXT:    beqz $a5, .LBB48_1
1520; LA32-NEXT:  # %bb.2:
1521; LA32-NEXT:    srl.w $a0, $a4, $a2
1522; LA32-NEXT:    ret
1523;
1524; LA64-LABEL: atomicrmw_nand_i8_release:
1525; LA64:       # %bb.0:
1526; LA64-NEXT:    slli.d $a2, $a0, 3
1527; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1528; LA64-NEXT:    ori $a3, $zero, 255
1529; LA64-NEXT:    sll.w $a3, $a3, $a2
1530; LA64-NEXT:    andi $a1, $a1, 255
1531; LA64-NEXT:    sll.w $a1, $a1, $a2
1532; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
1533; LA64-NEXT:    ll.w $a4, $a0, 0
1534; LA64-NEXT:    and $a5, $a4, $a1
1535; LA64-NEXT:    nor $a5, $a5, $zero
1536; LA64-NEXT:    xor $a5, $a4, $a5
1537; LA64-NEXT:    and $a5, $a5, $a3
1538; LA64-NEXT:    xor $a5, $a4, $a5
1539; LA64-NEXT:    sc.w $a5, $a0, 0
1540; LA64-NEXT:    beqz $a5, .LBB48_1
1541; LA64-NEXT:  # %bb.2:
1542; LA64-NEXT:    srl.w $a0, $a4, $a2
1543; LA64-NEXT:    ret
1544  %1 = atomicrmw nand ptr %a, i8 %b release
1545  ret i8 %1
1546}
1547
1548define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
1549; LA32-LABEL: atomicrmw_nand_i16_release:
1550; LA32:       # %bb.0:
1551; LA32-NEXT:    slli.w $a2, $a0, 3
1552; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1553; LA32-NEXT:    lu12i.w $a3, 15
1554; LA32-NEXT:    ori $a3, $a3, 4095
1555; LA32-NEXT:    sll.w $a3, $a3, $a2
1556; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
1557; LA32-NEXT:    sll.w $a1, $a1, $a2
1558; LA32-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
1559; LA32-NEXT:    ll.w $a4, $a0, 0
1560; LA32-NEXT:    and $a5, $a4, $a1
1561; LA32-NEXT:    nor $a5, $a5, $zero
1562; LA32-NEXT:    xor $a5, $a4, $a5
1563; LA32-NEXT:    and $a5, $a5, $a3
1564; LA32-NEXT:    xor $a5, $a4, $a5
1565; LA32-NEXT:    sc.w $a5, $a0, 0
1566; LA32-NEXT:    beqz $a5, .LBB49_1
1567; LA32-NEXT:  # %bb.2:
1568; LA32-NEXT:    srl.w $a0, $a4, $a2
1569; LA32-NEXT:    ret
1570;
1571; LA64-LABEL: atomicrmw_nand_i16_release:
1572; LA64:       # %bb.0:
1573; LA64-NEXT:    slli.d $a2, $a0, 3
1574; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1575; LA64-NEXT:    lu12i.w $a3, 15
1576; LA64-NEXT:    ori $a3, $a3, 4095
1577; LA64-NEXT:    sll.w $a3, $a3, $a2
1578; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
1579; LA64-NEXT:    sll.w $a1, $a1, $a2
1580; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
1581; LA64-NEXT:    ll.w $a4, $a0, 0
1582; LA64-NEXT:    and $a5, $a4, $a1
1583; LA64-NEXT:    nor $a5, $a5, $zero
1584; LA64-NEXT:    xor $a5, $a4, $a5
1585; LA64-NEXT:    and $a5, $a5, $a3
1586; LA64-NEXT:    xor $a5, $a4, $a5
1587; LA64-NEXT:    sc.w $a5, $a0, 0
1588; LA64-NEXT:    beqz $a5, .LBB49_1
1589; LA64-NEXT:  # %bb.2:
1590; LA64-NEXT:    srl.w $a0, $a4, $a2
1591; LA64-NEXT:    ret
1592  %1 = atomicrmw nand ptr %a, i16 %b release
1593  ret i16 %1
1594}
1595
1596define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
1597; LA32-LABEL: atomicrmw_nand_i32_release:
1598; LA32:       # %bb.0:
1599; LA32-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
1600; LA32-NEXT:    ll.w $a2, $a0, 0
1601; LA32-NEXT:    and $a3, $a2, $a1
1602; LA32-NEXT:    nor $a3, $a3, $zero
1603; LA32-NEXT:    sc.w $a3, $a0, 0
1604; LA32-NEXT:    beqz $a3, .LBB50_1
1605; LA32-NEXT:  # %bb.2:
1606; LA32-NEXT:    move $a0, $a2
1607; LA32-NEXT:    ret
1608;
1609; LA64-LABEL: atomicrmw_nand_i32_release:
1610; LA64:       # %bb.0:
1611; LA64-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
1612; LA64-NEXT:    ll.w $a2, $a0, 0
1613; LA64-NEXT:    and $a3, $a2, $a1
1614; LA64-NEXT:    nor $a3, $a3, $zero
1615; LA64-NEXT:    sc.w $a3, $a0, 0
1616; LA64-NEXT:    beqz $a3, .LBB50_1
1617; LA64-NEXT:  # %bb.2:
1618; LA64-NEXT:    move $a0, $a2
1619; LA64-NEXT:    ret
1620  %1 = atomicrmw nand ptr %a, i32 %b release
1621  ret i32 %1
1622}
1623
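; Note on the i64 nand case below: LA32 has no native 64-bit atomics and is
; expected to fall back to the __atomic_fetch_nand_8 libcall, passing the
; ordering as an immediate (3, matching the release memory-order constant),
; while LA64 uses an ll.d/sc.d retry loop since there is no am*-type
; instruction for nand.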
1624define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
1625; LA32-LABEL: atomicrmw_nand_i64_release:
1626; LA32:       # %bb.0:
1627; LA32-NEXT:    addi.w $sp, $sp, -16
1628; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
1629; LA32-NEXT:    ori $a3, $zero, 3
1630; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
1631; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
1632; LA32-NEXT:    addi.w $sp, $sp, 16
1633; LA32-NEXT:    ret
1634;
1635; LA64-LABEL: atomicrmw_nand_i64_release:
1636; LA64:       # %bb.0:
1637; LA64-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
1638; LA64-NEXT:    ll.d $a2, $a0, 0
1639; LA64-NEXT:    and $a3, $a2, $a1
1640; LA64-NEXT:    nor $a3, $a3, $zero
1641; LA64-NEXT:    sc.d $a3, $a0, 0
1642; LA64-NEXT:    beqz $a3, .LBB51_1
1643; LA64-NEXT:  # %bb.2:
1644; LA64-NEXT:    move $a0, $a2
1645; LA64-NEXT:    ret
1646  %1 = atomicrmw nand ptr %a, i64 %b release
1647  ret i64 %1
1648}
1649
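; Sub-word and needs no extra masking inside the loop: both targets orn the
; shifted operand with the field mask so every bit outside the addressed
; byte/halfword is set (and thus preserved by the and), which lets LA64 use a
; plain amand_db.w.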
1650define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
1651; LA32-LABEL: atomicrmw_and_i8_release:
1652; LA32:       # %bb.0:
1653; LA32-NEXT:    slli.w $a2, $a0, 3
1654; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1655; LA32-NEXT:    ori $a3, $zero, 255
1656; LA32-NEXT:    sll.w $a3, $a3, $a2
1657; LA32-NEXT:    andi $a1, $a1, 255
1658; LA32-NEXT:    sll.w $a1, $a1, $a2
1659; LA32-NEXT:    orn $a1, $a1, $a3
1660; LA32-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
1661; LA32-NEXT:    ll.w $a3, $a0, 0
1662; LA32-NEXT:    and $a4, $a3, $a1
1663; LA32-NEXT:    sc.w $a4, $a0, 0
1664; LA32-NEXT:    beqz $a4, .LBB52_1
1665; LA32-NEXT:  # %bb.2:
1666; LA32-NEXT:    srl.w $a0, $a3, $a2
1667; LA32-NEXT:    ret
1668;
1669; LA64-LABEL: atomicrmw_and_i8_release:
1670; LA64:       # %bb.0:
1671; LA64-NEXT:    slli.d $a2, $a0, 3
1672; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1673; LA64-NEXT:    ori $a3, $zero, 255
1674; LA64-NEXT:    sll.w $a3, $a3, $a2
1675; LA64-NEXT:    andi $a1, $a1, 255
1676; LA64-NEXT:    sll.w $a1, $a1, $a2
1677; LA64-NEXT:    orn $a1, $a1, $a3
1678; LA64-NEXT:    amand_db.w $a3, $a1, $a0
1679; LA64-NEXT:    srl.w $a0, $a3, $a2
1680; LA64-NEXT:    ret
1681  %1 = atomicrmw and ptr %a, i8 %b release
1682  ret i8 %1
1683}
1684
1685define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
1686; LA32-LABEL: atomicrmw_and_i16_release:
1687; LA32:       # %bb.0:
1688; LA32-NEXT:    slli.w $a2, $a0, 3
1689; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1690; LA32-NEXT:    lu12i.w $a3, 15
1691; LA32-NEXT:    ori $a3, $a3, 4095
1692; LA32-NEXT:    sll.w $a3, $a3, $a2
1693; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
1694; LA32-NEXT:    sll.w $a1, $a1, $a2
1695; LA32-NEXT:    orn $a1, $a1, $a3
1696; LA32-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
1697; LA32-NEXT:    ll.w $a3, $a0, 0
1698; LA32-NEXT:    and $a4, $a3, $a1
1699; LA32-NEXT:    sc.w $a4, $a0, 0
1700; LA32-NEXT:    beqz $a4, .LBB53_1
1701; LA32-NEXT:  # %bb.2:
1702; LA32-NEXT:    srl.w $a0, $a3, $a2
1703; LA32-NEXT:    ret
1704;
1705; LA64-LABEL: atomicrmw_and_i16_release:
1706; LA64:       # %bb.0:
1707; LA64-NEXT:    slli.d $a2, $a0, 3
1708; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1709; LA64-NEXT:    lu12i.w $a3, 15
1710; LA64-NEXT:    ori $a3, $a3, 4095
1711; LA64-NEXT:    sll.w $a3, $a3, $a2
1712; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
1713; LA64-NEXT:    sll.w $a1, $a1, $a2
1714; LA64-NEXT:    orn $a1, $a1, $a3
1715; LA64-NEXT:    amand_db.w $a3, $a1, $a0
1716; LA64-NEXT:    srl.w $a0, $a3, $a2
1717; LA64-NEXT:    ret
1718  %1 = atomicrmw and ptr %a, i16 %b release
1719  ret i16 %1
1720}
1721
1722define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
1723; LA32-LABEL: atomicrmw_and_i32_release:
1724; LA32:       # %bb.0:
1725; LA32-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
1726; LA32-NEXT:    ll.w $a2, $a0, 0
1727; LA32-NEXT:    and $a3, $a2, $a1
1728; LA32-NEXT:    sc.w $a3, $a0, 0
1729; LA32-NEXT:    beqz $a3, .LBB54_1
1730; LA32-NEXT:  # %bb.2:
1731; LA32-NEXT:    move $a0, $a2
1732; LA32-NEXT:    ret
1733;
1734; LA64-LABEL: atomicrmw_and_i32_release:
1735; LA64:       # %bb.0:
1736; LA64-NEXT:    amand_db.w $a2, $a1, $a0
1737; LA64-NEXT:    move $a0, $a2
1738; LA64-NEXT:    ret
1739  %1 = atomicrmw and ptr %a, i32 %b release
1740  ret i32 %1
1741}
1742
1743define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
1744; LA32-LABEL: atomicrmw_and_i64_release:
1745; LA32:       # %bb.0:
1746; LA32-NEXT:    addi.w $sp, $sp, -16
1747; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
1748; LA32-NEXT:    ori $a3, $zero, 3
1749; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
1750; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
1751; LA32-NEXT:    addi.w $sp, $sp, 16
1752; LA32-NEXT:    ret
1753;
1754; LA64-LABEL: atomicrmw_and_i64_release:
1755; LA64:       # %bb.0:
1756; LA64-NEXT:    amand_db.d $a2, $a1, $a0
1757; LA64-NEXT:    move $a0, $a2
1758; LA64-NEXT:    ret
1759  %1 = atomicrmw and ptr %a, i64 %b release
1760  ret i64 %1
1761}
1762
1763define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
1764; LA32-LABEL: atomicrmw_or_i8_release:
1765; LA32:       # %bb.0:
1766; LA32-NEXT:    slli.w $a2, $a0, 3
1767; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1768; LA32-NEXT:    andi $a1, $a1, 255
1769; LA32-NEXT:    sll.w $a1, $a1, $a2
1770; LA32-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
1771; LA32-NEXT:    ll.w $a3, $a0, 0
1772; LA32-NEXT:    or $a4, $a3, $a1
1773; LA32-NEXT:    sc.w $a4, $a0, 0
1774; LA32-NEXT:    beqz $a4, .LBB56_1
1775; LA32-NEXT:  # %bb.2:
1776; LA32-NEXT:    srl.w $a0, $a3, $a2
1777; LA32-NEXT:    ret
1778;
1779; LA64-LABEL: atomicrmw_or_i8_release:
1780; LA64:       # %bb.0:
1781; LA64-NEXT:    slli.d $a2, $a0, 3
1782; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1783; LA64-NEXT:    andi $a1, $a1, 255
1784; LA64-NEXT:    sll.w $a1, $a1, $a2
1785; LA64-NEXT:    amor_db.w $a3, $a1, $a0
1786; LA64-NEXT:    srl.w $a0, $a3, $a2
1787; LA64-NEXT:    ret
1788  %1 = atomicrmw or ptr %a, i8 %b release
1789  ret i8 %1
1790}
1791
1792define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
1793; LA32-LABEL: atomicrmw_or_i16_release:
1794; LA32:       # %bb.0:
1795; LA32-NEXT:    slli.w $a2, $a0, 3
1796; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1797; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
1798; LA32-NEXT:    sll.w $a1, $a1, $a2
1799; LA32-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
1800; LA32-NEXT:    ll.w $a3, $a0, 0
1801; LA32-NEXT:    or $a4, $a3, $a1
1802; LA32-NEXT:    sc.w $a4, $a0, 0
1803; LA32-NEXT:    beqz $a4, .LBB57_1
1804; LA32-NEXT:  # %bb.2:
1805; LA32-NEXT:    srl.w $a0, $a3, $a2
1806; LA32-NEXT:    ret
1807;
1808; LA64-LABEL: atomicrmw_or_i16_release:
1809; LA64:       # %bb.0:
1810; LA64-NEXT:    slli.d $a2, $a0, 3
1811; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1812; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
1813; LA64-NEXT:    sll.w $a1, $a1, $a2
1814; LA64-NEXT:    amor_db.w $a3, $a1, $a0
1815; LA64-NEXT:    srl.w $a0, $a3, $a2
1816; LA64-NEXT:    ret
1817  %1 = atomicrmw or ptr %a, i16 %b release
1818  ret i16 %1
1819}
1820
1821define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
1822; LA32-LABEL: atomicrmw_or_i32_release:
1823; LA32:       # %bb.0:
1824; LA32-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
1825; LA32-NEXT:    ll.w $a2, $a0, 0
1826; LA32-NEXT:    or $a3, $a2, $a1
1827; LA32-NEXT:    sc.w $a3, $a0, 0
1828; LA32-NEXT:    beqz $a3, .LBB58_1
1829; LA32-NEXT:  # %bb.2:
1830; LA32-NEXT:    move $a0, $a2
1831; LA32-NEXT:    ret
1832;
1833; LA64-LABEL: atomicrmw_or_i32_release:
1834; LA64:       # %bb.0:
1835; LA64-NEXT:    amor_db.w $a2, $a1, $a0
1836; LA64-NEXT:    move $a0, $a2
1837; LA64-NEXT:    ret
1838  %1 = atomicrmw or ptr %a, i32 %b release
1839  ret i32 %1
1840}
1841
1842define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
1843; LA32-LABEL: atomicrmw_or_i64_release:
1844; LA32:       # %bb.0:
1845; LA32-NEXT:    addi.w $sp, $sp, -16
1846; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
1847; LA32-NEXT:    ori $a3, $zero, 3
1848; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
1849; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
1850; LA32-NEXT:    addi.w $sp, $sp, 16
1851; LA32-NEXT:    ret
1852;
1853; LA64-LABEL: atomicrmw_or_i64_release:
1854; LA64:       # %bb.0:
1855; LA64-NEXT:    amor_db.d $a2, $a1, $a0
1856; LA64-NEXT:    move $a0, $a2
1857; LA64-NEXT:    ret
1858  %1 = atomicrmw or ptr %a, i64 %b release
1859  ret i64 %1
1860}
1861
1862define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
1863; LA32-LABEL: atomicrmw_xor_i8_release:
1864; LA32:       # %bb.0:
1865; LA32-NEXT:    slli.w $a2, $a0, 3
1866; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1867; LA32-NEXT:    andi $a1, $a1, 255
1868; LA32-NEXT:    sll.w $a1, $a1, $a2
1869; LA32-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
1870; LA32-NEXT:    ll.w $a3, $a0, 0
1871; LA32-NEXT:    xor $a4, $a3, $a1
1872; LA32-NEXT:    sc.w $a4, $a0, 0
1873; LA32-NEXT:    beqz $a4, .LBB60_1
1874; LA32-NEXT:  # %bb.2:
1875; LA32-NEXT:    srl.w $a0, $a3, $a2
1876; LA32-NEXT:    ret
1877;
1878; LA64-LABEL: atomicrmw_xor_i8_release:
1879; LA64:       # %bb.0:
1880; LA64-NEXT:    slli.d $a2, $a0, 3
1881; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1882; LA64-NEXT:    andi $a1, $a1, 255
1883; LA64-NEXT:    sll.w $a1, $a1, $a2
1884; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
1885; LA64-NEXT:    srl.w $a0, $a3, $a2
1886; LA64-NEXT:    ret
1887  %1 = atomicrmw xor ptr %a, i8 %b release
1888  ret i8 %1
1889}
1890
1891define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
1892; LA32-LABEL: atomicrmw_xor_i16_release:
1893; LA32:       # %bb.0:
1894; LA32-NEXT:    slli.w $a2, $a0, 3
1895; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1896; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
1897; LA32-NEXT:    sll.w $a1, $a1, $a2
1898; LA32-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
1899; LA32-NEXT:    ll.w $a3, $a0, 0
1900; LA32-NEXT:    xor $a4, $a3, $a1
1901; LA32-NEXT:    sc.w $a4, $a0, 0
1902; LA32-NEXT:    beqz $a4, .LBB61_1
1903; LA32-NEXT:  # %bb.2:
1904; LA32-NEXT:    srl.w $a0, $a3, $a2
1905; LA32-NEXT:    ret
1906;
1907; LA64-LABEL: atomicrmw_xor_i16_release:
1908; LA64:       # %bb.0:
1909; LA64-NEXT:    slli.d $a2, $a0, 3
1910; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1911; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
1912; LA64-NEXT:    sll.w $a1, $a1, $a2
1913; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
1914; LA64-NEXT:    srl.w $a0, $a3, $a2
1915; LA64-NEXT:    ret
1916  %1 = atomicrmw xor ptr %a, i16 %b release
1917  ret i16 %1
1918}
1919
1920define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
1921; LA32-LABEL: atomicrmw_xor_i32_release:
1922; LA32:       # %bb.0:
1923; LA32-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
1924; LA32-NEXT:    ll.w $a2, $a0, 0
1925; LA32-NEXT:    xor $a3, $a2, $a1
1926; LA32-NEXT:    sc.w $a3, $a0, 0
1927; LA32-NEXT:    beqz $a3, .LBB62_1
1928; LA32-NEXT:  # %bb.2:
1929; LA32-NEXT:    move $a0, $a2
1930; LA32-NEXT:    ret
1931;
1932; LA64-LABEL: atomicrmw_xor_i32_release:
1933; LA64:       # %bb.0:
1934; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
1935; LA64-NEXT:    move $a0, $a2
1936; LA64-NEXT:    ret
1937  %1 = atomicrmw xor ptr %a, i32 %b release
1938  ret i32 %1
1939}
1940
1941define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
1942; LA32-LABEL: atomicrmw_xor_i64_release:
1943; LA32:       # %bb.0:
1944; LA32-NEXT:    addi.w $sp, $sp, -16
1945; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
1946; LA32-NEXT:    ori $a3, $zero, 3
1947; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
1948; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
1949; LA32-NEXT:    addi.w $sp, $sp, 16
1950; LA32-NEXT:    ret
1951;
1952; LA64-LABEL: atomicrmw_xor_i64_release:
1953; LA64:       # %bb.0:
1954; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
1955; LA64-NEXT:    move $a0, $a2
1956; LA64-NEXT:    ret
1957  %1 = atomicrmw xor ptr %a, i64 %b release
1958  ret i64 %1
1959}
1960
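; The tests below repeat the same operations with acq_rel ordering. Sub-word
; xchg/add/sub/nand still expand to masked ll.w/sc.w loops, and the LA32 i64
; cases pass ordering argument 4 (acq_rel) to the libatomic calls.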
1961define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
1962; LA32-LABEL: atomicrmw_xchg_i8_acq_rel:
1963; LA32:       # %bb.0:
1964; LA32-NEXT:    slli.w $a2, $a0, 3
1965; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
1966; LA32-NEXT:    ori $a3, $zero, 255
1967; LA32-NEXT:    sll.w $a3, $a3, $a2
1968; LA32-NEXT:    andi $a1, $a1, 255
1969; LA32-NEXT:    sll.w $a1, $a1, $a2
1970; LA32-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
1971; LA32-NEXT:    ll.w $a4, $a0, 0
1972; LA32-NEXT:    addi.w $a5, $a1, 0
1973; LA32-NEXT:    xor $a5, $a4, $a5
1974; LA32-NEXT:    and $a5, $a5, $a3
1975; LA32-NEXT:    xor $a5, $a4, $a5
1976; LA32-NEXT:    sc.w $a5, $a0, 0
1977; LA32-NEXT:    beqz $a5, .LBB64_1
1978; LA32-NEXT:  # %bb.2:
1979; LA32-NEXT:    srl.w $a0, $a4, $a2
1980; LA32-NEXT:    ret
1981;
1982; LA64-LABEL: atomicrmw_xchg_i8_acq_rel:
1983; LA64:       # %bb.0:
1984; LA64-NEXT:    slli.d $a2, $a0, 3
1985; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
1986; LA64-NEXT:    ori $a3, $zero, 255
1987; LA64-NEXT:    sll.w $a3, $a3, $a2
1988; LA64-NEXT:    andi $a1, $a1, 255
1989; LA64-NEXT:    sll.w $a1, $a1, $a2
1990; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
1991; LA64-NEXT:    ll.w $a4, $a0, 0
1992; LA64-NEXT:    addi.w $a5, $a1, 0
1993; LA64-NEXT:    xor $a5, $a4, $a5
1994; LA64-NEXT:    and $a5, $a5, $a3
1995; LA64-NEXT:    xor $a5, $a4, $a5
1996; LA64-NEXT:    sc.w $a5, $a0, 0
1997; LA64-NEXT:    beqz $a5, .LBB64_1
1998; LA64-NEXT:  # %bb.2:
1999; LA64-NEXT:    srl.w $a0, $a4, $a2
2000; LA64-NEXT:    ret
2001  %1 = atomicrmw xchg ptr %a, i8 %b acq_rel
2002  ret i8 %1
2003}
2004
2005define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
2006; LA32-LABEL: atomicrmw_xchg_0_i8_acq_rel:
2007; LA32:       # %bb.0:
2008; LA32-NEXT:    slli.w $a1, $a0, 3
2009; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2010; LA32-NEXT:    ori $a2, $zero, 255
2011; LA32-NEXT:    sll.w $a2, $a2, $a1
2012; LA32-NEXT:    nor $a2, $a2, $zero
2013; LA32-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
2014; LA32-NEXT:    ll.w $a3, $a0, 0
2015; LA32-NEXT:    and $a4, $a3, $a2
2016; LA32-NEXT:    sc.w $a4, $a0, 0
2017; LA32-NEXT:    beqz $a4, .LBB65_1
2018; LA32-NEXT:  # %bb.2:
2019; LA32-NEXT:    srl.w $a0, $a3, $a1
2020; LA32-NEXT:    ret
2021;
2022; LA64-LABEL: atomicrmw_xchg_0_i8_acq_rel:
2023; LA64:       # %bb.0:
2024; LA64-NEXT:    slli.d $a1, $a0, 3
2025; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2026; LA64-NEXT:    ori $a2, $zero, 255
2027; LA64-NEXT:    sll.w $a2, $a2, $a1
2028; LA64-NEXT:    nor $a2, $a2, $zero
2029; LA64-NEXT:    amand_db.w $a3, $a2, $a0
2030; LA64-NEXT:    srl.w $a0, $a3, $a1
2031; LA64-NEXT:    ret
2032  %1 = atomicrmw xchg ptr %a, i8 0 acq_rel
2033  ret i8 %1
2034}
2035
2036define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
2037; LA32-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
2038; LA32:       # %bb.0:
2039; LA32-NEXT:    slli.w $a1, $a0, 3
2040; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2041; LA32-NEXT:    ori $a2, $zero, 255
2042; LA32-NEXT:    sll.w $a2, $a2, $a1
2043; LA32-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
2044; LA32-NEXT:    ll.w $a3, $a0, 0
2045; LA32-NEXT:    or $a4, $a3, $a2
2046; LA32-NEXT:    sc.w $a4, $a0, 0
2047; LA32-NEXT:    beqz $a4, .LBB66_1
2048; LA32-NEXT:  # %bb.2:
2049; LA32-NEXT:    srl.w $a0, $a3, $a1
2050; LA32-NEXT:    ret
2051;
2052; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
2053; LA64:       # %bb.0:
2054; LA64-NEXT:    slli.d $a1, $a0, 3
2055; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2056; LA64-NEXT:    ori $a2, $zero, 255
2057; LA64-NEXT:    sll.w $a2, $a2, $a1
2058; LA64-NEXT:    amor_db.w $a3, $a2, $a0
2059; LA64-NEXT:    srl.w $a0, $a3, $a1
2060; LA64-NEXT:    ret
2061  %1 = atomicrmw xchg ptr %a, i8 -1 acq_rel
2062  ret i8 %1
2063}
2064
2065define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
2066; LA32-LABEL: atomicrmw_xchg_i16_acq_rel:
2067; LA32:       # %bb.0:
2068; LA32-NEXT:    slli.w $a2, $a0, 3
2069; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2070; LA32-NEXT:    lu12i.w $a3, 15
2071; LA32-NEXT:    ori $a3, $a3, 4095
2072; LA32-NEXT:    sll.w $a3, $a3, $a2
2073; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2074; LA32-NEXT:    sll.w $a1, $a1, $a2
2075; LA32-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
2076; LA32-NEXT:    ll.w $a4, $a0, 0
2077; LA32-NEXT:    addi.w $a5, $a1, 0
2078; LA32-NEXT:    xor $a5, $a4, $a5
2079; LA32-NEXT:    and $a5, $a5, $a3
2080; LA32-NEXT:    xor $a5, $a4, $a5
2081; LA32-NEXT:    sc.w $a5, $a0, 0
2082; LA32-NEXT:    beqz $a5, .LBB67_1
2083; LA32-NEXT:  # %bb.2:
2084; LA32-NEXT:    srl.w $a0, $a4, $a2
2085; LA32-NEXT:    ret
2086;
2087; LA64-LABEL: atomicrmw_xchg_i16_acq_rel:
2088; LA64:       # %bb.0:
2089; LA64-NEXT:    slli.d $a2, $a0, 3
2090; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2091; LA64-NEXT:    lu12i.w $a3, 15
2092; LA64-NEXT:    ori $a3, $a3, 4095
2093; LA64-NEXT:    sll.w $a3, $a3, $a2
2094; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2095; LA64-NEXT:    sll.w $a1, $a1, $a2
2096; LA64-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
2097; LA64-NEXT:    ll.w $a4, $a0, 0
2098; LA64-NEXT:    addi.w $a5, $a1, 0
2099; LA64-NEXT:    xor $a5, $a4, $a5
2100; LA64-NEXT:    and $a5, $a5, $a3
2101; LA64-NEXT:    xor $a5, $a4, $a5
2102; LA64-NEXT:    sc.w $a5, $a0, 0
2103; LA64-NEXT:    beqz $a5, .LBB67_1
2104; LA64-NEXT:  # %bb.2:
2105; LA64-NEXT:    srl.w $a0, $a4, $a2
2106; LA64-NEXT:    ret
2107  %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
2108  ret i16 %1
2109}
2110
2111define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
2112; LA32-LABEL: atomicrmw_xchg_0_i16_acq_rel:
2113; LA32:       # %bb.0:
2114; LA32-NEXT:    slli.w $a1, $a0, 3
2115; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2116; LA32-NEXT:    lu12i.w $a2, 15
2117; LA32-NEXT:    ori $a2, $a2, 4095
2118; LA32-NEXT:    sll.w $a2, $a2, $a1
2119; LA32-NEXT:    nor $a2, $a2, $zero
2120; LA32-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
2121; LA32-NEXT:    ll.w $a3, $a0, 0
2122; LA32-NEXT:    and $a4, $a3, $a2
2123; LA32-NEXT:    sc.w $a4, $a0, 0
2124; LA32-NEXT:    beqz $a4, .LBB68_1
2125; LA32-NEXT:  # %bb.2:
2126; LA32-NEXT:    srl.w $a0, $a3, $a1
2127; LA32-NEXT:    ret
2128;
2129; LA64-LABEL: atomicrmw_xchg_0_i16_acq_rel:
2130; LA64:       # %bb.0:
2131; LA64-NEXT:    slli.d $a1, $a0, 3
2132; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2133; LA64-NEXT:    lu12i.w $a2, 15
2134; LA64-NEXT:    ori $a2, $a2, 4095
2135; LA64-NEXT:    sll.w $a2, $a2, $a1
2136; LA64-NEXT:    nor $a2, $a2, $zero
2137; LA64-NEXT:    amand_db.w $a3, $a2, $a0
2138; LA64-NEXT:    srl.w $a0, $a3, $a1
2139; LA64-NEXT:    ret
2140  %1 = atomicrmw xchg ptr %a, i16 0 acq_rel
2141  ret i16 %1
2142}
2143
2144define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
2145; LA32-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
2146; LA32:       # %bb.0:
2147; LA32-NEXT:    slli.w $a1, $a0, 3
2148; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2149; LA32-NEXT:    lu12i.w $a2, 15
2150; LA32-NEXT:    ori $a2, $a2, 4095
2151; LA32-NEXT:    sll.w $a2, $a2, $a1
2152; LA32-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
2153; LA32-NEXT:    ll.w $a3, $a0, 0
2154; LA32-NEXT:    or $a4, $a3, $a2
2155; LA32-NEXT:    sc.w $a4, $a0, 0
2156; LA32-NEXT:    beqz $a4, .LBB69_1
2157; LA32-NEXT:  # %bb.2:
2158; LA32-NEXT:    srl.w $a0, $a3, $a1
2159; LA32-NEXT:    ret
2160;
2161; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
2162; LA64:       # %bb.0:
2163; LA64-NEXT:    slli.d $a1, $a0, 3
2164; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2165; LA64-NEXT:    lu12i.w $a2, 15
2166; LA64-NEXT:    ori $a2, $a2, 4095
2167; LA64-NEXT:    sll.w $a2, $a2, $a1
2168; LA64-NEXT:    amor_db.w $a3, $a2, $a0
2169; LA64-NEXT:    srl.w $a0, $a3, $a1
2170; LA64-NEXT:    ret
2171  %1 = atomicrmw xchg ptr %a, i16 -1 acq_rel
2172  ret i16 %1
2173}
2174
2175define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
2176; LA32-LABEL: atomicrmw_xchg_i32_acq_rel:
2177; LA32:       # %bb.0:
2178; LA32-NEXT:  .LBB70_1: # =>This Inner Loop Header: Depth=1
2179; LA32-NEXT:    ll.w $a2, $a0, 0
2180; LA32-NEXT:    move $a3, $a1
2181; LA32-NEXT:    sc.w $a3, $a0, 0
2182; LA32-NEXT:    beqz $a3, .LBB70_1
2183; LA32-NEXT:  # %bb.2:
2184; LA32-NEXT:    move $a0, $a2
2185; LA32-NEXT:    ret
2186;
2187; LA64-LABEL: atomicrmw_xchg_i32_acq_rel:
2188; LA64:       # %bb.0:
2189; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
2190; LA64-NEXT:    move $a0, $a2
2191; LA64-NEXT:    ret
2192  %1 = atomicrmw xchg ptr %a, i32 %b acq_rel
2193  ret i32 %1
2194}
2195
2196define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
2197; LA32-LABEL: atomicrmw_xchg_i64_acq_rel:
2198; LA32:       # %bb.0:
2199; LA32-NEXT:    addi.w $sp, $sp, -16
2200; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2201; LA32-NEXT:    ori $a3, $zero, 4
2202; LA32-NEXT:    bl %plt(__atomic_exchange_8)
2203; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2204; LA32-NEXT:    addi.w $sp, $sp, 16
2205; LA32-NEXT:    ret
2206;
2207; LA64-LABEL: atomicrmw_xchg_i64_acq_rel:
2208; LA64:       # %bb.0:
2209; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
2210; LA64-NEXT:    move $a0, $a2
2211; LA64-NEXT:    ret
2212  %1 = atomicrmw xchg ptr %a, i64 %b acq_rel
2213  ret i64 %1
2214}
2215
2216define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
2217; LA32-LABEL: atomicrmw_add_i8_acq_rel:
2218; LA32:       # %bb.0:
2219; LA32-NEXT:    slli.w $a2, $a0, 3
2220; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2221; LA32-NEXT:    ori $a3, $zero, 255
2222; LA32-NEXT:    sll.w $a3, $a3, $a2
2223; LA32-NEXT:    andi $a1, $a1, 255
2224; LA32-NEXT:    sll.w $a1, $a1, $a2
2225; LA32-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
2226; LA32-NEXT:    ll.w $a4, $a0, 0
2227; LA32-NEXT:    add.w $a5, $a4, $a1
2228; LA32-NEXT:    xor $a5, $a4, $a5
2229; LA32-NEXT:    and $a5, $a5, $a3
2230; LA32-NEXT:    xor $a5, $a4, $a5
2231; LA32-NEXT:    sc.w $a5, $a0, 0
2232; LA32-NEXT:    beqz $a5, .LBB72_1
2233; LA32-NEXT:  # %bb.2:
2234; LA32-NEXT:    srl.w $a0, $a4, $a2
2235; LA32-NEXT:    ret
2236;
2237; LA64-LABEL: atomicrmw_add_i8_acq_rel:
2238; LA64:       # %bb.0:
2239; LA64-NEXT:    slli.d $a2, $a0, 3
2240; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2241; LA64-NEXT:    ori $a3, $zero, 255
2242; LA64-NEXT:    sll.w $a3, $a3, $a2
2243; LA64-NEXT:    andi $a1, $a1, 255
2244; LA64-NEXT:    sll.w $a1, $a1, $a2
2245; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
2246; LA64-NEXT:    ll.w $a4, $a0, 0
2247; LA64-NEXT:    add.w $a5, $a4, $a1
2248; LA64-NEXT:    xor $a5, $a4, $a5
2249; LA64-NEXT:    and $a5, $a5, $a3
2250; LA64-NEXT:    xor $a5, $a4, $a5
2251; LA64-NEXT:    sc.w $a5, $a0, 0
2252; LA64-NEXT:    beqz $a5, .LBB72_1
2253; LA64-NEXT:  # %bb.2:
2254; LA64-NEXT:    srl.w $a0, $a4, $a2
2255; LA64-NEXT:    ret
2256  %1 = atomicrmw add ptr %a, i8 %b acq_rel
2257  ret i8 %1
2258}
2259
2260define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
2261; LA32-LABEL: atomicrmw_add_i16_acq_rel:
2262; LA32:       # %bb.0:
2263; LA32-NEXT:    slli.w $a2, $a0, 3
2264; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2265; LA32-NEXT:    lu12i.w $a3, 15
2266; LA32-NEXT:    ori $a3, $a3, 4095
2267; LA32-NEXT:    sll.w $a3, $a3, $a2
2268; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2269; LA32-NEXT:    sll.w $a1, $a1, $a2
2270; LA32-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
2271; LA32-NEXT:    ll.w $a4, $a0, 0
2272; LA32-NEXT:    add.w $a5, $a4, $a1
2273; LA32-NEXT:    xor $a5, $a4, $a5
2274; LA32-NEXT:    and $a5, $a5, $a3
2275; LA32-NEXT:    xor $a5, $a4, $a5
2276; LA32-NEXT:    sc.w $a5, $a0, 0
2277; LA32-NEXT:    beqz $a5, .LBB73_1
2278; LA32-NEXT:  # %bb.2:
2279; LA32-NEXT:    srl.w $a0, $a4, $a2
2280; LA32-NEXT:    ret
2281;
2282; LA64-LABEL: atomicrmw_add_i16_acq_rel:
2283; LA64:       # %bb.0:
2284; LA64-NEXT:    slli.d $a2, $a0, 3
2285; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2286; LA64-NEXT:    lu12i.w $a3, 15
2287; LA64-NEXT:    ori $a3, $a3, 4095
2288; LA64-NEXT:    sll.w $a3, $a3, $a2
2289; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2290; LA64-NEXT:    sll.w $a1, $a1, $a2
2291; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
2292; LA64-NEXT:    ll.w $a4, $a0, 0
2293; LA64-NEXT:    add.w $a5, $a4, $a1
2294; LA64-NEXT:    xor $a5, $a4, $a5
2295; LA64-NEXT:    and $a5, $a5, $a3
2296; LA64-NEXT:    xor $a5, $a4, $a5
2297; LA64-NEXT:    sc.w $a5, $a0, 0
2298; LA64-NEXT:    beqz $a5, .LBB73_1
2299; LA64-NEXT:  # %bb.2:
2300; LA64-NEXT:    srl.w $a0, $a4, $a2
2301; LA64-NEXT:    ret
2302  %1 = atomicrmw add ptr %a, i16 %b acq_rel
2303  ret i16 %1
2304}
2305
2306define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
2307; LA32-LABEL: atomicrmw_add_i32_acq_rel:
2308; LA32:       # %bb.0:
2309; LA32-NEXT:  .LBB74_1: # =>This Inner Loop Header: Depth=1
2310; LA32-NEXT:    ll.w $a2, $a0, 0
2311; LA32-NEXT:    add.w $a3, $a2, $a1
2312; LA32-NEXT:    sc.w $a3, $a0, 0
2313; LA32-NEXT:    beqz $a3, .LBB74_1
2314; LA32-NEXT:  # %bb.2:
2315; LA32-NEXT:    move $a0, $a2
2316; LA32-NEXT:    ret
2317;
2318; LA64-LABEL: atomicrmw_add_i32_acq_rel:
2319; LA64:       # %bb.0:
2320; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
2321; LA64-NEXT:    move $a0, $a2
2322; LA64-NEXT:    ret
2323  %1 = atomicrmw add ptr %a, i32 %b acq_rel
2324  ret i32 %1
2325}
2326
2327define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
2328; LA32-LABEL: atomicrmw_add_i64_acq_rel:
2329; LA32:       # %bb.0:
2330; LA32-NEXT:    addi.w $sp, $sp, -16
2331; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2332; LA32-NEXT:    ori $a3, $zero, 4
2333; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
2334; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2335; LA32-NEXT:    addi.w $sp, $sp, 16
2336; LA32-NEXT:    ret
2337;
2338; LA64-LABEL: atomicrmw_add_i64_acq_rel:
2339; LA64:       # %bb.0:
2340; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
2341; LA64-NEXT:    move $a0, $a2
2342; LA64-NEXT:    ret
2343  %1 = atomicrmw add ptr %a, i64 %b acq_rel
2344  ret i64 %1
2345}
2346
2347define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
2348; LA32-LABEL: atomicrmw_sub_i8_acq_rel:
2349; LA32:       # %bb.0:
2350; LA32-NEXT:    slli.w $a2, $a0, 3
2351; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2352; LA32-NEXT:    ori $a3, $zero, 255
2353; LA32-NEXT:    sll.w $a3, $a3, $a2
2354; LA32-NEXT:    andi $a1, $a1, 255
2355; LA32-NEXT:    sll.w $a1, $a1, $a2
2356; LA32-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
2357; LA32-NEXT:    ll.w $a4, $a0, 0
2358; LA32-NEXT:    sub.w $a5, $a4, $a1
2359; LA32-NEXT:    xor $a5, $a4, $a5
2360; LA32-NEXT:    and $a5, $a5, $a3
2361; LA32-NEXT:    xor $a5, $a4, $a5
2362; LA32-NEXT:    sc.w $a5, $a0, 0
2363; LA32-NEXT:    beqz $a5, .LBB76_1
2364; LA32-NEXT:  # %bb.2:
2365; LA32-NEXT:    srl.w $a0, $a4, $a2
2366; LA32-NEXT:    ret
2367;
2368; LA64-LABEL: atomicrmw_sub_i8_acq_rel:
2369; LA64:       # %bb.0:
2370; LA64-NEXT:    slli.d $a2, $a0, 3
2371; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2372; LA64-NEXT:    ori $a3, $zero, 255
2373; LA64-NEXT:    sll.w $a3, $a3, $a2
2374; LA64-NEXT:    andi $a1, $a1, 255
2375; LA64-NEXT:    sll.w $a1, $a1, $a2
2376; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
2377; LA64-NEXT:    ll.w $a4, $a0, 0
2378; LA64-NEXT:    sub.w $a5, $a4, $a1
2379; LA64-NEXT:    xor $a5, $a4, $a5
2380; LA64-NEXT:    and $a5, $a5, $a3
2381; LA64-NEXT:    xor $a5, $a4, $a5
2382; LA64-NEXT:    sc.w $a5, $a0, 0
2383; LA64-NEXT:    beqz $a5, .LBB76_1
2384; LA64-NEXT:  # %bb.2:
2385; LA64-NEXT:    srl.w $a0, $a4, $a2
2386; LA64-NEXT:    ret
2387  %1 = atomicrmw sub ptr %a, i8 %b acq_rel
2388  ret i8 %1
2389}
2390
2391define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
2392; LA32-LABEL: atomicrmw_sub_i16_acq_rel:
2393; LA32:       # %bb.0:
2394; LA32-NEXT:    slli.w $a2, $a0, 3
2395; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2396; LA32-NEXT:    lu12i.w $a3, 15
2397; LA32-NEXT:    ori $a3, $a3, 4095
2398; LA32-NEXT:    sll.w $a3, $a3, $a2
2399; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2400; LA32-NEXT:    sll.w $a1, $a1, $a2
2401; LA32-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
2402; LA32-NEXT:    ll.w $a4, $a0, 0
2403; LA32-NEXT:    sub.w $a5, $a4, $a1
2404; LA32-NEXT:    xor $a5, $a4, $a5
2405; LA32-NEXT:    and $a5, $a5, $a3
2406; LA32-NEXT:    xor $a5, $a4, $a5
2407; LA32-NEXT:    sc.w $a5, $a0, 0
2408; LA32-NEXT:    beqz $a5, .LBB77_1
2409; LA32-NEXT:  # %bb.2:
2410; LA32-NEXT:    srl.w $a0, $a4, $a2
2411; LA32-NEXT:    ret
2412;
2413; LA64-LABEL: atomicrmw_sub_i16_acq_rel:
2414; LA64:       # %bb.0:
2415; LA64-NEXT:    slli.d $a2, $a0, 3
2416; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2417; LA64-NEXT:    lu12i.w $a3, 15
2418; LA64-NEXT:    ori $a3, $a3, 4095
2419; LA64-NEXT:    sll.w $a3, $a3, $a2
2420; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2421; LA64-NEXT:    sll.w $a1, $a1, $a2
2422; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
2423; LA64-NEXT:    ll.w $a4, $a0, 0
2424; LA64-NEXT:    sub.w $a5, $a4, $a1
2425; LA64-NEXT:    xor $a5, $a4, $a5
2426; LA64-NEXT:    and $a5, $a5, $a3
2427; LA64-NEXT:    xor $a5, $a4, $a5
2428; LA64-NEXT:    sc.w $a5, $a0, 0
2429; LA64-NEXT:    beqz $a5, .LBB77_1
2430; LA64-NEXT:  # %bb.2:
2431; LA64-NEXT:    srl.w $a0, $a4, $a2
2432; LA64-NEXT:    ret
2433  %1 = atomicrmw sub ptr %a, i16 %b acq_rel
2434  ret i16 %1
2435}
2436
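; There is no amsub instruction, so for word/doubleword sub LA64 negates the
; operand and reuses amadd_db.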
2437define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
2438; LA32-LABEL: atomicrmw_sub_i32_acq_rel:
2439; LA32:       # %bb.0:
2440; LA32-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
2441; LA32-NEXT:    ll.w $a2, $a0, 0
2442; LA32-NEXT:    sub.w $a3, $a2, $a1
2443; LA32-NEXT:    sc.w $a3, $a0, 0
2444; LA32-NEXT:    beqz $a3, .LBB78_1
2445; LA32-NEXT:  # %bb.2:
2446; LA32-NEXT:    move $a0, $a2
2447; LA32-NEXT:    ret
2448;
2449; LA64-LABEL: atomicrmw_sub_i32_acq_rel:
2450; LA64:       # %bb.0:
2451; LA64-NEXT:    sub.w $a2, $zero, $a1
2452; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
2453; LA64-NEXT:    move $a0, $a1
2454; LA64-NEXT:    ret
2455  %1 = atomicrmw sub ptr %a, i32 %b acq_rel
2456  ret i32 %1
2457}
2458
2459define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
2460; LA32-LABEL: atomicrmw_sub_i64_acq_rel:
2461; LA32:       # %bb.0:
2462; LA32-NEXT:    addi.w $sp, $sp, -16
2463; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2464; LA32-NEXT:    ori $a3, $zero, 4
2465; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
2466; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2467; LA32-NEXT:    addi.w $sp, $sp, 16
2468; LA32-NEXT:    ret
2469;
2470; LA64-LABEL: atomicrmw_sub_i64_acq_rel:
2471; LA64:       # %bb.0:
2472; LA64-NEXT:    sub.d $a2, $zero, $a1
2473; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
2474; LA64-NEXT:    move $a0, $a1
2475; LA64-NEXT:    ret
2476  %1 = atomicrmw sub ptr %a, i64 %b acq_rel
2477  ret i64 %1
2478}
2479
2480define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
2481; LA32-LABEL: atomicrmw_nand_i8_acq_rel:
2482; LA32:       # %bb.0:
2483; LA32-NEXT:    slli.w $a2, $a0, 3
2484; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2485; LA32-NEXT:    ori $a3, $zero, 255
2486; LA32-NEXT:    sll.w $a3, $a3, $a2
2487; LA32-NEXT:    andi $a1, $a1, 255
2488; LA32-NEXT:    sll.w $a1, $a1, $a2
2489; LA32-NEXT:  .LBB80_1: # =>This Inner Loop Header: Depth=1
2490; LA32-NEXT:    ll.w $a4, $a0, 0
2491; LA32-NEXT:    and $a5, $a4, $a1
2492; LA32-NEXT:    nor $a5, $a5, $zero
2493; LA32-NEXT:    xor $a5, $a4, $a5
2494; LA32-NEXT:    and $a5, $a5, $a3
2495; LA32-NEXT:    xor $a5, $a4, $a5
2496; LA32-NEXT:    sc.w $a5, $a0, 0
2497; LA32-NEXT:    beqz $a5, .LBB80_1
2498; LA32-NEXT:  # %bb.2:
2499; LA32-NEXT:    srl.w $a0, $a4, $a2
2500; LA32-NEXT:    ret
2501;
2502; LA64-LABEL: atomicrmw_nand_i8_acq_rel:
2503; LA64:       # %bb.0:
2504; LA64-NEXT:    slli.d $a2, $a0, 3
2505; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2506; LA64-NEXT:    ori $a3, $zero, 255
2507; LA64-NEXT:    sll.w $a3, $a3, $a2
2508; LA64-NEXT:    andi $a1, $a1, 255
2509; LA64-NEXT:    sll.w $a1, $a1, $a2
2510; LA64-NEXT:  .LBB80_1: # =>This Inner Loop Header: Depth=1
2511; LA64-NEXT:    ll.w $a4, $a0, 0
2512; LA64-NEXT:    and $a5, $a4, $a1
2513; LA64-NEXT:    nor $a5, $a5, $zero
2514; LA64-NEXT:    xor $a5, $a4, $a5
2515; LA64-NEXT:    and $a5, $a5, $a3
2516; LA64-NEXT:    xor $a5, $a4, $a5
2517; LA64-NEXT:    sc.w $a5, $a0, 0
2518; LA64-NEXT:    beqz $a5, .LBB80_1
2519; LA64-NEXT:  # %bb.2:
2520; LA64-NEXT:    srl.w $a0, $a4, $a2
2521; LA64-NEXT:    ret
2522  %1 = atomicrmw nand ptr %a, i8 %b acq_rel
2523  ret i8 %1
2524}
2525
2526define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
2527; LA32-LABEL: atomicrmw_nand_i16_acq_rel:
2528; LA32:       # %bb.0:
2529; LA32-NEXT:    slli.w $a2, $a0, 3
2530; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2531; LA32-NEXT:    lu12i.w $a3, 15
2532; LA32-NEXT:    ori $a3, $a3, 4095
2533; LA32-NEXT:    sll.w $a3, $a3, $a2
2534; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2535; LA32-NEXT:    sll.w $a1, $a1, $a2
2536; LA32-NEXT:  .LBB81_1: # =>This Inner Loop Header: Depth=1
2537; LA32-NEXT:    ll.w $a4, $a0, 0
2538; LA32-NEXT:    and $a5, $a4, $a1
2539; LA32-NEXT:    nor $a5, $a5, $zero
2540; LA32-NEXT:    xor $a5, $a4, $a5
2541; LA32-NEXT:    and $a5, $a5, $a3
2542; LA32-NEXT:    xor $a5, $a4, $a5
2543; LA32-NEXT:    sc.w $a5, $a0, 0
2544; LA32-NEXT:    beqz $a5, .LBB81_1
2545; LA32-NEXT:  # %bb.2:
2546; LA32-NEXT:    srl.w $a0, $a4, $a2
2547; LA32-NEXT:    ret
2548;
2549; LA64-LABEL: atomicrmw_nand_i16_acq_rel:
2550; LA64:       # %bb.0:
2551; LA64-NEXT:    slli.d $a2, $a0, 3
2552; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2553; LA64-NEXT:    lu12i.w $a3, 15
2554; LA64-NEXT:    ori $a3, $a3, 4095
2555; LA64-NEXT:    sll.w $a3, $a3, $a2
2556; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2557; LA64-NEXT:    sll.w $a1, $a1, $a2
2558; LA64-NEXT:  .LBB81_1: # =>This Inner Loop Header: Depth=1
2559; LA64-NEXT:    ll.w $a4, $a0, 0
2560; LA64-NEXT:    and $a5, $a4, $a1
2561; LA64-NEXT:    nor $a5, $a5, $zero
2562; LA64-NEXT:    xor $a5, $a4, $a5
2563; LA64-NEXT:    and $a5, $a5, $a3
2564; LA64-NEXT:    xor $a5, $a4, $a5
2565; LA64-NEXT:    sc.w $a5, $a0, 0
2566; LA64-NEXT:    beqz $a5, .LBB81_1
2567; LA64-NEXT:  # %bb.2:
2568; LA64-NEXT:    srl.w $a0, $a4, $a2
2569; LA64-NEXT:    ret
2570  %1 = atomicrmw nand ptr %a, i16 %b acq_rel
2571  ret i16 %1
2572}
2573
2574define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
2575; LA32-LABEL: atomicrmw_nand_i32_acq_rel:
2576; LA32:       # %bb.0:
2577; LA32-NEXT:  .LBB82_1: # =>This Inner Loop Header: Depth=1
2578; LA32-NEXT:    ll.w $a2, $a0, 0
2579; LA32-NEXT:    and $a3, $a2, $a1
2580; LA32-NEXT:    nor $a3, $a3, $zero
2581; LA32-NEXT:    sc.w $a3, $a0, 0
2582; LA32-NEXT:    beqz $a3, .LBB82_1
2583; LA32-NEXT:  # %bb.2:
2584; LA32-NEXT:    move $a0, $a2
2585; LA32-NEXT:    ret
2586;
2587; LA64-LABEL: atomicrmw_nand_i32_acq_rel:
2588; LA64:       # %bb.0:
2589; LA64-NEXT:  .LBB82_1: # =>This Inner Loop Header: Depth=1
2590; LA64-NEXT:    ll.w $a2, $a0, 0
2591; LA64-NEXT:    and $a3, $a2, $a1
2592; LA64-NEXT:    nor $a3, $a3, $zero
2593; LA64-NEXT:    sc.w $a3, $a0, 0
2594; LA64-NEXT:    beqz $a3, .LBB82_1
2595; LA64-NEXT:  # %bb.2:
2596; LA64-NEXT:    move $a0, $a2
2597; LA64-NEXT:    ret
2598  %1 = atomicrmw nand ptr %a, i32 %b acq_rel
2599  ret i32 %1
2600}
2601
2602define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
2603; LA32-LABEL: atomicrmw_nand_i64_acq_rel:
2604; LA32:       # %bb.0:
2605; LA32-NEXT:    addi.w $sp, $sp, -16
2606; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2607; LA32-NEXT:    ori $a3, $zero, 4
2608; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
2609; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2610; LA32-NEXT:    addi.w $sp, $sp, 16
2611; LA32-NEXT:    ret
2612;
2613; LA64-LABEL: atomicrmw_nand_i64_acq_rel:
2614; LA64:       # %bb.0:
2615; LA64-NEXT:  .LBB83_1: # =>This Inner Loop Header: Depth=1
2616; LA64-NEXT:    ll.d $a2, $a0, 0
2617; LA64-NEXT:    and $a3, $a2, $a1
2618; LA64-NEXT:    nor $a3, $a3, $zero
2619; LA64-NEXT:    sc.d $a3, $a0, 0
2620; LA64-NEXT:    beqz $a3, .LBB83_1
2621; LA64-NEXT:  # %bb.2:
2622; LA64-NEXT:    move $a0, $a2
2623; LA64-NEXT:    ret
2624  %1 = atomicrmw nand ptr %a, i64 %b acq_rel
2625  ret i64 %1
2626}
2627
2628define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
2629; LA32-LABEL: atomicrmw_and_i8_acq_rel:
2630; LA32:       # %bb.0:
2631; LA32-NEXT:    slli.w $a2, $a0, 3
2632; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2633; LA32-NEXT:    ori $a3, $zero, 255
2634; LA32-NEXT:    sll.w $a3, $a3, $a2
2635; LA32-NEXT:    andi $a1, $a1, 255
2636; LA32-NEXT:    sll.w $a1, $a1, $a2
2637; LA32-NEXT:    orn $a1, $a1, $a3
2638; LA32-NEXT:  .LBB84_1: # =>This Inner Loop Header: Depth=1
2639; LA32-NEXT:    ll.w $a3, $a0, 0
2640; LA32-NEXT:    and $a4, $a3, $a1
2641; LA32-NEXT:    sc.w $a4, $a0, 0
2642; LA32-NEXT:    beqz $a4, .LBB84_1
2643; LA32-NEXT:  # %bb.2:
2644; LA32-NEXT:    srl.w $a0, $a3, $a2
2645; LA32-NEXT:    ret
2646;
2647; LA64-LABEL: atomicrmw_and_i8_acq_rel:
2648; LA64:       # %bb.0:
2649; LA64-NEXT:    slli.d $a2, $a0, 3
2650; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2651; LA64-NEXT:    ori $a3, $zero, 255
2652; LA64-NEXT:    sll.w $a3, $a3, $a2
2653; LA64-NEXT:    andi $a1, $a1, 255
2654; LA64-NEXT:    sll.w $a1, $a1, $a2
2655; LA64-NEXT:    orn $a1, $a1, $a3
2656; LA64-NEXT:    amand_db.w $a3, $a1, $a0
2657; LA64-NEXT:    srl.w $a0, $a3, $a2
2658; LA64-NEXT:    ret
2659  %1 = atomicrmw and ptr %a, i8 %b acq_rel
2660  ret i8 %1
2661}
2662
2663define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
2664; LA32-LABEL: atomicrmw_and_i16_acq_rel:
2665; LA32:       # %bb.0:
2666; LA32-NEXT:    slli.w $a2, $a0, 3
2667; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2668; LA32-NEXT:    lu12i.w $a3, 15
2669; LA32-NEXT:    ori $a3, $a3, 4095
2670; LA32-NEXT:    sll.w $a3, $a3, $a2
2671; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2672; LA32-NEXT:    sll.w $a1, $a1, $a2
2673; LA32-NEXT:    orn $a1, $a1, $a3
2674; LA32-NEXT:  .LBB85_1: # =>This Inner Loop Header: Depth=1
2675; LA32-NEXT:    ll.w $a3, $a0, 0
2676; LA32-NEXT:    and $a4, $a3, $a1
2677; LA32-NEXT:    sc.w $a4, $a0, 0
2678; LA32-NEXT:    beqz $a4, .LBB85_1
2679; LA32-NEXT:  # %bb.2:
2680; LA32-NEXT:    srl.w $a0, $a3, $a2
2681; LA32-NEXT:    ret
2682;
2683; LA64-LABEL: atomicrmw_and_i16_acq_rel:
2684; LA64:       # %bb.0:
2685; LA64-NEXT:    slli.d $a2, $a0, 3
2686; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2687; LA64-NEXT:    lu12i.w $a3, 15
2688; LA64-NEXT:    ori $a3, $a3, 4095
2689; LA64-NEXT:    sll.w $a3, $a3, $a2
2690; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2691; LA64-NEXT:    sll.w $a1, $a1, $a2
2692; LA64-NEXT:    orn $a1, $a1, $a3
2693; LA64-NEXT:    amand_db.w $a3, $a1, $a0
2694; LA64-NEXT:    srl.w $a0, $a3, $a2
2695; LA64-NEXT:    ret
2696  %1 = atomicrmw and ptr %a, i16 %b acq_rel
2697  ret i16 %1
2698}
2699
2700define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
2701; LA32-LABEL: atomicrmw_and_i32_acq_rel:
2702; LA32:       # %bb.0:
2703; LA32-NEXT:  .LBB86_1: # =>This Inner Loop Header: Depth=1
2704; LA32-NEXT:    ll.w $a2, $a0, 0
2705; LA32-NEXT:    and $a3, $a2, $a1
2706; LA32-NEXT:    sc.w $a3, $a0, 0
2707; LA32-NEXT:    beqz $a3, .LBB86_1
2708; LA32-NEXT:  # %bb.2:
2709; LA32-NEXT:    move $a0, $a2
2710; LA32-NEXT:    ret
2711;
2712; LA64-LABEL: atomicrmw_and_i32_acq_rel:
2713; LA64:       # %bb.0:
2714; LA64-NEXT:    amand_db.w $a2, $a1, $a0
2715; LA64-NEXT:    move $a0, $a2
2716; LA64-NEXT:    ret
2717  %1 = atomicrmw and ptr %a, i32 %b acq_rel
2718  ret i32 %1
2719}
2720
2721define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
2722; LA32-LABEL: atomicrmw_and_i64_acq_rel:
2723; LA32:       # %bb.0:
2724; LA32-NEXT:    addi.w $sp, $sp, -16
2725; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2726; LA32-NEXT:    ori $a3, $zero, 4
2727; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
2728; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2729; LA32-NEXT:    addi.w $sp, $sp, 16
2730; LA32-NEXT:    ret
2731;
2732; LA64-LABEL: atomicrmw_and_i64_acq_rel:
2733; LA64:       # %bb.0:
2734; LA64-NEXT:    amand_db.d $a2, $a1, $a0
2735; LA64-NEXT:    move $a0, $a2
2736; LA64-NEXT:    ret
2737  %1 = atomicrmw and ptr %a, i64 %b acq_rel
2738  ret i64 %1
2739}
2740
2741define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
2742; LA32-LABEL: atomicrmw_or_i8_acq_rel:
2743; LA32:       # %bb.0:
2744; LA32-NEXT:    slli.w $a2, $a0, 3
2745; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2746; LA32-NEXT:    andi $a1, $a1, 255
2747; LA32-NEXT:    sll.w $a1, $a1, $a2
2748; LA32-NEXT:  .LBB88_1: # =>This Inner Loop Header: Depth=1
2749; LA32-NEXT:    ll.w $a3, $a0, 0
2750; LA32-NEXT:    or $a4, $a3, $a1
2751; LA32-NEXT:    sc.w $a4, $a0, 0
2752; LA32-NEXT:    beqz $a4, .LBB88_1
2753; LA32-NEXT:  # %bb.2:
2754; LA32-NEXT:    srl.w $a0, $a3, $a2
2755; LA32-NEXT:    ret
2756;
2757; LA64-LABEL: atomicrmw_or_i8_acq_rel:
2758; LA64:       # %bb.0:
2759; LA64-NEXT:    slli.d $a2, $a0, 3
2760; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2761; LA64-NEXT:    andi $a1, $a1, 255
2762; LA64-NEXT:    sll.w $a1, $a1, $a2
2763; LA64-NEXT:    amor_db.w $a3, $a1, $a0
2764; LA64-NEXT:    srl.w $a0, $a3, $a2
2765; LA64-NEXT:    ret
2766  %1 = atomicrmw or ptr %a, i8 %b acq_rel
2767  ret i8 %1
2768}
2769
2770define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
2771; LA32-LABEL: atomicrmw_or_i16_acq_rel:
2772; LA32:       # %bb.0:
2773; LA32-NEXT:    slli.w $a2, $a0, 3
2774; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2775; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2776; LA32-NEXT:    sll.w $a1, $a1, $a2
2777; LA32-NEXT:  .LBB89_1: # =>This Inner Loop Header: Depth=1
2778; LA32-NEXT:    ll.w $a3, $a0, 0
2779; LA32-NEXT:    or $a4, $a3, $a1
2780; LA32-NEXT:    sc.w $a4, $a0, 0
2781; LA32-NEXT:    beqz $a4, .LBB89_1
2782; LA32-NEXT:  # %bb.2:
2783; LA32-NEXT:    srl.w $a0, $a3, $a2
2784; LA32-NEXT:    ret
2785;
2786; LA64-LABEL: atomicrmw_or_i16_acq_rel:
2787; LA64:       # %bb.0:
2788; LA64-NEXT:    slli.d $a2, $a0, 3
2789; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2790; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2791; LA64-NEXT:    sll.w $a1, $a1, $a2
2792; LA64-NEXT:    amor_db.w $a3, $a1, $a0
2793; LA64-NEXT:    srl.w $a0, $a3, $a2
2794; LA64-NEXT:    ret
2795  %1 = atomicrmw or ptr %a, i16 %b acq_rel
2796  ret i16 %1
2797}
2798
2799define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
2800; LA32-LABEL: atomicrmw_or_i32_acq_rel:
2801; LA32:       # %bb.0:
2802; LA32-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
2803; LA32-NEXT:    ll.w $a2, $a0, 0
2804; LA32-NEXT:    or $a3, $a2, $a1
2805; LA32-NEXT:    sc.w $a3, $a0, 0
2806; LA32-NEXT:    beqz $a3, .LBB90_1
2807; LA32-NEXT:  # %bb.2:
2808; LA32-NEXT:    move $a0, $a2
2809; LA32-NEXT:    ret
2810;
2811; LA64-LABEL: atomicrmw_or_i32_acq_rel:
2812; LA64:       # %bb.0:
2813; LA64-NEXT:    amor_db.w $a2, $a1, $a0
2814; LA64-NEXT:    move $a0, $a2
2815; LA64-NEXT:    ret
2816  %1 = atomicrmw or ptr %a, i32 %b acq_rel
2817  ret i32 %1
2818}
2819
2820define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
2821; LA32-LABEL: atomicrmw_or_i64_acq_rel:
2822; LA32:       # %bb.0:
2823; LA32-NEXT:    addi.w $sp, $sp, -16
2824; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2825; LA32-NEXT:    ori $a3, $zero, 4
2826; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
2827; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2828; LA32-NEXT:    addi.w $sp, $sp, 16
2829; LA32-NEXT:    ret
2830;
2831; LA64-LABEL: atomicrmw_or_i64_acq_rel:
2832; LA64:       # %bb.0:
2833; LA64-NEXT:    amor_db.d $a2, $a1, $a0
2834; LA64-NEXT:    move $a0, $a2
2835; LA64-NEXT:    ret
2836  %1 = atomicrmw or ptr %a, i64 %b acq_rel
2837  ret i64 %1
2838}
2839
2840define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
2841; LA32-LABEL: atomicrmw_xor_i8_acq_rel:
2842; LA32:       # %bb.0:
2843; LA32-NEXT:    slli.w $a2, $a0, 3
2844; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2845; LA32-NEXT:    andi $a1, $a1, 255
2846; LA32-NEXT:    sll.w $a1, $a1, $a2
2847; LA32-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
2848; LA32-NEXT:    ll.w $a3, $a0, 0
2849; LA32-NEXT:    xor $a4, $a3, $a1
2850; LA32-NEXT:    sc.w $a4, $a0, 0
2851; LA32-NEXT:    beqz $a4, .LBB92_1
2852; LA32-NEXT:  # %bb.2:
2853; LA32-NEXT:    srl.w $a0, $a3, $a2
2854; LA32-NEXT:    ret
2855;
2856; LA64-LABEL: atomicrmw_xor_i8_acq_rel:
2857; LA64:       # %bb.0:
2858; LA64-NEXT:    slli.d $a2, $a0, 3
2859; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2860; LA64-NEXT:    andi $a1, $a1, 255
2861; LA64-NEXT:    sll.w $a1, $a1, $a2
2862; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
2863; LA64-NEXT:    srl.w $a0, $a3, $a2
2864; LA64-NEXT:    ret
2865  %1 = atomicrmw xor ptr %a, i8 %b acq_rel
2866  ret i8 %1
2867}
2868
2869define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
2870; LA32-LABEL: atomicrmw_xor_i16_acq_rel:
2871; LA32:       # %bb.0:
2872; LA32-NEXT:    slli.w $a2, $a0, 3
2873; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2874; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
2875; LA32-NEXT:    sll.w $a1, $a1, $a2
2876; LA32-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
2877; LA32-NEXT:    ll.w $a3, $a0, 0
2878; LA32-NEXT:    xor $a4, $a3, $a1
2879; LA32-NEXT:    sc.w $a4, $a0, 0
2880; LA32-NEXT:    beqz $a4, .LBB93_1
2881; LA32-NEXT:  # %bb.2:
2882; LA32-NEXT:    srl.w $a0, $a3, $a2
2883; LA32-NEXT:    ret
2884;
2885; LA64-LABEL: atomicrmw_xor_i16_acq_rel:
2886; LA64:       # %bb.0:
2887; LA64-NEXT:    slli.d $a2, $a0, 3
2888; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2889; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
2890; LA64-NEXT:    sll.w $a1, $a1, $a2
2891; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
2892; LA64-NEXT:    srl.w $a0, $a3, $a2
2893; LA64-NEXT:    ret
2894  %1 = atomicrmw xor ptr %a, i16 %b acq_rel
2895  ret i16 %1
2896}
2897
2898define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
2899; LA32-LABEL: atomicrmw_xor_i32_acq_rel:
2900; LA32:       # %bb.0:
2901; LA32-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
2902; LA32-NEXT:    ll.w $a2, $a0, 0
2903; LA32-NEXT:    xor $a3, $a2, $a1
2904; LA32-NEXT:    sc.w $a3, $a0, 0
2905; LA32-NEXT:    beqz $a3, .LBB94_1
2906; LA32-NEXT:  # %bb.2:
2907; LA32-NEXT:    move $a0, $a2
2908; LA32-NEXT:    ret
2909;
2910; LA64-LABEL: atomicrmw_xor_i32_acq_rel:
2911; LA64:       # %bb.0:
2912; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
2913; LA64-NEXT:    move $a0, $a2
2914; LA64-NEXT:    ret
2915  %1 = atomicrmw xor ptr %a, i32 %b acq_rel
2916  ret i32 %1
2917}
2918
2919define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
2920; LA32-LABEL: atomicrmw_xor_i64_acq_rel:
2921; LA32:       # %bb.0:
2922; LA32-NEXT:    addi.w $sp, $sp, -16
2923; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
2924; LA32-NEXT:    ori $a3, $zero, 4
2925; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
2926; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
2927; LA32-NEXT:    addi.w $sp, $sp, 16
2928; LA32-NEXT:    ret
2929;
2930; LA64-LABEL: atomicrmw_xor_i64_acq_rel:
2931; LA64:       # %bb.0:
2932; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
2933; LA64-NEXT:    move $a0, $a2
2934; LA64-NEXT:    ret
2935  %1 = atomicrmw xor ptr %a, i64 %b acq_rel
2936  ret i64 %1
2937}
2938
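; seq_cst variants follow. LA64 keeps using the barrier (_db) am* forms and
; masked ll.w/sc.w loops; on LA32 the i64 libcalls now pass ordering
; argument 5 (seq_cst).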
2939define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
2940; LA32-LABEL: atomicrmw_xchg_i8_seq_cst:
2941; LA32:       # %bb.0:
2942; LA32-NEXT:    slli.w $a2, $a0, 3
2943; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2944; LA32-NEXT:    ori $a3, $zero, 255
2945; LA32-NEXT:    sll.w $a3, $a3, $a2
2946; LA32-NEXT:    andi $a1, $a1, 255
2947; LA32-NEXT:    sll.w $a1, $a1, $a2
2948; LA32-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
2949; LA32-NEXT:    ll.w $a4, $a0, 0
2950; LA32-NEXT:    addi.w $a5, $a1, 0
2951; LA32-NEXT:    xor $a5, $a4, $a5
2952; LA32-NEXT:    and $a5, $a5, $a3
2953; LA32-NEXT:    xor $a5, $a4, $a5
2954; LA32-NEXT:    sc.w $a5, $a0, 0
2955; LA32-NEXT:    beqz $a5, .LBB96_1
2956; LA32-NEXT:  # %bb.2:
2957; LA32-NEXT:    srl.w $a0, $a4, $a2
2958; LA32-NEXT:    ret
2959;
2960; LA64-LABEL: atomicrmw_xchg_i8_seq_cst:
2961; LA64:       # %bb.0:
2962; LA64-NEXT:    slli.d $a2, $a0, 3
2963; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
2964; LA64-NEXT:    ori $a3, $zero, 255
2965; LA64-NEXT:    sll.w $a3, $a3, $a2
2966; LA64-NEXT:    andi $a1, $a1, 255
2967; LA64-NEXT:    sll.w $a1, $a1, $a2
2968; LA64-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
2969; LA64-NEXT:    ll.w $a4, $a0, 0
2970; LA64-NEXT:    addi.w $a5, $a1, 0
2971; LA64-NEXT:    xor $a5, $a4, $a5
2972; LA64-NEXT:    and $a5, $a5, $a3
2973; LA64-NEXT:    xor $a5, $a4, $a5
2974; LA64-NEXT:    sc.w $a5, $a0, 0
2975; LA64-NEXT:    beqz $a5, .LBB96_1
2976; LA64-NEXT:  # %bb.2:
2977; LA64-NEXT:    srl.w $a0, $a4, $a2
2978; LA64-NEXT:    ret
2979  %1 = atomicrmw xchg ptr %a, i8 %b seq_cst
2980  ret i8 %1
2981}
2982
2983define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
2984; LA32-LABEL: atomicrmw_xchg_0_i8_seq_cst:
2985; LA32:       # %bb.0:
2986; LA32-NEXT:    slli.w $a1, $a0, 3
2987; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
2988; LA32-NEXT:    ori $a2, $zero, 255
2989; LA32-NEXT:    sll.w $a2, $a2, $a1
2990; LA32-NEXT:    nor $a2, $a2, $zero
2991; LA32-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
2992; LA32-NEXT:    ll.w $a3, $a0, 0
2993; LA32-NEXT:    and $a4, $a3, $a2
2994; LA32-NEXT:    sc.w $a4, $a0, 0
2995; LA32-NEXT:    beqz $a4, .LBB97_1
2996; LA32-NEXT:  # %bb.2:
2997; LA32-NEXT:    srl.w $a0, $a3, $a1
2998; LA32-NEXT:    ret
2999;
3000; LA64-LABEL: atomicrmw_xchg_0_i8_seq_cst:
3001; LA64:       # %bb.0:
3002; LA64-NEXT:    slli.d $a1, $a0, 3
3003; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3004; LA64-NEXT:    ori $a2, $zero, 255
3005; LA64-NEXT:    sll.w $a2, $a2, $a1
3006; LA64-NEXT:    nor $a2, $a2, $zero
3007; LA64-NEXT:    amand_db.w $a3, $a2, $a0
3008; LA64-NEXT:    srl.w $a0, $a3, $a1
3009; LA64-NEXT:    ret
3010  %1 = atomicrmw xchg ptr %a, i8 0 seq_cst
3011  ret i8 %1
3012}
3013
3014define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
3015; LA32-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
3016; LA32:       # %bb.0:
3017; LA32-NEXT:    slli.w $a1, $a0, 3
3018; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3019; LA32-NEXT:    ori $a2, $zero, 255
3020; LA32-NEXT:    sll.w $a2, $a2, $a1
3021; LA32-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
3022; LA32-NEXT:    ll.w $a3, $a0, 0
3023; LA32-NEXT:    or $a4, $a3, $a2
3024; LA32-NEXT:    sc.w $a4, $a0, 0
3025; LA32-NEXT:    beqz $a4, .LBB98_1
3026; LA32-NEXT:  # %bb.2:
3027; LA32-NEXT:    srl.w $a0, $a3, $a1
3028; LA32-NEXT:    ret
3029;
3030; LA64-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
3031; LA64:       # %bb.0:
3032; LA64-NEXT:    slli.d $a1, $a0, 3
3033; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3034; LA64-NEXT:    ori $a2, $zero, 255
3035; LA64-NEXT:    sll.w $a2, $a2, $a1
3036; LA64-NEXT:    amor_db.w $a3, $a2, $a0
3037; LA64-NEXT:    srl.w $a0, $a3, $a1
3038; LA64-NEXT:    ret
3039  %1 = atomicrmw xchg ptr %a, i8 -1 seq_cst
3040  ret i8 %1
3041}
3042
3043define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
3044; LA32-LABEL: atomicrmw_xchg_i16_seq_cst:
3045; LA32:       # %bb.0:
3046; LA32-NEXT:    slli.w $a2, $a0, 3
3047; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3048; LA32-NEXT:    lu12i.w $a3, 15
3049; LA32-NEXT:    ori $a3, $a3, 4095
3050; LA32-NEXT:    sll.w $a3, $a3, $a2
3051; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3052; LA32-NEXT:    sll.w $a1, $a1, $a2
3053; LA32-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
3054; LA32-NEXT:    ll.w $a4, $a0, 0
3055; LA32-NEXT:    addi.w $a5, $a1, 0
3056; LA32-NEXT:    xor $a5, $a4, $a5
3057; LA32-NEXT:    and $a5, $a5, $a3
3058; LA32-NEXT:    xor $a5, $a4, $a5
3059; LA32-NEXT:    sc.w $a5, $a0, 0
3060; LA32-NEXT:    beqz $a5, .LBB99_1
3061; LA32-NEXT:  # %bb.2:
3062; LA32-NEXT:    srl.w $a0, $a4, $a2
3063; LA32-NEXT:    ret
3064;
3065; LA64-LABEL: atomicrmw_xchg_i16_seq_cst:
3066; LA64:       # %bb.0:
3067; LA64-NEXT:    slli.d $a2, $a0, 3
3068; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3069; LA64-NEXT:    lu12i.w $a3, 15
3070; LA64-NEXT:    ori $a3, $a3, 4095
3071; LA64-NEXT:    sll.w $a3, $a3, $a2
3072; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3073; LA64-NEXT:    sll.w $a1, $a1, $a2
3074; LA64-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
3075; LA64-NEXT:    ll.w $a4, $a0, 0
3076; LA64-NEXT:    addi.w $a5, $a1, 0
3077; LA64-NEXT:    xor $a5, $a4, $a5
3078; LA64-NEXT:    and $a5, $a5, $a3
3079; LA64-NEXT:    xor $a5, $a4, $a5
3080; LA64-NEXT:    sc.w $a5, $a0, 0
3081; LA64-NEXT:    beqz $a5, .LBB99_1
3082; LA64-NEXT:  # %bb.2:
3083; LA64-NEXT:    srl.w $a0, $a4, $a2
3084; LA64-NEXT:    ret
3085  %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
3086  ret i16 %1
3087}
3088
3089define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
3090; LA32-LABEL: atomicrmw_xchg_0_i16_seq_cst:
3091; LA32:       # %bb.0:
3092; LA32-NEXT:    slli.w $a1, $a0, 3
3093; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3094; LA32-NEXT:    lu12i.w $a2, 15
3095; LA32-NEXT:    ori $a2, $a2, 4095
3096; LA32-NEXT:    sll.w $a2, $a2, $a1
3097; LA32-NEXT:    nor $a2, $a2, $zero
3098; LA32-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
3099; LA32-NEXT:    ll.w $a3, $a0, 0
3100; LA32-NEXT:    and $a4, $a3, $a2
3101; LA32-NEXT:    sc.w $a4, $a0, 0
3102; LA32-NEXT:    beqz $a4, .LBB100_1
3103; LA32-NEXT:  # %bb.2:
3104; LA32-NEXT:    srl.w $a0, $a3, $a1
3105; LA32-NEXT:    ret
3106;
3107; LA64-LABEL: atomicrmw_xchg_0_i16_seq_cst:
3108; LA64:       # %bb.0:
3109; LA64-NEXT:    slli.d $a1, $a0, 3
3110; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3111; LA64-NEXT:    lu12i.w $a2, 15
3112; LA64-NEXT:    ori $a2, $a2, 4095
3113; LA64-NEXT:    sll.w $a2, $a2, $a1
3114; LA64-NEXT:    nor $a2, $a2, $zero
3115; LA64-NEXT:    amand_db.w $a3, $a2, $a0
3116; LA64-NEXT:    srl.w $a0, $a3, $a1
3117; LA64-NEXT:    ret
3118  %1 = atomicrmw xchg ptr %a, i16 0 seq_cst
3119  ret i16 %1
3120}
3121
3122define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
3123; LA32-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
3124; LA32:       # %bb.0:
3125; LA32-NEXT:    slli.w $a1, $a0, 3
3126; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3127; LA32-NEXT:    lu12i.w $a2, 15
3128; LA32-NEXT:    ori $a2, $a2, 4095
3129; LA32-NEXT:    sll.w $a2, $a2, $a1
3130; LA32-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
3131; LA32-NEXT:    ll.w $a3, $a0, 0
3132; LA32-NEXT:    or $a4, $a3, $a2
3133; LA32-NEXT:    sc.w $a4, $a0, 0
3134; LA32-NEXT:    beqz $a4, .LBB101_1
3135; LA32-NEXT:  # %bb.2:
3136; LA32-NEXT:    srl.w $a0, $a3, $a1
3137; LA32-NEXT:    ret
3138;
3139; LA64-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
3140; LA64:       # %bb.0:
3141; LA64-NEXT:    slli.d $a1, $a0, 3
3142; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3143; LA64-NEXT:    lu12i.w $a2, 15
3144; LA64-NEXT:    ori $a2, $a2, 4095
3145; LA64-NEXT:    sll.w $a2, $a2, $a1
3146; LA64-NEXT:    amor_db.w $a3, $a2, $a0
3147; LA64-NEXT:    srl.w $a0, $a3, $a1
3148; LA64-NEXT:    ret
3149  %1 = atomicrmw xchg ptr %a, i16 -1 seq_cst
3150  ret i16 %1
3151}
3152
3153define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
3154; LA32-LABEL: atomicrmw_xchg_i32_seq_cst:
3155; LA32:       # %bb.0:
3156; LA32-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
3157; LA32-NEXT:    ll.w $a2, $a0, 0
3158; LA32-NEXT:    move $a3, $a1
3159; LA32-NEXT:    sc.w $a3, $a0, 0
3160; LA32-NEXT:    beqz $a3, .LBB102_1
3161; LA32-NEXT:  # %bb.2:
3162; LA32-NEXT:    move $a0, $a2
3163; LA32-NEXT:    ret
3164;
3165; LA64-LABEL: atomicrmw_xchg_i32_seq_cst:
3166; LA64:       # %bb.0:
3167; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
3168; LA64-NEXT:    move $a0, $a2
3169; LA64-NEXT:    ret
3170  %1 = atomicrmw xchg ptr %a, i32 %b seq_cst
3171  ret i32 %1
3172}
3173
3174define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
3175; LA32-LABEL: atomicrmw_xchg_i64_seq_cst:
3176; LA32:       # %bb.0:
3177; LA32-NEXT:    addi.w $sp, $sp, -16
3178; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3179; LA32-NEXT:    ori $a3, $zero, 5
3180; LA32-NEXT:    bl %plt(__atomic_exchange_8)
3181; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3182; LA32-NEXT:    addi.w $sp, $sp, 16
3183; LA32-NEXT:    ret
3184;
3185; LA64-LABEL: atomicrmw_xchg_i64_seq_cst:
3186; LA64:       # %bb.0:
3187; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
3188; LA64-NEXT:    move $a0, $a2
3189; LA64-NEXT:    ret
3190  %1 = atomicrmw xchg ptr %a, i64 %b seq_cst
3191  ret i64 %1
3192}
3193
3194define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
3195; LA32-LABEL: atomicrmw_add_i8_seq_cst:
3196; LA32:       # %bb.0:
3197; LA32-NEXT:    slli.w $a2, $a0, 3
3198; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3199; LA32-NEXT:    ori $a3, $zero, 255
3200; LA32-NEXT:    sll.w $a3, $a3, $a2
3201; LA32-NEXT:    andi $a1, $a1, 255
3202; LA32-NEXT:    sll.w $a1, $a1, $a2
3203; LA32-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
3204; LA32-NEXT:    ll.w $a4, $a0, 0
3205; LA32-NEXT:    add.w $a5, $a4, $a1
3206; LA32-NEXT:    xor $a5, $a4, $a5
3207; LA32-NEXT:    and $a5, $a5, $a3
3208; LA32-NEXT:    xor $a5, $a4, $a5
3209; LA32-NEXT:    sc.w $a5, $a0, 0
3210; LA32-NEXT:    beqz $a5, .LBB104_1
3211; LA32-NEXT:  # %bb.2:
3212; LA32-NEXT:    srl.w $a0, $a4, $a2
3213; LA32-NEXT:    ret
3214;
3215; LA64-LABEL: atomicrmw_add_i8_seq_cst:
3216; LA64:       # %bb.0:
3217; LA64-NEXT:    slli.d $a2, $a0, 3
3218; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3219; LA64-NEXT:    ori $a3, $zero, 255
3220; LA64-NEXT:    sll.w $a3, $a3, $a2
3221; LA64-NEXT:    andi $a1, $a1, 255
3222; LA64-NEXT:    sll.w $a1, $a1, $a2
3223; LA64-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
3224; LA64-NEXT:    ll.w $a4, $a0, 0
3225; LA64-NEXT:    add.w $a5, $a4, $a1
3226; LA64-NEXT:    xor $a5, $a4, $a5
3227; LA64-NEXT:    and $a5, $a5, $a3
3228; LA64-NEXT:    xor $a5, $a4, $a5
3229; LA64-NEXT:    sc.w $a5, $a0, 0
3230; LA64-NEXT:    beqz $a5, .LBB104_1
3231; LA64-NEXT:  # %bb.2:
3232; LA64-NEXT:    srl.w $a0, $a4, $a2
3233; LA64-NEXT:    ret
3234  %1 = atomicrmw add ptr %a, i8 %b seq_cst
3235  ret i8 %1
3236}
3237
3238define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
3239; LA32-LABEL: atomicrmw_add_i16_seq_cst:
3240; LA32:       # %bb.0:
3241; LA32-NEXT:    slli.w $a2, $a0, 3
3242; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3243; LA32-NEXT:    lu12i.w $a3, 15
3244; LA32-NEXT:    ori $a3, $a3, 4095
3245; LA32-NEXT:    sll.w $a3, $a3, $a2
3246; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3247; LA32-NEXT:    sll.w $a1, $a1, $a2
3248; LA32-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
3249; LA32-NEXT:    ll.w $a4, $a0, 0
3250; LA32-NEXT:    add.w $a5, $a4, $a1
3251; LA32-NEXT:    xor $a5, $a4, $a5
3252; LA32-NEXT:    and $a5, $a5, $a3
3253; LA32-NEXT:    xor $a5, $a4, $a5
3254; LA32-NEXT:    sc.w $a5, $a0, 0
3255; LA32-NEXT:    beqz $a5, .LBB105_1
3256; LA32-NEXT:  # %bb.2:
3257; LA32-NEXT:    srl.w $a0, $a4, $a2
3258; LA32-NEXT:    ret
3259;
3260; LA64-LABEL: atomicrmw_add_i16_seq_cst:
3261; LA64:       # %bb.0:
3262; LA64-NEXT:    slli.d $a2, $a0, 3
3263; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3264; LA64-NEXT:    lu12i.w $a3, 15
3265; LA64-NEXT:    ori $a3, $a3, 4095
3266; LA64-NEXT:    sll.w $a3, $a3, $a2
3267; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3268; LA64-NEXT:    sll.w $a1, $a1, $a2
3269; LA64-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
3270; LA64-NEXT:    ll.w $a4, $a0, 0
3271; LA64-NEXT:    add.w $a5, $a4, $a1
3272; LA64-NEXT:    xor $a5, $a4, $a5
3273; LA64-NEXT:    and $a5, $a5, $a3
3274; LA64-NEXT:    xor $a5, $a4, $a5
3275; LA64-NEXT:    sc.w $a5, $a0, 0
3276; LA64-NEXT:    beqz $a5, .LBB105_1
3277; LA64-NEXT:  # %bb.2:
3278; LA64-NEXT:    srl.w $a0, $a4, $a2
3279; LA64-NEXT:    ret
3280  %1 = atomicrmw add ptr %a, i16 %b seq_cst
3281  ret i16 %1
3282}
3283
3284define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
3285; LA32-LABEL: atomicrmw_add_i32_seq_cst:
3286; LA32:       # %bb.0:
3287; LA32-NEXT:  .LBB106_1: # =>This Inner Loop Header: Depth=1
3288; LA32-NEXT:    ll.w $a2, $a0, 0
3289; LA32-NEXT:    add.w $a3, $a2, $a1
3290; LA32-NEXT:    sc.w $a3, $a0, 0
3291; LA32-NEXT:    beqz $a3, .LBB106_1
3292; LA32-NEXT:  # %bb.2:
3293; LA32-NEXT:    move $a0, $a2
3294; LA32-NEXT:    ret
3295;
3296; LA64-LABEL: atomicrmw_add_i32_seq_cst:
3297; LA64:       # %bb.0:
3298; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
3299; LA64-NEXT:    move $a0, $a2
3300; LA64-NEXT:    ret
3301  %1 = atomicrmw add ptr %a, i32 %b seq_cst
3302  ret i32 %1
3303}
3304
3305define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
3306; LA32-LABEL: atomicrmw_add_i64_seq_cst:
3307; LA32:       # %bb.0:
3308; LA32-NEXT:    addi.w $sp, $sp, -16
3309; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3310; LA32-NEXT:    ori $a3, $zero, 5
3311; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
3312; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3313; LA32-NEXT:    addi.w $sp, $sp, 16
3314; LA32-NEXT:    ret
3315;
3316; LA64-LABEL: atomicrmw_add_i64_seq_cst:
3317; LA64:       # %bb.0:
3318; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
3319; LA64-NEXT:    move $a0, $a2
3320; LA64-NEXT:    ret
3321  %1 = atomicrmw add ptr %a, i64 %b seq_cst
3322  ret i64 %1
3323}
3324
3325define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
3326; LA32-LABEL: atomicrmw_sub_i8_seq_cst:
3327; LA32:       # %bb.0:
3328; LA32-NEXT:    slli.w $a2, $a0, 3
3329; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3330; LA32-NEXT:    ori $a3, $zero, 255
3331; LA32-NEXT:    sll.w $a3, $a3, $a2
3332; LA32-NEXT:    andi $a1, $a1, 255
3333; LA32-NEXT:    sll.w $a1, $a1, $a2
3334; LA32-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
3335; LA32-NEXT:    ll.w $a4, $a0, 0
3336; LA32-NEXT:    sub.w $a5, $a4, $a1
3337; LA32-NEXT:    xor $a5, $a4, $a5
3338; LA32-NEXT:    and $a5, $a5, $a3
3339; LA32-NEXT:    xor $a5, $a4, $a5
3340; LA32-NEXT:    sc.w $a5, $a0, 0
3341; LA32-NEXT:    beqz $a5, .LBB108_1
3342; LA32-NEXT:  # %bb.2:
3343; LA32-NEXT:    srl.w $a0, $a4, $a2
3344; LA32-NEXT:    ret
3345;
3346; LA64-LABEL: atomicrmw_sub_i8_seq_cst:
3347; LA64:       # %bb.0:
3348; LA64-NEXT:    slli.d $a2, $a0, 3
3349; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3350; LA64-NEXT:    ori $a3, $zero, 255
3351; LA64-NEXT:    sll.w $a3, $a3, $a2
3352; LA64-NEXT:    andi $a1, $a1, 255
3353; LA64-NEXT:    sll.w $a1, $a1, $a2
3354; LA64-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
3355; LA64-NEXT:    ll.w $a4, $a0, 0
3356; LA64-NEXT:    sub.w $a5, $a4, $a1
3357; LA64-NEXT:    xor $a5, $a4, $a5
3358; LA64-NEXT:    and $a5, $a5, $a3
3359; LA64-NEXT:    xor $a5, $a4, $a5
3360; LA64-NEXT:    sc.w $a5, $a0, 0
3361; LA64-NEXT:    beqz $a5, .LBB108_1
3362; LA64-NEXT:  # %bb.2:
3363; LA64-NEXT:    srl.w $a0, $a4, $a2
3364; LA64-NEXT:    ret
3365  %1 = atomicrmw sub ptr %a, i8 %b seq_cst
3366  ret i8 %1
3367}
3368
3369define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
3370; LA32-LABEL: atomicrmw_sub_i16_seq_cst:
3371; LA32:       # %bb.0:
3372; LA32-NEXT:    slli.w $a2, $a0, 3
3373; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3374; LA32-NEXT:    lu12i.w $a3, 15
3375; LA32-NEXT:    ori $a3, $a3, 4095
3376; LA32-NEXT:    sll.w $a3, $a3, $a2
3377; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3378; LA32-NEXT:    sll.w $a1, $a1, $a2
3379; LA32-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
3380; LA32-NEXT:    ll.w $a4, $a0, 0
3381; LA32-NEXT:    sub.w $a5, $a4, $a1
3382; LA32-NEXT:    xor $a5, $a4, $a5
3383; LA32-NEXT:    and $a5, $a5, $a3
3384; LA32-NEXT:    xor $a5, $a4, $a5
3385; LA32-NEXT:    sc.w $a5, $a0, 0
3386; LA32-NEXT:    beqz $a5, .LBB109_1
3387; LA32-NEXT:  # %bb.2:
3388; LA32-NEXT:    srl.w $a0, $a4, $a2
3389; LA32-NEXT:    ret
3390;
3391; LA64-LABEL: atomicrmw_sub_i16_seq_cst:
3392; LA64:       # %bb.0:
3393; LA64-NEXT:    slli.d $a2, $a0, 3
3394; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3395; LA64-NEXT:    lu12i.w $a3, 15
3396; LA64-NEXT:    ori $a3, $a3, 4095
3397; LA64-NEXT:    sll.w $a3, $a3, $a2
3398; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3399; LA64-NEXT:    sll.w $a1, $a1, $a2
3400; LA64-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
3401; LA64-NEXT:    ll.w $a4, $a0, 0
3402; LA64-NEXT:    sub.w $a5, $a4, $a1
3403; LA64-NEXT:    xor $a5, $a4, $a5
3404; LA64-NEXT:    and $a5, $a5, $a3
3405; LA64-NEXT:    xor $a5, $a4, $a5
3406; LA64-NEXT:    sc.w $a5, $a0, 0
3407; LA64-NEXT:    beqz $a5, .LBB109_1
3408; LA64-NEXT:  # %bb.2:
3409; LA64-NEXT:    srl.w $a0, $a4, $a2
3410; LA64-NEXT:    ret
3411  %1 = atomicrmw sub ptr %a, i16 %b seq_cst
3412  ret i16 %1
3413}
3414
3415define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
3416; LA32-LABEL: atomicrmw_sub_i32_seq_cst:
3417; LA32:       # %bb.0:
3418; LA32-NEXT:  .LBB110_1: # =>This Inner Loop Header: Depth=1
3419; LA32-NEXT:    ll.w $a2, $a0, 0
3420; LA32-NEXT:    sub.w $a3, $a2, $a1
3421; LA32-NEXT:    sc.w $a3, $a0, 0
3422; LA32-NEXT:    beqz $a3, .LBB110_1
3423; LA32-NEXT:  # %bb.2:
3424; LA32-NEXT:    move $a0, $a2
3425; LA32-NEXT:    ret
3426;
3427; LA64-LABEL: atomicrmw_sub_i32_seq_cst:
3428; LA64:       # %bb.0:
3429; LA64-NEXT:    sub.w $a2, $zero, $a1
3430; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
3431; LA64-NEXT:    move $a0, $a1
3432; LA64-NEXT:    ret
3433  %1 = atomicrmw sub ptr %a, i32 %b seq_cst
3434  ret i32 %1
3435}
3436
3437define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
3438; LA32-LABEL: atomicrmw_sub_i64_seq_cst:
3439; LA32:       # %bb.0:
3440; LA32-NEXT:    addi.w $sp, $sp, -16
3441; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3442; LA32-NEXT:    ori $a3, $zero, 5
3443; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
3444; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3445; LA32-NEXT:    addi.w $sp, $sp, 16
3446; LA32-NEXT:    ret
3447;
3448; LA64-LABEL: atomicrmw_sub_i64_seq_cst:
3449; LA64:       # %bb.0:
3450; LA64-NEXT:    sub.d $a2, $zero, $a1
3451; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
3452; LA64-NEXT:    move $a0, $a1
3453; LA64-NEXT:    ret
3454  %1 = atomicrmw sub ptr %a, i64 %b seq_cst
3455  ret i64 %1
3456}
3457
3458define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
3459; LA32-LABEL: atomicrmw_nand_i8_seq_cst:
3460; LA32:       # %bb.0:
3461; LA32-NEXT:    slli.w $a2, $a0, 3
3462; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3463; LA32-NEXT:    ori $a3, $zero, 255
3464; LA32-NEXT:    sll.w $a3, $a3, $a2
3465; LA32-NEXT:    andi $a1, $a1, 255
3466; LA32-NEXT:    sll.w $a1, $a1, $a2
3467; LA32-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
3468; LA32-NEXT:    ll.w $a4, $a0, 0
3469; LA32-NEXT:    and $a5, $a4, $a1
3470; LA32-NEXT:    nor $a5, $a5, $zero
3471; LA32-NEXT:    xor $a5, $a4, $a5
3472; LA32-NEXT:    and $a5, $a5, $a3
3473; LA32-NEXT:    xor $a5, $a4, $a5
3474; LA32-NEXT:    sc.w $a5, $a0, 0
3475; LA32-NEXT:    beqz $a5, .LBB112_1
3476; LA32-NEXT:  # %bb.2:
3477; LA32-NEXT:    srl.w $a0, $a4, $a2
3478; LA32-NEXT:    ret
3479;
3480; LA64-LABEL: atomicrmw_nand_i8_seq_cst:
3481; LA64:       # %bb.0:
3482; LA64-NEXT:    slli.d $a2, $a0, 3
3483; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3484; LA64-NEXT:    ori $a3, $zero, 255
3485; LA64-NEXT:    sll.w $a3, $a3, $a2
3486; LA64-NEXT:    andi $a1, $a1, 255
3487; LA64-NEXT:    sll.w $a1, $a1, $a2
3488; LA64-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
3489; LA64-NEXT:    ll.w $a4, $a0, 0
3490; LA64-NEXT:    and $a5, $a4, $a1
3491; LA64-NEXT:    nor $a5, $a5, $zero
3492; LA64-NEXT:    xor $a5, $a4, $a5
3493; LA64-NEXT:    and $a5, $a5, $a3
3494; LA64-NEXT:    xor $a5, $a4, $a5
3495; LA64-NEXT:    sc.w $a5, $a0, 0
3496; LA64-NEXT:    beqz $a5, .LBB112_1
3497; LA64-NEXT:  # %bb.2:
3498; LA64-NEXT:    srl.w $a0, $a4, $a2
3499; LA64-NEXT:    ret
3500  %1 = atomicrmw nand ptr %a, i8 %b seq_cst
3501  ret i8 %1
3502}
3503
3504define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
3505; LA32-LABEL: atomicrmw_nand_i16_seq_cst:
3506; LA32:       # %bb.0:
3507; LA32-NEXT:    slli.w $a2, $a0, 3
3508; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3509; LA32-NEXT:    lu12i.w $a3, 15
3510; LA32-NEXT:    ori $a3, $a3, 4095
3511; LA32-NEXT:    sll.w $a3, $a3, $a2
3512; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3513; LA32-NEXT:    sll.w $a1, $a1, $a2
3514; LA32-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
3515; LA32-NEXT:    ll.w $a4, $a0, 0
3516; LA32-NEXT:    and $a5, $a4, $a1
3517; LA32-NEXT:    nor $a5, $a5, $zero
3518; LA32-NEXT:    xor $a5, $a4, $a5
3519; LA32-NEXT:    and $a5, $a5, $a3
3520; LA32-NEXT:    xor $a5, $a4, $a5
3521; LA32-NEXT:    sc.w $a5, $a0, 0
3522; LA32-NEXT:    beqz $a5, .LBB113_1
3523; LA32-NEXT:  # %bb.2:
3524; LA32-NEXT:    srl.w $a0, $a4, $a2
3525; LA32-NEXT:    ret
3526;
3527; LA64-LABEL: atomicrmw_nand_i16_seq_cst:
3528; LA64:       # %bb.0:
3529; LA64-NEXT:    slli.d $a2, $a0, 3
3530; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3531; LA64-NEXT:    lu12i.w $a3, 15
3532; LA64-NEXT:    ori $a3, $a3, 4095
3533; LA64-NEXT:    sll.w $a3, $a3, $a2
3534; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3535; LA64-NEXT:    sll.w $a1, $a1, $a2
3536; LA64-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
3537; LA64-NEXT:    ll.w $a4, $a0, 0
3538; LA64-NEXT:    and $a5, $a4, $a1
3539; LA64-NEXT:    nor $a5, $a5, $zero
3540; LA64-NEXT:    xor $a5, $a4, $a5
3541; LA64-NEXT:    and $a5, $a5, $a3
3542; LA64-NEXT:    xor $a5, $a4, $a5
3543; LA64-NEXT:    sc.w $a5, $a0, 0
3544; LA64-NEXT:    beqz $a5, .LBB113_1
3545; LA64-NEXT:  # %bb.2:
3546; LA64-NEXT:    srl.w $a0, $a4, $a2
3547; LA64-NEXT:    ret
3548  %1 = atomicrmw nand ptr %a, i16 %b seq_cst
3549  ret i16 %1
3550}
3551
3552define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
3553; LA32-LABEL: atomicrmw_nand_i32_seq_cst:
3554; LA32:       # %bb.0:
3555; LA32-NEXT:  .LBB114_1: # =>This Inner Loop Header: Depth=1
3556; LA32-NEXT:    ll.w $a2, $a0, 0
3557; LA32-NEXT:    and $a3, $a2, $a1
3558; LA32-NEXT:    nor $a3, $a3, $zero
3559; LA32-NEXT:    sc.w $a3, $a0, 0
3560; LA32-NEXT:    beqz $a3, .LBB114_1
3561; LA32-NEXT:  # %bb.2:
3562; LA32-NEXT:    move $a0, $a2
3563; LA32-NEXT:    ret
3564;
3565; LA64-LABEL: atomicrmw_nand_i32_seq_cst:
3566; LA64:       # %bb.0:
3567; LA64-NEXT:  .LBB114_1: # =>This Inner Loop Header: Depth=1
3568; LA64-NEXT:    ll.w $a2, $a0, 0
3569; LA64-NEXT:    and $a3, $a2, $a1
3570; LA64-NEXT:    nor $a3, $a3, $zero
3571; LA64-NEXT:    sc.w $a3, $a0, 0
3572; LA64-NEXT:    beqz $a3, .LBB114_1
3573; LA64-NEXT:  # %bb.2:
3574; LA64-NEXT:    move $a0, $a2
3575; LA64-NEXT:    ret
3576  %1 = atomicrmw nand ptr %a, i32 %b seq_cst
3577  ret i32 %1
3578}
3579
3580define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
3581; LA32-LABEL: atomicrmw_nand_i64_seq_cst:
3582; LA32:       # %bb.0:
3583; LA32-NEXT:    addi.w $sp, $sp, -16
3584; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3585; LA32-NEXT:    ori $a3, $zero, 5
3586; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
3587; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3588; LA32-NEXT:    addi.w $sp, $sp, 16
3589; LA32-NEXT:    ret
3590;
3591; LA64-LABEL: atomicrmw_nand_i64_seq_cst:
3592; LA64:       # %bb.0:
3593; LA64-NEXT:  .LBB115_1: # =>This Inner Loop Header: Depth=1
3594; LA64-NEXT:    ll.d $a2, $a0, 0
3595; LA64-NEXT:    and $a3, $a2, $a1
3596; LA64-NEXT:    nor $a3, $a3, $zero
3597; LA64-NEXT:    sc.d $a3, $a0, 0
3598; LA64-NEXT:    beqz $a3, .LBB115_1
3599; LA64-NEXT:  # %bb.2:
3600; LA64-NEXT:    move $a0, $a2
3601; LA64-NEXT:    ret
3602  %1 = atomicrmw nand ptr %a, i64 %b seq_cst
3603  ret i64 %1
3604}
3605
3606define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
3607; LA32-LABEL: atomicrmw_and_i8_seq_cst:
3608; LA32:       # %bb.0:
3609; LA32-NEXT:    slli.w $a2, $a0, 3
3610; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3611; LA32-NEXT:    ori $a3, $zero, 255
3612; LA32-NEXT:    sll.w $a3, $a3, $a2
3613; LA32-NEXT:    andi $a1, $a1, 255
3614; LA32-NEXT:    sll.w $a1, $a1, $a2
3615; LA32-NEXT:    orn $a1, $a1, $a3
3616; LA32-NEXT:  .LBB116_1: # =>This Inner Loop Header: Depth=1
3617; LA32-NEXT:    ll.w $a3, $a0, 0
3618; LA32-NEXT:    and $a4, $a3, $a1
3619; LA32-NEXT:    sc.w $a4, $a0, 0
3620; LA32-NEXT:    beqz $a4, .LBB116_1
3621; LA32-NEXT:  # %bb.2:
3622; LA32-NEXT:    srl.w $a0, $a3, $a2
3623; LA32-NEXT:    ret
3624;
3625; LA64-LABEL: atomicrmw_and_i8_seq_cst:
3626; LA64:       # %bb.0:
3627; LA64-NEXT:    slli.d $a2, $a0, 3
3628; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3629; LA64-NEXT:    ori $a3, $zero, 255
3630; LA64-NEXT:    sll.w $a3, $a3, $a2
3631; LA64-NEXT:    andi $a1, $a1, 255
3632; LA64-NEXT:    sll.w $a1, $a1, $a2
3633; LA64-NEXT:    orn $a1, $a1, $a3
3634; LA64-NEXT:    amand_db.w $a3, $a1, $a0
3635; LA64-NEXT:    srl.w $a0, $a3, $a2
3636; LA64-NEXT:    ret
3637  %1 = atomicrmw and ptr %a, i8 %b seq_cst
3638  ret i8 %1
3639}
3640
3641define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
3642; LA32-LABEL: atomicrmw_and_i16_seq_cst:
3643; LA32:       # %bb.0:
3644; LA32-NEXT:    slli.w $a2, $a0, 3
3645; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3646; LA32-NEXT:    lu12i.w $a3, 15
3647; LA32-NEXT:    ori $a3, $a3, 4095
3648; LA32-NEXT:    sll.w $a3, $a3, $a2
3649; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3650; LA32-NEXT:    sll.w $a1, $a1, $a2
3651; LA32-NEXT:    orn $a1, $a1, $a3
3652; LA32-NEXT:  .LBB117_1: # =>This Inner Loop Header: Depth=1
3653; LA32-NEXT:    ll.w $a3, $a0, 0
3654; LA32-NEXT:    and $a4, $a3, $a1
3655; LA32-NEXT:    sc.w $a4, $a0, 0
3656; LA32-NEXT:    beqz $a4, .LBB117_1
3657; LA32-NEXT:  # %bb.2:
3658; LA32-NEXT:    srl.w $a0, $a3, $a2
3659; LA32-NEXT:    ret
3660;
3661; LA64-LABEL: atomicrmw_and_i16_seq_cst:
3662; LA64:       # %bb.0:
3663; LA64-NEXT:    slli.d $a2, $a0, 3
3664; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3665; LA64-NEXT:    lu12i.w $a3, 15
3666; LA64-NEXT:    ori $a3, $a3, 4095
3667; LA64-NEXT:    sll.w $a3, $a3, $a2
3668; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3669; LA64-NEXT:    sll.w $a1, $a1, $a2
3670; LA64-NEXT:    orn $a1, $a1, $a3
3671; LA64-NEXT:    amand_db.w $a3, $a1, $a0
3672; LA64-NEXT:    srl.w $a0, $a3, $a2
3673; LA64-NEXT:    ret
3674  %1 = atomicrmw and ptr %a, i16 %b seq_cst
3675  ret i16 %1
3676}
3677
3678define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
3679; LA32-LABEL: atomicrmw_and_i32_seq_cst:
3680; LA32:       # %bb.0:
3681; LA32-NEXT:  .LBB118_1: # =>This Inner Loop Header: Depth=1
3682; LA32-NEXT:    ll.w $a2, $a0, 0
3683; LA32-NEXT:    and $a3, $a2, $a1
3684; LA32-NEXT:    sc.w $a3, $a0, 0
3685; LA32-NEXT:    beqz $a3, .LBB118_1
3686; LA32-NEXT:  # %bb.2:
3687; LA32-NEXT:    move $a0, $a2
3688; LA32-NEXT:    ret
3689;
3690; LA64-LABEL: atomicrmw_and_i32_seq_cst:
3691; LA64:       # %bb.0:
3692; LA64-NEXT:    amand_db.w $a2, $a1, $a0
3693; LA64-NEXT:    move $a0, $a2
3694; LA64-NEXT:    ret
3695  %1 = atomicrmw and ptr %a, i32 %b seq_cst
3696  ret i32 %1
3697}
3698
3699define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
3700; LA32-LABEL: atomicrmw_and_i64_seq_cst:
3701; LA32:       # %bb.0:
3702; LA32-NEXT:    addi.w $sp, $sp, -16
3703; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3704; LA32-NEXT:    ori $a3, $zero, 5
3705; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
3706; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3707; LA32-NEXT:    addi.w $sp, $sp, 16
3708; LA32-NEXT:    ret
3709;
3710; LA64-LABEL: atomicrmw_and_i64_seq_cst:
3711; LA64:       # %bb.0:
3712; LA64-NEXT:    amand_db.d $a2, $a1, $a0
3713; LA64-NEXT:    move $a0, $a2
3714; LA64-NEXT:    ret
3715  %1 = atomicrmw and ptr %a, i64 %b seq_cst
3716  ret i64 %1
3717}
3718
3719define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
3720; LA32-LABEL: atomicrmw_or_i8_seq_cst:
3721; LA32:       # %bb.0:
3722; LA32-NEXT:    slli.w $a2, $a0, 3
3723; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3724; LA32-NEXT:    andi $a1, $a1, 255
3725; LA32-NEXT:    sll.w $a1, $a1, $a2
3726; LA32-NEXT:  .LBB120_1: # =>This Inner Loop Header: Depth=1
3727; LA32-NEXT:    ll.w $a3, $a0, 0
3728; LA32-NEXT:    or $a4, $a3, $a1
3729; LA32-NEXT:    sc.w $a4, $a0, 0
3730; LA32-NEXT:    beqz $a4, .LBB120_1
3731; LA32-NEXT:  # %bb.2:
3732; LA32-NEXT:    srl.w $a0, $a3, $a2
3733; LA32-NEXT:    ret
3734;
3735; LA64-LABEL: atomicrmw_or_i8_seq_cst:
3736; LA64:       # %bb.0:
3737; LA64-NEXT:    slli.d $a2, $a0, 3
3738; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3739; LA64-NEXT:    andi $a1, $a1, 255
3740; LA64-NEXT:    sll.w $a1, $a1, $a2
3741; LA64-NEXT:    amor_db.w $a3, $a1, $a0
3742; LA64-NEXT:    srl.w $a0, $a3, $a2
3743; LA64-NEXT:    ret
3744  %1 = atomicrmw or ptr %a, i8 %b seq_cst
3745  ret i8 %1
3746}
3747
3748define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
3749; LA32-LABEL: atomicrmw_or_i16_seq_cst:
3750; LA32:       # %bb.0:
3751; LA32-NEXT:    slli.w $a2, $a0, 3
3752; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3753; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3754; LA32-NEXT:    sll.w $a1, $a1, $a2
3755; LA32-NEXT:  .LBB121_1: # =>This Inner Loop Header: Depth=1
3756; LA32-NEXT:    ll.w $a3, $a0, 0
3757; LA32-NEXT:    or $a4, $a3, $a1
3758; LA32-NEXT:    sc.w $a4, $a0, 0
3759; LA32-NEXT:    beqz $a4, .LBB121_1
3760; LA32-NEXT:  # %bb.2:
3761; LA32-NEXT:    srl.w $a0, $a3, $a2
3762; LA32-NEXT:    ret
3763;
3764; LA64-LABEL: atomicrmw_or_i16_seq_cst:
3765; LA64:       # %bb.0:
3766; LA64-NEXT:    slli.d $a2, $a0, 3
3767; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3768; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3769; LA64-NEXT:    sll.w $a1, $a1, $a2
3770; LA64-NEXT:    amor_db.w $a3, $a1, $a0
3771; LA64-NEXT:    srl.w $a0, $a3, $a2
3772; LA64-NEXT:    ret
3773  %1 = atomicrmw or ptr %a, i16 %b seq_cst
3774  ret i16 %1
3775}
3776
3777define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
3778; LA32-LABEL: atomicrmw_or_i32_seq_cst:
3779; LA32:       # %bb.0:
3780; LA32-NEXT:  .LBB122_1: # =>This Inner Loop Header: Depth=1
3781; LA32-NEXT:    ll.w $a2, $a0, 0
3782; LA32-NEXT:    or $a3, $a2, $a1
3783; LA32-NEXT:    sc.w $a3, $a0, 0
3784; LA32-NEXT:    beqz $a3, .LBB122_1
3785; LA32-NEXT:  # %bb.2:
3786; LA32-NEXT:    move $a0, $a2
3787; LA32-NEXT:    ret
3788;
3789; LA64-LABEL: atomicrmw_or_i32_seq_cst:
3790; LA64:       # %bb.0:
3791; LA64-NEXT:    amor_db.w $a2, $a1, $a0
3792; LA64-NEXT:    move $a0, $a2
3793; LA64-NEXT:    ret
3794  %1 = atomicrmw or ptr %a, i32 %b seq_cst
3795  ret i32 %1
3796}
3797
3798define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
3799; LA32-LABEL: atomicrmw_or_i64_seq_cst:
3800; LA32:       # %bb.0:
3801; LA32-NEXT:    addi.w $sp, $sp, -16
3802; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3803; LA32-NEXT:    ori $a3, $zero, 5
3804; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
3805; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3806; LA32-NEXT:    addi.w $sp, $sp, 16
3807; LA32-NEXT:    ret
3808;
3809; LA64-LABEL: atomicrmw_or_i64_seq_cst:
3810; LA64:       # %bb.0:
3811; LA64-NEXT:    amor_db.d $a2, $a1, $a0
3812; LA64-NEXT:    move $a0, $a2
3813; LA64-NEXT:    ret
3814  %1 = atomicrmw or ptr %a, i64 %b seq_cst
3815  ret i64 %1
3816}
3817
3818define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
3819; LA32-LABEL: atomicrmw_xor_i8_seq_cst:
3820; LA32:       # %bb.0:
3821; LA32-NEXT:    slli.w $a2, $a0, 3
3822; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3823; LA32-NEXT:    andi $a1, $a1, 255
3824; LA32-NEXT:    sll.w $a1, $a1, $a2
3825; LA32-NEXT:  .LBB124_1: # =>This Inner Loop Header: Depth=1
3826; LA32-NEXT:    ll.w $a3, $a0, 0
3827; LA32-NEXT:    xor $a4, $a3, $a1
3828; LA32-NEXT:    sc.w $a4, $a0, 0
3829; LA32-NEXT:    beqz $a4, .LBB124_1
3830; LA32-NEXT:  # %bb.2:
3831; LA32-NEXT:    srl.w $a0, $a3, $a2
3832; LA32-NEXT:    ret
3833;
3834; LA64-LABEL: atomicrmw_xor_i8_seq_cst:
3835; LA64:       # %bb.0:
3836; LA64-NEXT:    slli.d $a2, $a0, 3
3837; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3838; LA64-NEXT:    andi $a1, $a1, 255
3839; LA64-NEXT:    sll.w $a1, $a1, $a2
3840; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
3841; LA64-NEXT:    srl.w $a0, $a3, $a2
3842; LA64-NEXT:    ret
3843  %1 = atomicrmw xor ptr %a, i8 %b seq_cst
3844  ret i8 %1
3845}
3846
3847define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
3848; LA32-LABEL: atomicrmw_xor_i16_seq_cst:
3849; LA32:       # %bb.0:
3850; LA32-NEXT:    slli.w $a2, $a0, 3
3851; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3852; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
3853; LA32-NEXT:    sll.w $a1, $a1, $a2
3854; LA32-NEXT:  .LBB125_1: # =>This Inner Loop Header: Depth=1
3855; LA32-NEXT:    ll.w $a3, $a0, 0
3856; LA32-NEXT:    xor $a4, $a3, $a1
3857; LA32-NEXT:    sc.w $a4, $a0, 0
3858; LA32-NEXT:    beqz $a4, .LBB125_1
3859; LA32-NEXT:  # %bb.2:
3860; LA32-NEXT:    srl.w $a0, $a3, $a2
3861; LA32-NEXT:    ret
3862;
3863; LA64-LABEL: atomicrmw_xor_i16_seq_cst:
3864; LA64:       # %bb.0:
3865; LA64-NEXT:    slli.d $a2, $a0, 3
3866; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3867; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
3868; LA64-NEXT:    sll.w $a1, $a1, $a2
3869; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
3870; LA64-NEXT:    srl.w $a0, $a3, $a2
3871; LA64-NEXT:    ret
3872  %1 = atomicrmw xor ptr %a, i16 %b seq_cst
3873  ret i16 %1
3874}
3875
3876define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
3877; LA32-LABEL: atomicrmw_xor_i32_seq_cst:
3878; LA32:       # %bb.0:
3879; LA32-NEXT:  .LBB126_1: # =>This Inner Loop Header: Depth=1
3880; LA32-NEXT:    ll.w $a2, $a0, 0
3881; LA32-NEXT:    xor $a3, $a2, $a1
3882; LA32-NEXT:    sc.w $a3, $a0, 0
3883; LA32-NEXT:    beqz $a3, .LBB126_1
3884; LA32-NEXT:  # %bb.2:
3885; LA32-NEXT:    move $a0, $a2
3886; LA32-NEXT:    ret
3887;
3888; LA64-LABEL: atomicrmw_xor_i32_seq_cst:
3889; LA64:       # %bb.0:
3890; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
3891; LA64-NEXT:    move $a0, $a2
3892; LA64-NEXT:    ret
3893  %1 = atomicrmw xor ptr %a, i32 %b seq_cst
3894  ret i32 %1
3895}
3896
3897define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
3898; LA32-LABEL: atomicrmw_xor_i64_seq_cst:
3899; LA32:       # %bb.0:
3900; LA32-NEXT:    addi.w $sp, $sp, -16
3901; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
3902; LA32-NEXT:    ori $a3, $zero, 5
3903; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
3904; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
3905; LA32-NEXT:    addi.w $sp, $sp, 16
3906; LA32-NEXT:    ret
3907;
3908; LA64-LABEL: atomicrmw_xor_i64_seq_cst:
3909; LA64:       # %bb.0:
3910; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
3911; LA64-NEXT:    move $a0, $a2
3912; LA64-NEXT:    ret
3913  %1 = atomicrmw xor ptr %a, i64 %b seq_cst
3914  ret i64 %1
3915}
3916
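; The monotonic variants below mirror the seq_cst tests above: on LA64 the plain
; AM instructions (amswap.w/.d, amadd.w/.d, amand.w/.d, amor.w/.d, amxor.w/.d)
; are selected instead of the _db barrier forms, and the LA32 i64 libcalls pass
; memory order 0 (relaxed) in $a3 instead of 5 (seq_cst).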
3917define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
3918; LA32-LABEL: atomicrmw_xchg_i8_monotonic:
3919; LA32:       # %bb.0:
3920; LA32-NEXT:    slli.w $a2, $a0, 3
3921; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3922; LA32-NEXT:    ori $a3, $zero, 255
3923; LA32-NEXT:    sll.w $a3, $a3, $a2
3924; LA32-NEXT:    andi $a1, $a1, 255
3925; LA32-NEXT:    sll.w $a1, $a1, $a2
3926; LA32-NEXT:  .LBB128_1: # =>This Inner Loop Header: Depth=1
3927; LA32-NEXT:    ll.w $a4, $a0, 0
3928; LA32-NEXT:    addi.w $a5, $a1, 0
3929; LA32-NEXT:    xor $a5, $a4, $a5
3930; LA32-NEXT:    and $a5, $a5, $a3
3931; LA32-NEXT:    xor $a5, $a4, $a5
3932; LA32-NEXT:    sc.w $a5, $a0, 0
3933; LA32-NEXT:    beqz $a5, .LBB128_1
3934; LA32-NEXT:  # %bb.2:
3935; LA32-NEXT:    srl.w $a0, $a4, $a2
3936; LA32-NEXT:    ret
3937;
3938; LA64-LABEL: atomicrmw_xchg_i8_monotonic:
3939; LA64:       # %bb.0:
3940; LA64-NEXT:    slli.d $a2, $a0, 3
3941; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3942; LA64-NEXT:    ori $a3, $zero, 255
3943; LA64-NEXT:    sll.w $a3, $a3, $a2
3944; LA64-NEXT:    andi $a1, $a1, 255
3945; LA64-NEXT:    sll.w $a1, $a1, $a2
3946; LA64-NEXT:  .LBB128_1: # =>This Inner Loop Header: Depth=1
3947; LA64-NEXT:    ll.w $a4, $a0, 0
3948; LA64-NEXT:    addi.w $a5, $a1, 0
3949; LA64-NEXT:    xor $a5, $a4, $a5
3950; LA64-NEXT:    and $a5, $a5, $a3
3951; LA64-NEXT:    xor $a5, $a4, $a5
3952; LA64-NEXT:    sc.w $a5, $a0, 0
3953; LA64-NEXT:    beqz $a5, .LBB128_1
3954; LA64-NEXT:  # %bb.2:
3955; LA64-NEXT:    srl.w $a0, $a4, $a2
3956; LA64-NEXT:    ret
3957  %1 = atomicrmw xchg ptr %a, i8 %b monotonic
3958  ret i8 %1
3959}
3960
3961define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
3962; LA32-LABEL: atomicrmw_xchg_0_i8_monotonic:
3963; LA32:       # %bb.0:
3964; LA32-NEXT:    slli.w $a1, $a0, 3
3965; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3966; LA32-NEXT:    ori $a2, $zero, 255
3967; LA32-NEXT:    sll.w $a2, $a2, $a1
3968; LA32-NEXT:    nor $a2, $a2, $zero
3969; LA32-NEXT:  .LBB129_1: # =>This Inner Loop Header: Depth=1
3970; LA32-NEXT:    ll.w $a3, $a0, 0
3971; LA32-NEXT:    and $a4, $a3, $a2
3972; LA32-NEXT:    sc.w $a4, $a0, 0
3973; LA32-NEXT:    beqz $a4, .LBB129_1
3974; LA32-NEXT:  # %bb.2:
3975; LA32-NEXT:    srl.w $a0, $a3, $a1
3976; LA32-NEXT:    ret
3977;
3978; LA64-LABEL: atomicrmw_xchg_0_i8_monotonic:
3979; LA64:       # %bb.0:
3980; LA64-NEXT:    slli.d $a1, $a0, 3
3981; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
3982; LA64-NEXT:    ori $a2, $zero, 255
3983; LA64-NEXT:    sll.w $a2, $a2, $a1
3984; LA64-NEXT:    nor $a2, $a2, $zero
3985; LA64-NEXT:    amand.w $a3, $a2, $a0
3986; LA64-NEXT:    srl.w $a0, $a3, $a1
3987; LA64-NEXT:    ret
3988  %1 = atomicrmw xchg ptr %a, i8 0 monotonic
3989  ret i8 %1
3990}
3991
3992define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
3993; LA32-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
3994; LA32:       # %bb.0:
3995; LA32-NEXT:    slli.w $a1, $a0, 3
3996; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
3997; LA32-NEXT:    ori $a2, $zero, 255
3998; LA32-NEXT:    sll.w $a2, $a2, $a1
3999; LA32-NEXT:  .LBB130_1: # =>This Inner Loop Header: Depth=1
4000; LA32-NEXT:    ll.w $a3, $a0, 0
4001; LA32-NEXT:    or $a4, $a3, $a2
4002; LA32-NEXT:    sc.w $a4, $a0, 0
4003; LA32-NEXT:    beqz $a4, .LBB130_1
4004; LA32-NEXT:  # %bb.2:
4005; LA32-NEXT:    srl.w $a0, $a3, $a1
4006; LA32-NEXT:    ret
4007;
4008; LA64-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
4009; LA64:       # %bb.0:
4010; LA64-NEXT:    slli.d $a1, $a0, 3
4011; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4012; LA64-NEXT:    ori $a2, $zero, 255
4013; LA64-NEXT:    sll.w $a2, $a2, $a1
4014; LA64-NEXT:    amor.w $a3, $a2, $a0
4015; LA64-NEXT:    srl.w $a0, $a3, $a1
4016; LA64-NEXT:    ret
4017  %1 = atomicrmw xchg ptr %a, i8 -1 monotonic
4018  ret i8 %1
4019}
4020
4021define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
4022; LA32-LABEL: atomicrmw_xchg_i16_monotonic:
4023; LA32:       # %bb.0:
4024; LA32-NEXT:    slli.w $a2, $a0, 3
4025; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4026; LA32-NEXT:    lu12i.w $a3, 15
4027; LA32-NEXT:    ori $a3, $a3, 4095
4028; LA32-NEXT:    sll.w $a3, $a3, $a2
4029; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
4030; LA32-NEXT:    sll.w $a1, $a1, $a2
4031; LA32-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
4032; LA32-NEXT:    ll.w $a4, $a0, 0
4033; LA32-NEXT:    addi.w $a5, $a1, 0
4034; LA32-NEXT:    xor $a5, $a4, $a5
4035; LA32-NEXT:    and $a5, $a5, $a3
4036; LA32-NEXT:    xor $a5, $a4, $a5
4037; LA32-NEXT:    sc.w $a5, $a0, 0
4038; LA32-NEXT:    beqz $a5, .LBB131_1
4039; LA32-NEXT:  # %bb.2:
4040; LA32-NEXT:    srl.w $a0, $a4, $a2
4041; LA32-NEXT:    ret
4042;
4043; LA64-LABEL: atomicrmw_xchg_i16_monotonic:
4044; LA64:       # %bb.0:
4045; LA64-NEXT:    slli.d $a2, $a0, 3
4046; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4047; LA64-NEXT:    lu12i.w $a3, 15
4048; LA64-NEXT:    ori $a3, $a3, 4095
4049; LA64-NEXT:    sll.w $a3, $a3, $a2
4050; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
4051; LA64-NEXT:    sll.w $a1, $a1, $a2
4052; LA64-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
4053; LA64-NEXT:    ll.w $a4, $a0, 0
4054; LA64-NEXT:    addi.w $a5, $a1, 0
4055; LA64-NEXT:    xor $a5, $a4, $a5
4056; LA64-NEXT:    and $a5, $a5, $a3
4057; LA64-NEXT:    xor $a5, $a4, $a5
4058; LA64-NEXT:    sc.w $a5, $a0, 0
4059; LA64-NEXT:    beqz $a5, .LBB131_1
4060; LA64-NEXT:  # %bb.2:
4061; LA64-NEXT:    srl.w $a0, $a4, $a2
4062; LA64-NEXT:    ret
4063  %1 = atomicrmw xchg ptr %a, i16 %b monotonic
4064  ret i16 %1
4065}
4066
4067define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
4068; LA32-LABEL: atomicrmw_xchg_0_i16_monotonic:
4069; LA32:       # %bb.0:
4070; LA32-NEXT:    slli.w $a1, $a0, 3
4071; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4072; LA32-NEXT:    lu12i.w $a2, 15
4073; LA32-NEXT:    ori $a2, $a2, 4095
4074; LA32-NEXT:    sll.w $a2, $a2, $a1
4075; LA32-NEXT:    nor $a2, $a2, $zero
4076; LA32-NEXT:  .LBB132_1: # =>This Inner Loop Header: Depth=1
4077; LA32-NEXT:    ll.w $a3, $a0, 0
4078; LA32-NEXT:    and $a4, $a3, $a2
4079; LA32-NEXT:    sc.w $a4, $a0, 0
4080; LA32-NEXT:    beqz $a4, .LBB132_1
4081; LA32-NEXT:  # %bb.2:
4082; LA32-NEXT:    srl.w $a0, $a3, $a1
4083; LA32-NEXT:    ret
4084;
4085; LA64-LABEL: atomicrmw_xchg_0_i16_monotonic:
4086; LA64:       # %bb.0:
4087; LA64-NEXT:    slli.d $a1, $a0, 3
4088; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4089; LA64-NEXT:    lu12i.w $a2, 15
4090; LA64-NEXT:    ori $a2, $a2, 4095
4091; LA64-NEXT:    sll.w $a2, $a2, $a1
4092; LA64-NEXT:    nor $a2, $a2, $zero
4093; LA64-NEXT:    amand.w $a3, $a2, $a0
4094; LA64-NEXT:    srl.w $a0, $a3, $a1
4095; LA64-NEXT:    ret
4096  %1 = atomicrmw xchg ptr %a, i16 0 monotonic
4097  ret i16 %1
4098}
4099
4100define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
4101; LA32-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
4102; LA32:       # %bb.0:
4103; LA32-NEXT:    slli.w $a1, $a0, 3
4104; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4105; LA32-NEXT:    lu12i.w $a2, 15
4106; LA32-NEXT:    ori $a2, $a2, 4095
4107; LA32-NEXT:    sll.w $a2, $a2, $a1
4108; LA32-NEXT:  .LBB133_1: # =>This Inner Loop Header: Depth=1
4109; LA32-NEXT:    ll.w $a3, $a0, 0
4110; LA32-NEXT:    or $a4, $a3, $a2
4111; LA32-NEXT:    sc.w $a4, $a0, 0
4112; LA32-NEXT:    beqz $a4, .LBB133_1
4113; LA32-NEXT:  # %bb.2:
4114; LA32-NEXT:    srl.w $a0, $a3, $a1
4115; LA32-NEXT:    ret
4116;
4117; LA64-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
4118; LA64:       # %bb.0:
4119; LA64-NEXT:    slli.d $a1, $a0, 3
4120; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4121; LA64-NEXT:    lu12i.w $a2, 15
4122; LA64-NEXT:    ori $a2, $a2, 4095
4123; LA64-NEXT:    sll.w $a2, $a2, $a1
4124; LA64-NEXT:    amor.w $a3, $a2, $a0
4125; LA64-NEXT:    srl.w $a0, $a3, $a1
4126; LA64-NEXT:    ret
4127  %1 = atomicrmw xchg ptr %a, i16 -1 monotonic
4128  ret i16 %1
4129}
4130
4131define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
4132; LA32-LABEL: atomicrmw_xchg_i32_monotonic:
4133; LA32:       # %bb.0:
4134; LA32-NEXT:  .LBB134_1: # =>This Inner Loop Header: Depth=1
4135; LA32-NEXT:    ll.w $a2, $a0, 0
4136; LA32-NEXT:    move $a3, $a1
4137; LA32-NEXT:    sc.w $a3, $a0, 0
4138; LA32-NEXT:    beqz $a3, .LBB134_1
4139; LA32-NEXT:  # %bb.2:
4140; LA32-NEXT:    move $a0, $a2
4141; LA32-NEXT:    ret
4142;
4143; LA64-LABEL: atomicrmw_xchg_i32_monotonic:
4144; LA64:       # %bb.0:
4145; LA64-NEXT:    amswap.w $a2, $a1, $a0
4146; LA64-NEXT:    move $a0, $a2
4147; LA64-NEXT:    ret
4148  %1 = atomicrmw xchg ptr %a, i32 %b monotonic
4149  ret i32 %1
4150}
4151
4152define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
4153; LA32-LABEL: atomicrmw_xchg_i64_monotonic:
4154; LA32:       # %bb.0:
4155; LA32-NEXT:    addi.w $sp, $sp, -16
4156; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
4157; LA32-NEXT:    move $a3, $zero
4158; LA32-NEXT:    bl %plt(__atomic_exchange_8)
4159; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
4160; LA32-NEXT:    addi.w $sp, $sp, 16
4161; LA32-NEXT:    ret
4162;
4163; LA64-LABEL: atomicrmw_xchg_i64_monotonic:
4164; LA64:       # %bb.0:
4165; LA64-NEXT:    amswap.d $a2, $a1, $a0
4166; LA64-NEXT:    move $a0, $a2
4167; LA64-NEXT:    ret
4168  %1 = atomicrmw xchg ptr %a, i64 %b monotonic
4169  ret i64 %1
4170}
4171
4172define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
4173; LA32-LABEL: atomicrmw_add_i8_monotonic:
4174; LA32:       # %bb.0:
4175; LA32-NEXT:    slli.w $a2, $a0, 3
4176; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4177; LA32-NEXT:    ori $a3, $zero, 255
4178; LA32-NEXT:    sll.w $a3, $a3, $a2
4179; LA32-NEXT:    andi $a1, $a1, 255
4180; LA32-NEXT:    sll.w $a1, $a1, $a2
4181; LA32-NEXT:  .LBB136_1: # =>This Inner Loop Header: Depth=1
4182; LA32-NEXT:    ll.w $a4, $a0, 0
4183; LA32-NEXT:    add.w $a5, $a4, $a1
4184; LA32-NEXT:    xor $a5, $a4, $a5
4185; LA32-NEXT:    and $a5, $a5, $a3
4186; LA32-NEXT:    xor $a5, $a4, $a5
4187; LA32-NEXT:    sc.w $a5, $a0, 0
4188; LA32-NEXT:    beqz $a5, .LBB136_1
4189; LA32-NEXT:  # %bb.2:
4190; LA32-NEXT:    srl.w $a0, $a4, $a2
4191; LA32-NEXT:    ret
4192;
4193; LA64-LABEL: atomicrmw_add_i8_monotonic:
4194; LA64:       # %bb.0:
4195; LA64-NEXT:    slli.d $a2, $a0, 3
4196; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4197; LA64-NEXT:    ori $a3, $zero, 255
4198; LA64-NEXT:    sll.w $a3, $a3, $a2
4199; LA64-NEXT:    andi $a1, $a1, 255
4200; LA64-NEXT:    sll.w $a1, $a1, $a2
4201; LA64-NEXT:  .LBB136_1: # =>This Inner Loop Header: Depth=1
4202; LA64-NEXT:    ll.w $a4, $a0, 0
4203; LA64-NEXT:    add.w $a5, $a4, $a1
4204; LA64-NEXT:    xor $a5, $a4, $a5
4205; LA64-NEXT:    and $a5, $a5, $a3
4206; LA64-NEXT:    xor $a5, $a4, $a5
4207; LA64-NEXT:    sc.w $a5, $a0, 0
4208; LA64-NEXT:    beqz $a5, .LBB136_1
4209; LA64-NEXT:  # %bb.2:
4210; LA64-NEXT:    srl.w $a0, $a4, $a2
4211; LA64-NEXT:    ret
4212  %1 = atomicrmw add ptr %a, i8 %b monotonic
4213  ret i8 %1
4214}
4215
4216define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
4217; LA32-LABEL: atomicrmw_add_i16_monotonic:
4218; LA32:       # %bb.0:
4219; LA32-NEXT:    slli.w $a2, $a0, 3
4220; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4221; LA32-NEXT:    lu12i.w $a3, 15
4222; LA32-NEXT:    ori $a3, $a3, 4095
4223; LA32-NEXT:    sll.w $a3, $a3, $a2
4224; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
4225; LA32-NEXT:    sll.w $a1, $a1, $a2
4226; LA32-NEXT:  .LBB137_1: # =>This Inner Loop Header: Depth=1
4227; LA32-NEXT:    ll.w $a4, $a0, 0
4228; LA32-NEXT:    add.w $a5, $a4, $a1
4229; LA32-NEXT:    xor $a5, $a4, $a5
4230; LA32-NEXT:    and $a5, $a5, $a3
4231; LA32-NEXT:    xor $a5, $a4, $a5
4232; LA32-NEXT:    sc.w $a5, $a0, 0
4233; LA32-NEXT:    beqz $a5, .LBB137_1
4234; LA32-NEXT:  # %bb.2:
4235; LA32-NEXT:    srl.w $a0, $a4, $a2
4236; LA32-NEXT:    ret
4237;
4238; LA64-LABEL: atomicrmw_add_i16_monotonic:
4239; LA64:       # %bb.0:
4240; LA64-NEXT:    slli.d $a2, $a0, 3
4241; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4242; LA64-NEXT:    lu12i.w $a3, 15
4243; LA64-NEXT:    ori $a3, $a3, 4095
4244; LA64-NEXT:    sll.w $a3, $a3, $a2
4245; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
4246; LA64-NEXT:    sll.w $a1, $a1, $a2
4247; LA64-NEXT:  .LBB137_1: # =>This Inner Loop Header: Depth=1
4248; LA64-NEXT:    ll.w $a4, $a0, 0
4249; LA64-NEXT:    add.w $a5, $a4, $a1
4250; LA64-NEXT:    xor $a5, $a4, $a5
4251; LA64-NEXT:    and $a5, $a5, $a3
4252; LA64-NEXT:    xor $a5, $a4, $a5
4253; LA64-NEXT:    sc.w $a5, $a0, 0
4254; LA64-NEXT:    beqz $a5, .LBB137_1
4255; LA64-NEXT:  # %bb.2:
4256; LA64-NEXT:    srl.w $a0, $a4, $a2
4257; LA64-NEXT:    ret
4258  %1 = atomicrmw add ptr %a, i16 %b monotonic
4259  ret i16 %1
4260}
4261
4262define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
4263; LA32-LABEL: atomicrmw_add_i32_monotonic:
4264; LA32:       # %bb.0:
4265; LA32-NEXT:  .LBB138_1: # =>This Inner Loop Header: Depth=1
4266; LA32-NEXT:    ll.w $a2, $a0, 0
4267; LA32-NEXT:    add.w $a3, $a2, $a1
4268; LA32-NEXT:    sc.w $a3, $a0, 0
4269; LA32-NEXT:    beqz $a3, .LBB138_1
4270; LA32-NEXT:  # %bb.2:
4271; LA32-NEXT:    move $a0, $a2
4272; LA32-NEXT:    ret
4273;
4274; LA64-LABEL: atomicrmw_add_i32_monotonic:
4275; LA64:       # %bb.0:
4276; LA64-NEXT:    amadd.w $a2, $a1, $a0
4277; LA64-NEXT:    move $a0, $a2
4278; LA64-NEXT:    ret
4279  %1 = atomicrmw add ptr %a, i32 %b monotonic
4280  ret i32 %1
4281}
4282
4283define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
4284; LA32-LABEL: atomicrmw_add_i64_monotonic:
4285; LA32:       # %bb.0:
4286; LA32-NEXT:    addi.w $sp, $sp, -16
4287; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
4288; LA32-NEXT:    move $a3, $zero
4289; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
4290; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
4291; LA32-NEXT:    addi.w $sp, $sp, 16
4292; LA32-NEXT:    ret
4293;
4294; LA64-LABEL: atomicrmw_add_i64_monotonic:
4295; LA64:       # %bb.0:
4296; LA64-NEXT:    amadd.d $a2, $a1, $a0
4297; LA64-NEXT:    move $a0, $a2
4298; LA64-NEXT:    ret
4299  %1 = atomicrmw add ptr %a, i64 %b monotonic
4300  ret i64 %1
4301}
4302
4303define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
4304; LA32-LABEL: atomicrmw_sub_i8_monotonic:
4305; LA32:       # %bb.0:
4306; LA32-NEXT:    slli.w $a2, $a0, 3
4307; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4308; LA32-NEXT:    ori $a3, $zero, 255
4309; LA32-NEXT:    sll.w $a3, $a3, $a2
4310; LA32-NEXT:    andi $a1, $a1, 255
4311; LA32-NEXT:    sll.w $a1, $a1, $a2
4312; LA32-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
4313; LA32-NEXT:    ll.w $a4, $a0, 0
4314; LA32-NEXT:    sub.w $a5, $a4, $a1
4315; LA32-NEXT:    xor $a5, $a4, $a5
4316; LA32-NEXT:    and $a5, $a5, $a3
4317; LA32-NEXT:    xor $a5, $a4, $a5
4318; LA32-NEXT:    sc.w $a5, $a0, 0
4319; LA32-NEXT:    beqz $a5, .LBB140_1
4320; LA32-NEXT:  # %bb.2:
4321; LA32-NEXT:    srl.w $a0, $a4, $a2
4322; LA32-NEXT:    ret
4323;
4324; LA64-LABEL: atomicrmw_sub_i8_monotonic:
4325; LA64:       # %bb.0:
4326; LA64-NEXT:    slli.d $a2, $a0, 3
4327; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4328; LA64-NEXT:    ori $a3, $zero, 255
4329; LA64-NEXT:    sll.w $a3, $a3, $a2
4330; LA64-NEXT:    andi $a1, $a1, 255
4331; LA64-NEXT:    sll.w $a1, $a1, $a2
4332; LA64-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
4333; LA64-NEXT:    ll.w $a4, $a0, 0
4334; LA64-NEXT:    sub.w $a5, $a4, $a1
4335; LA64-NEXT:    xor $a5, $a4, $a5
4336; LA64-NEXT:    and $a5, $a5, $a3
4337; LA64-NEXT:    xor $a5, $a4, $a5
4338; LA64-NEXT:    sc.w $a5, $a0, 0
4339; LA64-NEXT:    beqz $a5, .LBB140_1
4340; LA64-NEXT:  # %bb.2:
4341; LA64-NEXT:    srl.w $a0, $a4, $a2
4342; LA64-NEXT:    ret
4343  %1 = atomicrmw sub ptr %a, i8 %b monotonic
4344  ret i8 %1
4345}
4346
4347define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
4348; LA32-LABEL: atomicrmw_sub_i16_monotonic:
4349; LA32:       # %bb.0:
4350; LA32-NEXT:    slli.w $a2, $a0, 3
4351; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4352; LA32-NEXT:    lu12i.w $a3, 15
4353; LA32-NEXT:    ori $a3, $a3, 4095
4354; LA32-NEXT:    sll.w $a3, $a3, $a2
4355; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
4356; LA32-NEXT:    sll.w $a1, $a1, $a2
4357; LA32-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
4358; LA32-NEXT:    ll.w $a4, $a0, 0
4359; LA32-NEXT:    sub.w $a5, $a4, $a1
4360; LA32-NEXT:    xor $a5, $a4, $a5
4361; LA32-NEXT:    and $a5, $a5, $a3
4362; LA32-NEXT:    xor $a5, $a4, $a5
4363; LA32-NEXT:    sc.w $a5, $a0, 0
4364; LA32-NEXT:    beqz $a5, .LBB141_1
4365; LA32-NEXT:  # %bb.2:
4366; LA32-NEXT:    srl.w $a0, $a4, $a2
4367; LA32-NEXT:    ret
4368;
4369; LA64-LABEL: atomicrmw_sub_i16_monotonic:
4370; LA64:       # %bb.0:
4371; LA64-NEXT:    slli.d $a2, $a0, 3
4372; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4373; LA64-NEXT:    lu12i.w $a3, 15
4374; LA64-NEXT:    ori $a3, $a3, 4095
4375; LA64-NEXT:    sll.w $a3, $a3, $a2
4376; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
4377; LA64-NEXT:    sll.w $a1, $a1, $a2
4378; LA64-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
4379; LA64-NEXT:    ll.w $a4, $a0, 0
4380; LA64-NEXT:    sub.w $a5, $a4, $a1
4381; LA64-NEXT:    xor $a5, $a4, $a5
4382; LA64-NEXT:    and $a5, $a5, $a3
4383; LA64-NEXT:    xor $a5, $a4, $a5
4384; LA64-NEXT:    sc.w $a5, $a0, 0
4385; LA64-NEXT:    beqz $a5, .LBB141_1
4386; LA64-NEXT:  # %bb.2:
4387; LA64-NEXT:    srl.w $a0, $a4, $a2
4388; LA64-NEXT:    ret
4389  %1 = atomicrmw sub ptr %a, i16 %b monotonic
4390  ret i16 %1
4391}
4392
4393define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
4394; LA32-LABEL: atomicrmw_sub_i32_monotonic:
4395; LA32:       # %bb.0:
4396; LA32-NEXT:  .LBB142_1: # =>This Inner Loop Header: Depth=1
4397; LA32-NEXT:    ll.w $a2, $a0, 0
4398; LA32-NEXT:    sub.w $a3, $a2, $a1
4399; LA32-NEXT:    sc.w $a3, $a0, 0
4400; LA32-NEXT:    beqz $a3, .LBB142_1
4401; LA32-NEXT:  # %bb.2:
4402; LA32-NEXT:    move $a0, $a2
4403; LA32-NEXT:    ret
4404;
4405; LA64-LABEL: atomicrmw_sub_i32_monotonic:
4406; LA64:       # %bb.0:
4407; LA64-NEXT:    sub.w $a2, $zero, $a1
4408; LA64-NEXT:    amadd.w $a1, $a2, $a0
4409; LA64-NEXT:    move $a0, $a1
4410; LA64-NEXT:    ret
4411  %1 = atomicrmw sub ptr %a, i32 %b monotonic
4412  ret i32 %1
4413}
4414
4415define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
4416; LA32-LABEL: atomicrmw_sub_i64_monotonic:
4417; LA32:       # %bb.0:
4418; LA32-NEXT:    addi.w $sp, $sp, -16
4419; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
4420; LA32-NEXT:    move $a3, $zero
4421; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
4422; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
4423; LA32-NEXT:    addi.w $sp, $sp, 16
4424; LA32-NEXT:    ret
4425;
4426; LA64-LABEL: atomicrmw_sub_i64_monotonic:
4427; LA64:       # %bb.0:
4428; LA64-NEXT:    sub.d $a2, $zero, $a1
4429; LA64-NEXT:    amadd.d $a1, $a2, $a0
4430; LA64-NEXT:    move $a0, $a1
4431; LA64-NEXT:    ret
4432  %1 = atomicrmw sub ptr %a, i64 %b monotonic
4433  ret i64 %1
4434}
4435
4436define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
4437; LA32-LABEL: atomicrmw_nand_i8_monotonic:
4438; LA32:       # %bb.0:
4439; LA32-NEXT:    slli.w $a2, $a0, 3
4440; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4441; LA32-NEXT:    ori $a3, $zero, 255
4442; LA32-NEXT:    sll.w $a3, $a3, $a2
4443; LA32-NEXT:    andi $a1, $a1, 255
4444; LA32-NEXT:    sll.w $a1, $a1, $a2
4445; LA32-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
4446; LA32-NEXT:    ll.w $a4, $a0, 0
4447; LA32-NEXT:    and $a5, $a4, $a1
4448; LA32-NEXT:    nor $a5, $a5, $zero
4449; LA32-NEXT:    xor $a5, $a4, $a5
4450; LA32-NEXT:    and $a5, $a5, $a3
4451; LA32-NEXT:    xor $a5, $a4, $a5
4452; LA32-NEXT:    sc.w $a5, $a0, 0
4453; LA32-NEXT:    beqz $a5, .LBB144_1
4454; LA32-NEXT:  # %bb.2:
4455; LA32-NEXT:    srl.w $a0, $a4, $a2
4456; LA32-NEXT:    ret
4457;
4458; LA64-LABEL: atomicrmw_nand_i8_monotonic:
4459; LA64:       # %bb.0:
4460; LA64-NEXT:    slli.d $a2, $a0, 3
4461; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4462; LA64-NEXT:    ori $a3, $zero, 255
4463; LA64-NEXT:    sll.w $a3, $a3, $a2
4464; LA64-NEXT:    andi $a1, $a1, 255
4465; LA64-NEXT:    sll.w $a1, $a1, $a2
4466; LA64-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
4467; LA64-NEXT:    ll.w $a4, $a0, 0
4468; LA64-NEXT:    and $a5, $a4, $a1
4469; LA64-NEXT:    nor $a5, $a5, $zero
4470; LA64-NEXT:    xor $a5, $a4, $a5
4471; LA64-NEXT:    and $a5, $a5, $a3
4472; LA64-NEXT:    xor $a5, $a4, $a5
4473; LA64-NEXT:    sc.w $a5, $a0, 0
4474; LA64-NEXT:    beqz $a5, .LBB144_1
4475; LA64-NEXT:  # %bb.2:
4476; LA64-NEXT:    srl.w $a0, $a4, $a2
4477; LA64-NEXT:    ret
4478  %1 = atomicrmw nand ptr %a, i8 %b monotonic
4479  ret i8 %1
4480}
4481
4482define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
4483; LA32-LABEL: atomicrmw_nand_i16_monotonic:
4484; LA32:       # %bb.0:
4485; LA32-NEXT:    slli.w $a2, $a0, 3
4486; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4487; LA32-NEXT:    lu12i.w $a3, 15
4488; LA32-NEXT:    ori $a3, $a3, 4095
4489; LA32-NEXT:    sll.w $a3, $a3, $a2
4490; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
4491; LA32-NEXT:    sll.w $a1, $a1, $a2
4492; LA32-NEXT:  .LBB145_1: # =>This Inner Loop Header: Depth=1
4493; LA32-NEXT:    ll.w $a4, $a0, 0
4494; LA32-NEXT:    and $a5, $a4, $a1
4495; LA32-NEXT:    nor $a5, $a5, $zero
4496; LA32-NEXT:    xor $a5, $a4, $a5
4497; LA32-NEXT:    and $a5, $a5, $a3
4498; LA32-NEXT:    xor $a5, $a4, $a5
4499; LA32-NEXT:    sc.w $a5, $a0, 0
4500; LA32-NEXT:    beqz $a5, .LBB145_1
4501; LA32-NEXT:  # %bb.2:
4502; LA32-NEXT:    srl.w $a0, $a4, $a2
4503; LA32-NEXT:    ret
4504;
4505; LA64-LABEL: atomicrmw_nand_i16_monotonic:
4506; LA64:       # %bb.0:
4507; LA64-NEXT:    slli.d $a2, $a0, 3
4508; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4509; LA64-NEXT:    lu12i.w $a3, 15
4510; LA64-NEXT:    ori $a3, $a3, 4095
4511; LA64-NEXT:    sll.w $a3, $a3, $a2
4512; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
4513; LA64-NEXT:    sll.w $a1, $a1, $a2
4514; LA64-NEXT:  .LBB145_1: # =>This Inner Loop Header: Depth=1
4515; LA64-NEXT:    ll.w $a4, $a0, 0
4516; LA64-NEXT:    and $a5, $a4, $a1
4517; LA64-NEXT:    nor $a5, $a5, $zero
4518; LA64-NEXT:    xor $a5, $a4, $a5
4519; LA64-NEXT:    and $a5, $a5, $a3
4520; LA64-NEXT:    xor $a5, $a4, $a5
4521; LA64-NEXT:    sc.w $a5, $a0, 0
4522; LA64-NEXT:    beqz $a5, .LBB145_1
4523; LA64-NEXT:  # %bb.2:
4524; LA64-NEXT:    srl.w $a0, $a4, $a2
4525; LA64-NEXT:    ret
4526  %1 = atomicrmw nand ptr %a, i16 %b monotonic
4527  ret i16 %1
4528}
4529
4530define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
4531; LA32-LABEL: atomicrmw_nand_i32_monotonic:
4532; LA32:       # %bb.0:
4533; LA32-NEXT:  .LBB146_1: # =>This Inner Loop Header: Depth=1
4534; LA32-NEXT:    ll.w $a2, $a0, 0
4535; LA32-NEXT:    and $a3, $a2, $a1
4536; LA32-NEXT:    nor $a3, $a3, $zero
4537; LA32-NEXT:    sc.w $a3, $a0, 0
4538; LA32-NEXT:    beqz $a3, .LBB146_1
4539; LA32-NEXT:  # %bb.2:
4540; LA32-NEXT:    move $a0, $a2
4541; LA32-NEXT:    ret
4542;
4543; LA64-LABEL: atomicrmw_nand_i32_monotonic:
4544; LA64:       # %bb.0:
4545; LA64-NEXT:  .LBB146_1: # =>This Inner Loop Header: Depth=1
4546; LA64-NEXT:    ll.w $a2, $a0, 0
4547; LA64-NEXT:    and $a3, $a2, $a1
4548; LA64-NEXT:    nor $a3, $a3, $zero
4549; LA64-NEXT:    sc.w $a3, $a0, 0
4550; LA64-NEXT:    beqz $a3, .LBB146_1
4551; LA64-NEXT:  # %bb.2:
4552; LA64-NEXT:    move $a0, $a2
4553; LA64-NEXT:    ret
4554  %1 = atomicrmw nand ptr %a, i32 %b monotonic
4555  ret i32 %1
4556}
4557
4558define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
4559; LA32-LABEL: atomicrmw_nand_i64_monotonic:
4560; LA32:       # %bb.0:
4561; LA32-NEXT:    addi.w $sp, $sp, -16
4562; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
4563; LA32-NEXT:    move $a3, $zero
4564; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
4565; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
4566; LA32-NEXT:    addi.w $sp, $sp, 16
4567; LA32-NEXT:    ret
4568;
4569; LA64-LABEL: atomicrmw_nand_i64_monotonic:
4570; LA64:       # %bb.0:
4571; LA64-NEXT:  .LBB147_1: # =>This Inner Loop Header: Depth=1
4572; LA64-NEXT:    ll.d $a2, $a0, 0
4573; LA64-NEXT:    and $a3, $a2, $a1
4574; LA64-NEXT:    nor $a3, $a3, $zero
4575; LA64-NEXT:    sc.d $a3, $a0, 0
4576; LA64-NEXT:    beqz $a3, .LBB147_1
4577; LA64-NEXT:  # %bb.2:
4578; LA64-NEXT:    move $a0, $a2
4579; LA64-NEXT:    ret
4580  %1 = atomicrmw nand ptr %a, i64 %b monotonic
4581  ret i64 %1
4582}
4583
4584define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
4585; LA32-LABEL: atomicrmw_and_i8_monotonic:
4586; LA32:       # %bb.0:
4587; LA32-NEXT:    slli.w $a2, $a0, 3
4588; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4589; LA32-NEXT:    ori $a3, $zero, 255
4590; LA32-NEXT:    sll.w $a3, $a3, $a2
4591; LA32-NEXT:    andi $a1, $a1, 255
4592; LA32-NEXT:    sll.w $a1, $a1, $a2
4593; LA32-NEXT:    orn $a1, $a1, $a3
4594; LA32-NEXT:  .LBB148_1: # =>This Inner Loop Header: Depth=1
4595; LA32-NEXT:    ll.w $a3, $a0, 0
4596; LA32-NEXT:    and $a4, $a3, $a1
4597; LA32-NEXT:    sc.w $a4, $a0, 0
4598; LA32-NEXT:    beqz $a4, .LBB148_1
4599; LA32-NEXT:  # %bb.2:
4600; LA32-NEXT:    srl.w $a0, $a3, $a2
4601; LA32-NEXT:    ret
4602;
4603; LA64-LABEL: atomicrmw_and_i8_monotonic:
4604; LA64:       # %bb.0:
4605; LA64-NEXT:    slli.d $a2, $a0, 3
4606; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4607; LA64-NEXT:    ori $a3, $zero, 255
4608; LA64-NEXT:    sll.w $a3, $a3, $a2
4609; LA64-NEXT:    andi $a1, $a1, 255
4610; LA64-NEXT:    sll.w $a1, $a1, $a2
4611; LA64-NEXT:    orn $a1, $a1, $a3
4612; LA64-NEXT:    amand.w $a3, $a1, $a0
4613; LA64-NEXT:    srl.w $a0, $a3, $a2
4614; LA64-NEXT:    ret
4615  %1 = atomicrmw and ptr %a, i8 %b monotonic
4616  ret i8 %1
4617}
4618
4619define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
4620; LA32-LABEL: atomicrmw_and_i16_monotonic:
4621; LA32:       # %bb.0:
4622; LA32-NEXT:    slli.w $a2, $a0, 3
4623; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
4624; LA32-NEXT:    lu12i.w $a3, 15
4625; LA32-NEXT:    ori $a3, $a3, 4095
4626; LA32-NEXT:    sll.w $a3, $a3, $a2
4627; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
4628; LA32-NEXT:    sll.w $a1, $a1, $a2
4629; LA32-NEXT:    orn $a1, $a1, $a3
4630; LA32-NEXT:  .LBB149_1: # =>This Inner Loop Header: Depth=1
4631; LA32-NEXT:    ll.w $a3, $a0, 0
4632; LA32-NEXT:    and $a4, $a3, $a1
4633; LA32-NEXT:    sc.w $a4, $a0, 0
4634; LA32-NEXT:    beqz $a4, .LBB149_1
4635; LA32-NEXT:  # %bb.2:
4636; LA32-NEXT:    srl.w $a0, $a3, $a2
4637; LA32-NEXT:    ret
4638;
4639; LA64-LABEL: atomicrmw_and_i16_monotonic:
4640; LA64:       # %bb.0:
4641; LA64-NEXT:    slli.d $a2, $a0, 3
4642; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
4643; LA64-NEXT:    lu12i.w $a3, 15
4644; LA64-NEXT:    ori $a3, $a3, 4095
4645; LA64-NEXT:    sll.w $a3, $a3, $a2
4646; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
4647; LA64-NEXT:    sll.w $a1, $a1, $a2
4648; LA64-NEXT:    orn $a1, $a1, $a3
4649; LA64-NEXT:    amand.w $a3, $a1, $a0
4650; LA64-NEXT:    srl.w $a0, $a3, $a2
4651; LA64-NEXT:    ret
4652  %1 = atomicrmw and ptr %a, i16 %b monotonic
4653  ret i16 %1
4654}
4655
4656define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
4657; LA32-LABEL: atomicrmw_and_i32_monotonic:
4658; LA32:       # %bb.0:
4659; LA32-NEXT:  .LBB150_1: # =>This Inner Loop Header: Depth=1
4660; LA32-NEXT:    ll.w $a2, $a0, 0
4661; LA32-NEXT:    and $a3, $a2, $a1
4662; LA32-NEXT:    sc.w $a3, $a0, 0
4663; LA32-NEXT:    beqz $a3, .LBB150_1
4664; LA32-NEXT:  # %bb.2:
4665; LA32-NEXT:    move $a0, $a2
4666; LA32-NEXT:    ret
4667;
4668; LA64-LABEL: atomicrmw_and_i32_monotonic:
4669; LA64:       # %bb.0:
4670; LA64-NEXT:    amand.w $a2, $a1, $a0
4671; LA64-NEXT:    move $a0, $a2
4672; LA64-NEXT:    ret
4673  %1 = atomicrmw and ptr %a, i32 %b monotonic
4674  ret i32 %1
4675}
4676
4677define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
4678; LA32-LABEL: atomicrmw_and_i64_monotonic:
4679; LA32:       # %bb.0:
4680; LA32-NEXT:    addi.w $sp, $sp, -16
4681; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
4682; LA32-NEXT:    move $a3, $zero
4683; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
4684; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
4685; LA32-NEXT:    addi.w $sp, $sp, 16
4686; LA32-NEXT:    ret
4687;
4688; LA64-LABEL: atomicrmw_and_i64_monotonic:
4689; LA64:       # %bb.0:
4690; LA64-NEXT:    amand.d $a2, $a1, $a0
4691; LA64-NEXT:    move $a0, $a2
4692; LA64-NEXT:    ret
4693  %1 = atomicrmw and ptr %a, i64 %b monotonic
4694  ret i64 %1
4695}
4696
define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i8_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB152_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB152_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amor.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i16_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB153_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    or $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB153_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amor.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i32_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB154_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    or $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB154_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    amor.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i64_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    move $a3, $zero
; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_or_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    amor.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw or ptr %a, i64 %b monotonic
  ret i64 %1
}

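; xor follows the same sub-word scheme as or: LA32 retries with ll.w/sc.w on the
; containing word, LA64 emits a single amxor.w.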
define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i8_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB156_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    xor $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB156_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amxor.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i16_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:    slli.w $a2, $a0, 3
; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT:    sll.w $a1, $a1, $a2
; LA32-NEXT:  .LBB157_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a3, $a0, 0
; LA32-NEXT:    xor $a4, $a3, $a1
; LA32-NEXT:    sc.w $a4, $a0, 0
; LA32-NEXT:    beqz $a4, .LBB157_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    srl.w $a0, $a3, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    amxor.w $a3, $a1, $a0
; LA64-NEXT:    srl.w $a0, $a3, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i32_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:  .LBB158_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT:    ll.w $a2, $a0, 0
; LA32-NEXT:    xor $a3, $a2, $a1
; LA32-NEXT:    sc.w $a3, $a0, 0
; LA32-NEXT:    beqz $a3, .LBB158_1
; LA32-NEXT:  # %bb.2:
; LA32-NEXT:    move $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    amxor.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i32 %b monotonic
  ret i32 %1
}

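; As with the other i64 cases, LA32 falls back to a __atomic_fetch_xor_8 libcall
; while LA64 emits amxor.d.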
define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i64_monotonic:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    move $a3, $zero
; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: atomicrmw_xor_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    amxor.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw xor ptr %a, i64 %b monotonic
  ret i64 %1
}
