; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IM %s
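
; 32-bit urem selects a single remu (RV32IM) or remuw (RV64IM). Without the M
; extension it becomes a libcall; RV64I must first zero-extend both i32
; arguments before calling the 64-bit __umoddi3.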

define i32 @urem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: urem:
; RV32I:       # %bb.0:
; RV32I-NEXT:    tail __umodsi3
;
; RV32IM-LABEL: urem:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i32 %a, %b
  ret i32 %1
}
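
; A constant dividend offers nothing to strength-reduce (unlike a constant
; divisor), so the immediate is simply materialized with li into the dividend
; operand position before the remu/remuw or the libcall.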

define i32 @urem_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: urem_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    tail __umodsi3
;
; RV32IM-LABEL: urem_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    remu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 32
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i32 10, %a
  ret i32 %1
}
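
; Signed i32 srem mirrors urem, but RV64I sign-extends the arguments with
; sext.w (rather than zero-extending) before calling __moddi3, and RV64IM
; uses remw.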

define i32 @srem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srem:
; RV32I:       # %bb.0:
; RV32I-NEXT:    tail __modsi3
;
; RV32IM-LABEL: srem:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %a, %b
  ret i32 %1
}
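
; srem by a power of two needs no divide at all: a bias derived from the
; sign bit is added, the biased value is masked to a multiple of the
; divisor, and subtracting that back out yields the remainder.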

define i32 @srem_pow2(i32 %a) nounwind {
; RV32I-LABEL: srem_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a1, a0, a1
; RV32I-NEXT:    andi a1, a1, -8
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a1, a0, a1
; RV32IM-NEXT:    andi a1, a1, -8
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 29
; RV64I-NEXT:    add a1, a0, a1
; RV64I-NEXT:    andi a1, a1, -8
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 29
; RV64IM-NEXT:    add a1, a0, a1
; RV64IM-NEXT:    andi a1, a1, -8
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %a, 8
  ret i32 %1
}
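
; The same expansion with 65536: the mask 0xffff0000 no longer fits in an
; andi immediate (12 bits, sign-extended), so it is built with lui.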

define i32 @srem_pow2_2(i32 %a) nounwind {
; RV32I-LABEL: srem_pow2_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 16
; RV32I-NEXT:    add a1, a0, a1
; RV32I-NEXT:    lui a2, 1048560
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem_pow2_2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 16
; RV32IM-NEXT:    add a1, a0, a1
; RV32IM-NEXT:    lui a2, 1048560
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem_pow2_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 16
; RV64I-NEXT:    add a1, a0, a1
; RV64I-NEXT:    lui a2, 1048560
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem_pow2_2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 16
; RV64IM-NEXT:    add a1, a0, a1
; RV64IM-NEXT:    lui a2, 1048560
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %a, 65536
  ret i32 %1
}

define i32 @srem_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: srem_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    tail __modsi3
;
; RV32IM-LABEL: srem_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    rem a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    remw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i32 -10, %a
  ret i32 %1
}
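
; For i64 the picture inverts: RV32 always calls __umoddi3/__moddi3 (even
; with M, which has no instruction producing a 64-bit remainder from 32-bit
; registers), while RV64IM selects remu/rem directly.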

define i64 @urem64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: urem64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __umoddi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __umoddi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    tail __umoddi3
;
; RV64IM-LABEL: urem64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    remu a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i64 %a, %b
  ret i64 %1
}

define i64 @urem64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: urem64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __umoddi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, 10
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    call __umoddi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    tail __umoddi3
;
; RV64IM-LABEL: urem64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remu a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i64 10, %a
  ret i64 %1
}

define i64 @srem64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: srem64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __moddi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __moddi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    tail __moddi3
;
; RV64IM-LABEL: srem64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    rem a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i64 %a, %b
  ret i64 %1
}

define i64 @srem64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: srem64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    li a1, -1
; RV32I-NEXT:    call __moddi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, -10
; RV32IM-NEXT:    li a1, -1
; RV32IM-NEXT:    call __moddi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    tail __moddi3
;
; RV64IM-LABEL: srem64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    rem a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i64 -10, %a
  ret i64 %1
}
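
; Unsigned i8 operands are zero-extended with andi 255 before the divide or
; libcall.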

define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: urem8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    andi a1, a1, 255
; RV32I-NEXT:    call __umodsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a1, a1, 255
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    andi a1, a1, 255
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a1, a1, 255
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i8 %a, %b
  ret i8 %1
}

define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: urem8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a1, a0, 255
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __umodsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    remu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a1, a0, 255
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i8 10, %a
  ret i8 %1
}
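
; Signed i8 operands are sign-extended with an slli/srai pair (by 24 on
; RV32, 56 on RV64), since the base ISA has no single-instruction byte
; sign-extend.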

define i8 @srem8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: srem8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    slli a1, a1, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    srai a1, a1, 24
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 24
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a1, a1, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    slli a1, a1, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    srai a1, a1, 56
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 56
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a1, a1, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i8 %a, %b
  ret i8 %1
}

define i8 @srem8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: srem8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a1, a0, 24
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    rem a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a1, a0, 56
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    remw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i8 -10, %a
  ret i8 %1
}
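
; The i16 cases mirror the i8 ones, except that the 0xffff zero-extension
; mask is too wide for andi and is built with lui (or an slli/srli pair);
; sign-extension again uses slli/srai.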

define i16 @urem16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: urem16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a2, 16
; RV32I-NEXT:    addi a2, a2, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    call __umodsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 16
; RV32IM-NEXT:    addi a2, a2, -1
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    and a0, a0, a2
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a2, 16
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a2, 16
; RV64IM-NEXT:    addi a2, a2, -1
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    and a0, a0, a2
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i16 %a, %b
  ret i16 %1
}

define i16 @urem16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: urem16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __umodsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 16
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    remu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a1, a0, 48
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 48
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i16 10, %a
  ret i16 %1
}

define i16 @srem16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: srem16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    slli a1, a1, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    srai a1, a1, 16
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 16
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a1, a1, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    srai a1, a1, 48
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 48
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a1, a1, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i16 %a, %b
  ret i16 %1
}

define i16 @srem16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: srem16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a1, a0, 16
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    rem a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a1, a0, 48
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    remw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i16 -10, %a
  ret i16 %1
}