xref: /llvm-project/llvm/test/CodeGen/RISCV/div.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IM %s

; udiv: i32 unsigned division with a variable divisor.
define i32 @udiv(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: udiv:
; RV32I:       # %bb.0:
; RV32I-NEXT:    tail __udivsi3
;
; RV32IM-LABEL: udiv:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, %b
  ret i32 %1
}

; udiv_constant: i32 unsigned division by the constant 5 (magic-multiply on M).
define i32 @udiv_constant(i32 %a) nounwind {
; RV32I-LABEL: udiv_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    tail __udivsi3
;
; RV32IM-LABEL: udiv_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 838861
; RV32IM-NEXT:    addi a1, a1, -819
; RV32IM-NEXT:    mulhu a0, a0, a1
; RV32IM-NEXT:    srli a0, a0, 2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 32
; RV64IM-NEXT:    lui a1, 838861
; RV64IM-NEXT:    addi a1, a1, -819
; RV64IM-NEXT:    slli a1, a1, 32
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 34
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, 5
  ret i32 %1
}

; udiv_pow2: i32 unsigned division by 8 lowers to a plain logical right shift.
define i32 @udiv_pow2(i32 %a) nounwind {
; RV32I-LABEL: udiv_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a0, a0, 3
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srli a0, a0, 3
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    srliw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, 8
  ret i32 %1
}

; udiv_constant_lhs: constant dividend (10) divided by a variable i32.
define i32 @udiv_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: udiv_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    tail __udivsi3
;
; RV32IM-LABEL: udiv_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    divu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 32
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i32 10, %a
  ret i32 %1
}

; udiv64: i64 unsigned division; libcall on RV32, divu on RV64M.
define i64 @udiv64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: udiv64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __udivdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __udivdi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    tail __udivdi3
;
; RV64IM-LABEL: udiv64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divu a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i64 %a, %b
  ret i64 %1
}

; udiv64_constant: i64 unsigned division by 5; RV32IM expands via 32-bit mulhu chain.
define i64 @udiv64_constant(i64 %a) nounwind {
; RV32I-LABEL: udiv64_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __udivdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv64_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    add a2, a0, a1
; RV32IM-NEXT:    lui a3, 838861
; RV32IM-NEXT:    sltu a4, a2, a0
; RV32IM-NEXT:    addi a5, a3, -819
; RV32IM-NEXT:    addi a3, a3, -820
; RV32IM-NEXT:    add a2, a2, a4
; RV32IM-NEXT:    mulhu a4, a2, a5
; RV32IM-NEXT:    srli a6, a4, 2
; RV32IM-NEXT:    andi a4, a4, -4
; RV32IM-NEXT:    add a4, a4, a6
; RV32IM-NEXT:    sub a2, a2, a4
; RV32IM-NEXT:    sub a4, a0, a2
; RV32IM-NEXT:    sltu a0, a0, a2
; RV32IM-NEXT:    mul a2, a4, a3
; RV32IM-NEXT:    mulhu a3, a4, a5
; RV32IM-NEXT:    sub a1, a1, a0
; RV32IM-NEXT:    add a2, a3, a2
; RV32IM-NEXT:    mul a1, a1, a5
; RV32IM-NEXT:    add a1, a2, a1
; RV32IM-NEXT:    mul a0, a4, a5
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    tail __udivdi3
;
; RV64IM-LABEL: udiv64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 838861
; RV64IM-NEXT:    addiw a1, a1, -819
; RV64IM-NEXT:    slli a2, a1, 32
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 2
; RV64IM-NEXT:    ret
  %1 = udiv i64 %a, 5
  ret i64 %1
}

; udiv64_constant_lhs: constant dividend (10) divided by a variable i64.
define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: udiv64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __udivdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, 10
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    call __udivdi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    tail __udivdi3
;
; RV64IM-LABEL: udiv64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divu a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i64 10, %a
  ret i64 %1
}

; udiv8: i8 unsigned division; operands zero-extended with andi 255 first.
define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: udiv8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    andi a1, a1, 255
; RV32I-NEXT:    call __udivsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a1, a1, 255
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    andi a1, a1, 255
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a1, a1, 255
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, %b
  ret i8 %1
}

; udiv8_constant: i8 unsigned division by 5 (multiply by 205, shift by 10 on M).
define i8 @udiv8_constant(i8 %a) nounwind {
; RV32I-LABEL: udiv8_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __udivsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    li a1, 205
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    srli a0, a0, 10
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 205
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 10
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, 5
  ret i8 %1
}

; udiv8_pow2: i8 unsigned division by 8 lowers to a shift pair.
define i8 @udiv8_pow2(i8 %a) nounwind {
; RV32I-LABEL: udiv8_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srli a0, a0, 27
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srli a0, a0, 27
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srli a0, a0, 59
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srli a0, a0, 59
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, 8
  ret i8 %1
}

; udiv8_constant_lhs: constant dividend (10) divided by a variable i8.
define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: udiv8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a1, a0, 255
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __udivsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    divu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a1, a0, 255
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i8 10, %a
  ret i8 %1
}

; udiv16: i16 unsigned division; operands masked to 16 bits via lui 16 / addi -1.
define i16 @udiv16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: udiv16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a2, 16
; RV32I-NEXT:    addi a2, a2, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    call __udivsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 16
; RV32IM-NEXT:    addi a2, a2, -1
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    and a0, a0, a2
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a2, 16
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a2, 16
; RV64IM-NEXT:    addi a2, a2, -1
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    and a0, a0, a2
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, %b
  ret i16 %1
}

; udiv16_constant: i16 unsigned division by 5.
define i16 @udiv16_constant(i16 %a) nounwind {
; RV32I-LABEL: udiv16_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __udivsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    lui a1, 838864
; RV32IM-NEXT:    mulhu a0, a0, a1
; RV32IM-NEXT:    srli a0, a0, 18
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 52429
; RV64IM-NEXT:    slli a1, a1, 4
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 18
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, 5
  ret i16 %1
}

; udiv16_pow2: i16 unsigned division by 8 lowers to a shift pair.
define i16 @udiv16_pow2(i16 %a) nounwind {
; RV32I-LABEL: udiv16_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 19
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 19
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 51
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 51
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, 8
  ret i16 %1
}

; udiv16_constant_lhs: constant dividend (10) divided by a variable i16.
define i16 @udiv16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: udiv16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __udivsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 16
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    divu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a1, a0, 48
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 48
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i16 10, %a
  ret i16 %1
}

; sdiv: i32 signed division with a variable divisor.
define i32 @sdiv(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sdiv:
; RV32I:       # %bb.0:
; RV32I-NEXT:    tail __divsi3
;
; RV32IM-LABEL: sdiv:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    div a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __divdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, %b
  ret i32 %1
}

; sdiv_constant: i32 signed division by 5 (signed magic multiply on M).
define i32 @sdiv_constant(i32 %a) nounwind {
; RV32I-LABEL: sdiv_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    tail __divsi3
;
; RV32IM-LABEL: sdiv_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 419430
; RV32IM-NEXT:    addi a1, a1, 1639
; RV32IM-NEXT:    mulh a0, a0, a1
; RV32IM-NEXT:    srli a1, a0, 31
; RV32IM-NEXT:    srai a0, a0, 1
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 419430
; RV64IM-NEXT:    addiw a1, a1, 1639
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 33
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 5
  ret i32 %1
}

; sdiv_pow2: i32 signed division by 8; bias-then-shift sequence.
define i32 @sdiv_pow2(i32 %a) nounwind {
; RV32I-LABEL: sdiv_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srai a0, a0, 3
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    srai a0, a0, 3
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 29
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    sraiw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 29
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    sraiw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 8
  ret i32 %1
}

; sdiv_pow2_2: i32 signed division by 65536 (2^16).
define i32 @sdiv_pow2_2(i32 %a) nounwind {
; RV32I-LABEL: sdiv_pow2_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 16
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv_pow2_2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 16
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_pow2_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 16
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    sraiw a0, a0, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_pow2_2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 16
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    sraiw a0, a0, 16
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 65536
  ret i32 %1
}

; sdiv_constant_lhs: constant dividend (-10) divided by a variable i32.
define i32 @sdiv_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: sdiv_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    tail __divsi3
;
; RV32IM-LABEL: sdiv_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    div a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i32 -10, %a
  ret i32 %1
}

; sdiv64: i64 signed division; libcall on RV32, div on RV64M.
define i64 @sdiv64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: sdiv64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __divdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __divdi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    tail __divdi3
;
; RV64IM-LABEL: sdiv64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    div a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i64 %a, %b
  ret i64 %1
}

; sdiv64_constant: i64 signed division by 5; RV64M loads the magic multiplier
; from a constant pool (.LCPI21_0).
define i64 @sdiv64_constant(i64 %a) nounwind {
; RV32I-LABEL: sdiv64_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __divdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    li a2, 5
; RV32IM-NEXT:    li a3, 0
; RV32IM-NEXT:    call __divdi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    tail __divdi3
;
; RV64IM-LABEL: sdiv64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, %hi(.LCPI21_0)
; RV64IM-NEXT:    ld a1, %lo(.LCPI21_0)(a1)
; RV64IM-NEXT:    mulh a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 1
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i64 %a, 5
  ret i64 %1
}

; sdiv64_constant_lhs: constant dividend (10) divided by a variable i64.
define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: sdiv64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __divdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, 10
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    call __divdi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    tail __divdi3
;
; RV64IM-LABEL: sdiv64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    div a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i64 10, %a
  ret i64 %1
}

; Although this sdiv has two sexti32 operands, it shouldn't compile to divw on
; RV64M as that wouldn't produce the correct result for e.g. INT_MIN/-1.

define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sdiv64_sext_operands:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srai a3, a2, 31
; RV32I-NEXT:    call __divdi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64_sext_operands:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a2, a1
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srai a3, a2, 31
; RV32IM-NEXT:    call __divdi3
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64_sext_operands:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    tail __divdi3
;
; RV64IM-LABEL: sdiv64_sext_operands:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    sext.w a1, a1
; RV64IM-NEXT:    div a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = sdiv i64 %1, %2
  ret i64 %3
}

; sdiv8: i8 signed division; operands sign-extended via slli/srai pairs.
define i8 @sdiv8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sdiv8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    slli a1, a1, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    srai a1, a1, 24
; RV32I-NEXT:    call __divsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 24
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a1, a1, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    div a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    slli a1, a1, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    srai a1, a1, 56
; RV64I-NEXT:    call __divdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 56
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a1, a1, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, %b
  ret i8 %1
}

; sdiv8_constant: i8 signed division by 5 (multiply by 103 on M).
define i8 @sdiv8_constant(i8 %a) nounwind {
; RV32I-LABEL: sdiv8_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __divsi3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    li a1, 103
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    srli a1, a0, 31
; RV32IM-NEXT:    srai a0, a0, 9
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    li a1, 103
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 9
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, 5
  ret i8 %1
}

; sdiv8_pow2: i8 signed division by 8; bias-then-shift sequence on the narrow type.
define i8 @sdiv8_pow2(i8 %a) nounwind {
; RV32I-LABEL: sdiv8_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 24
; RV32I-NEXT:    srai a1, a1, 2
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 27
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 24
; RV32IM-NEXT:    srai a1, a1, 2
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 27
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 56
; RV64I-NEXT:    srai a1, a1, 2
; RV64I-NEXT:    srli a1, a1, 61
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 59
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 56
; RV64IM-NEXT:    srai a1, a1, 2
; RV64IM-NEXT:    srli a1, a1, 61
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 59
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, 8
  ret i8 %1
}

; Signed i8 division with a constant *dividend* (-10) and a variable divisor.
; The magic-multiply strength reduction does not apply when the divisor is
; unknown, so: without M a libcall is emitted (__divsi3 on RV32, __divdi3 on
; RV64) after sign-extending the divisor from i8; with M a single div (RV32IM)
; or divw (RV64IM) is used. Note the register shuffle in the I configs: the
; sign-extended divisor moves to a1 and the constant -10 is materialized in a0,
; matching the libcall's (dividend, divisor) argument order.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate rather than hand-editing them.
1058define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
1059; RV32I-LABEL: sdiv8_constant_lhs:
1060; RV32I:       # %bb.0:
1061; RV32I-NEXT:    addi sp, sp, -16
1062; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1063; RV32I-NEXT:    slli a0, a0, 24
1064; RV32I-NEXT:    srai a1, a0, 24
1065; RV32I-NEXT:    li a0, -10
1066; RV32I-NEXT:    call __divsi3
1067; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1068; RV32I-NEXT:    addi sp, sp, 16
1069; RV32I-NEXT:    ret
1070;
1071; RV32IM-LABEL: sdiv8_constant_lhs:
1072; RV32IM:       # %bb.0:
1073; RV32IM-NEXT:    slli a0, a0, 24
1074; RV32IM-NEXT:    srai a0, a0, 24
1075; RV32IM-NEXT:    li a1, -10
1076; RV32IM-NEXT:    div a0, a1, a0
1077; RV32IM-NEXT:    ret
1078;
1079; RV64I-LABEL: sdiv8_constant_lhs:
1080; RV64I:       # %bb.0:
1081; RV64I-NEXT:    addi sp, sp, -16
1082; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1083; RV64I-NEXT:    slli a0, a0, 56
1084; RV64I-NEXT:    srai a1, a0, 56
1085; RV64I-NEXT:    li a0, -10
1086; RV64I-NEXT:    call __divdi3
1087; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1088; RV64I-NEXT:    addi sp, sp, 16
1089; RV64I-NEXT:    ret
1090;
1091; RV64IM-LABEL: sdiv8_constant_lhs:
1092; RV64IM:       # %bb.0:
1093; RV64IM-NEXT:    slli a0, a0, 56
1094; RV64IM-NEXT:    srai a0, a0, 56
1095; RV64IM-NEXT:    li a1, -10
1096; RV64IM-NEXT:    divw a0, a1, a0
1097; RV64IM-NEXT:    ret
1098  %1 = sdiv i8 -10, %a
1099  ret i8 %1
1100}
1101
; Signed i16 division with two variable operands. Both inputs are first
; sign-extended from i16 (slli/srai by 16 on RV32, by 48 on RV64) since
; RISC-V has no sub-word divide. Without M this becomes a libcall
; (__divsi3 / __divdi3, with the usual ra spill/reload around the call);
; with M it is a single div (RV32IM) or divw (RV64IM).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate rather than hand-editing them.
1102define i16 @sdiv16(i16 %a, i16 %b) nounwind {
1103; RV32I-LABEL: sdiv16:
1104; RV32I:       # %bb.0:
1105; RV32I-NEXT:    addi sp, sp, -16
1106; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1107; RV32I-NEXT:    slli a0, a0, 16
1108; RV32I-NEXT:    slli a1, a1, 16
1109; RV32I-NEXT:    srai a0, a0, 16
1110; RV32I-NEXT:    srai a1, a1, 16
1111; RV32I-NEXT:    call __divsi3
1112; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1113; RV32I-NEXT:    addi sp, sp, 16
1114; RV32I-NEXT:    ret
1115;
1116; RV32IM-LABEL: sdiv16:
1117; RV32IM:       # %bb.0:
1118; RV32IM-NEXT:    slli a1, a1, 16
1119; RV32IM-NEXT:    slli a0, a0, 16
1120; RV32IM-NEXT:    srai a1, a1, 16
1121; RV32IM-NEXT:    srai a0, a0, 16
1122; RV32IM-NEXT:    div a0, a0, a1
1123; RV32IM-NEXT:    ret
1124;
1125; RV64I-LABEL: sdiv16:
1126; RV64I:       # %bb.0:
1127; RV64I-NEXT:    addi sp, sp, -16
1128; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1129; RV64I-NEXT:    slli a0, a0, 48
1130; RV64I-NEXT:    slli a1, a1, 48
1131; RV64I-NEXT:    srai a0, a0, 48
1132; RV64I-NEXT:    srai a1, a1, 48
1133; RV64I-NEXT:    call __divdi3
1134; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1135; RV64I-NEXT:    addi sp, sp, 16
1136; RV64I-NEXT:    ret
1137;
1138; RV64IM-LABEL: sdiv16:
1139; RV64IM:       # %bb.0:
1140; RV64IM-NEXT:    slli a1, a1, 48
1141; RV64IM-NEXT:    slli a0, a0, 48
1142; RV64IM-NEXT:    srai a1, a1, 48
1143; RV64IM-NEXT:    srai a0, a0, 48
1144; RV64IM-NEXT:    divw a0, a0, a1
1145; RV64IM-NEXT:    ret
1146  %1 = sdiv i16 %a, %b
1147  ret i16 %1
1148}
1149
; Signed i16 division by the constant 5. With M, the divide is strength-reduced
; to a multiply by the magic constant 26215 (lui 6 + addi(w) 1639 =
; 6*4096 + 1639 = 0x6667), an arithmetic shift, and an add of the extracted
; sign bit to round the quotient toward zero -- no div instruction. Without M,
; the operand is sign-extended from i16 and a libcall (__divsi3 / __divdi3)
; is used.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate rather than hand-editing them.
1150define i16 @sdiv16_constant(i16 %a) nounwind {
1151; RV32I-LABEL: sdiv16_constant:
1152; RV32I:       # %bb.0:
1153; RV32I-NEXT:    addi sp, sp, -16
1154; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1155; RV32I-NEXT:    slli a0, a0, 16
1156; RV32I-NEXT:    srai a0, a0, 16
1157; RV32I-NEXT:    li a1, 5
1158; RV32I-NEXT:    call __divsi3
1159; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1160; RV32I-NEXT:    addi sp, sp, 16
1161; RV32I-NEXT:    ret
1162;
1163; RV32IM-LABEL: sdiv16_constant:
1164; RV32IM:       # %bb.0:
1165; RV32IM-NEXT:    slli a0, a0, 16
1166; RV32IM-NEXT:    lui a1, 6
1167; RV32IM-NEXT:    srai a0, a0, 16
1168; RV32IM-NEXT:    addi a1, a1, 1639
1169; RV32IM-NEXT:    mul a0, a0, a1
1170; RV32IM-NEXT:    srli a1, a0, 31
1171; RV32IM-NEXT:    srai a0, a0, 17
1172; RV32IM-NEXT:    add a0, a0, a1
1173; RV32IM-NEXT:    ret
1174;
1175; RV64I-LABEL: sdiv16_constant:
1176; RV64I:       # %bb.0:
1177; RV64I-NEXT:    addi sp, sp, -16
1178; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1179; RV64I-NEXT:    slli a0, a0, 48
1180; RV64I-NEXT:    srai a0, a0, 48
1181; RV64I-NEXT:    li a1, 5
1182; RV64I-NEXT:    call __divdi3
1183; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1184; RV64I-NEXT:    addi sp, sp, 16
1185; RV64I-NEXT:    ret
1186;
1187; RV64IM-LABEL: sdiv16_constant:
1188; RV64IM:       # %bb.0:
1189; RV64IM-NEXT:    slli a0, a0, 48
1190; RV64IM-NEXT:    lui a1, 6
1191; RV64IM-NEXT:    srai a0, a0, 48
1192; RV64IM-NEXT:    addiw a1, a1, 1639
1193; RV64IM-NEXT:    mul a0, a0, a1
1194; RV64IM-NEXT:    srli a1, a0, 63
1195; RV64IM-NEXT:    srai a0, a0, 17
1196; RV64IM-NEXT:    add a0, a0, a1
1197; RV64IM-NEXT:    ret
1198  %1 = sdiv i16 %a, 5
1199  ret i16 %1
1200}
1201
; Signed i16 division by the power-of-two constant 8. Mirrors sdiv8_pow2 at
; i16 width: all four configurations use the branchless shift sequence (derive
; a bias from the dividend's sign bits via slli/srai/srli, add it, then
; sign-extend and arithmetic-shift right by 3). No divide instruction or
; libcall in any configuration, including without the M extension.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate rather than hand-editing them.
1202define i16 @sdiv16_pow2(i16 %a) nounwind {
1203; RV32I-LABEL: sdiv16_pow2:
1204; RV32I:       # %bb.0:
1205; RV32I-NEXT:    slli a1, a0, 16
1206; RV32I-NEXT:    srai a1, a1, 2
1207; RV32I-NEXT:    srli a1, a1, 29
1208; RV32I-NEXT:    add a0, a0, a1
1209; RV32I-NEXT:    slli a0, a0, 16
1210; RV32I-NEXT:    srai a0, a0, 19
1211; RV32I-NEXT:    ret
1212;
1213; RV32IM-LABEL: sdiv16_pow2:
1214; RV32IM:       # %bb.0:
1215; RV32IM-NEXT:    slli a1, a0, 16
1216; RV32IM-NEXT:    srai a1, a1, 2
1217; RV32IM-NEXT:    srli a1, a1, 29
1218; RV32IM-NEXT:    add a0, a0, a1
1219; RV32IM-NEXT:    slli a0, a0, 16
1220; RV32IM-NEXT:    srai a0, a0, 19
1221; RV32IM-NEXT:    ret
1222;
1223; RV64I-LABEL: sdiv16_pow2:
1224; RV64I:       # %bb.0:
1225; RV64I-NEXT:    slli a1, a0, 48
1226; RV64I-NEXT:    srai a1, a1, 2
1227; RV64I-NEXT:    srli a1, a1, 61
1228; RV64I-NEXT:    add a0, a0, a1
1229; RV64I-NEXT:    slli a0, a0, 48
1230; RV64I-NEXT:    srai a0, a0, 51
1231; RV64I-NEXT:    ret
1232;
1233; RV64IM-LABEL: sdiv16_pow2:
1234; RV64IM:       # %bb.0:
1235; RV64IM-NEXT:    slli a1, a0, 48
1236; RV64IM-NEXT:    srai a1, a1, 2
1237; RV64IM-NEXT:    srli a1, a1, 61
1238; RV64IM-NEXT:    add a0, a0, a1
1239; RV64IM-NEXT:    slli a0, a0, 48
1240; RV64IM-NEXT:    srai a0, a0, 51
1241; RV64IM-NEXT:    ret
1242  %1 = sdiv i16 %a, 8
1243  ret i16 %1
1244}
1245
; Signed i16 division with a constant *dividend* (-10) and a variable divisor.
; Mirrors sdiv8_constant_lhs at i16 width: the divisor is sign-extended from
; i16, then without M a libcall is used (__divsi3 on RV32, __divdi3 on RV64,
; with -10 materialized into a0 as the first libcall argument) and with M a
; single div (RV32IM) or divw (RV64IM) is emitted.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate rather than hand-editing them.
1246define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
1247; RV32I-LABEL: sdiv16_constant_lhs:
1248; RV32I:       # %bb.0:
1249; RV32I-NEXT:    addi sp, sp, -16
1250; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1251; RV32I-NEXT:    slli a0, a0, 16
1252; RV32I-NEXT:    srai a1, a0, 16
1253; RV32I-NEXT:    li a0, -10
1254; RV32I-NEXT:    call __divsi3
1255; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1256; RV32I-NEXT:    addi sp, sp, 16
1257; RV32I-NEXT:    ret
1258;
1259; RV32IM-LABEL: sdiv16_constant_lhs:
1260; RV32IM:       # %bb.0:
1261; RV32IM-NEXT:    slli a0, a0, 16
1262; RV32IM-NEXT:    srai a0, a0, 16
1263; RV32IM-NEXT:    li a1, -10
1264; RV32IM-NEXT:    div a0, a1, a0
1265; RV32IM-NEXT:    ret
1266;
1267; RV64I-LABEL: sdiv16_constant_lhs:
1268; RV64I:       # %bb.0:
1269; RV64I-NEXT:    addi sp, sp, -16
1270; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1271; RV64I-NEXT:    slli a0, a0, 48
1272; RV64I-NEXT:    srai a1, a0, 48
1273; RV64I-NEXT:    li a0, -10
1274; RV64I-NEXT:    call __divdi3
1275; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1276; RV64I-NEXT:    addi sp, sp, 16
1277; RV64I-NEXT:    ret
1278;
1279; RV64IM-LABEL: sdiv16_constant_lhs:
1280; RV64IM:       # %bb.0:
1281; RV64IM-NEXT:    slli a0, a0, 48
1282; RV64IM-NEXT:    srai a0, a0, 48
1283; RV64IM-NEXT:    li a1, -10
1284; RV64IM-NEXT:    divw a0, a1, a0
1285; RV64IM-NEXT:    ret
1286  %1 = sdiv i16 -10, %a
1287  ret i16 %1
1288}
1289