/* Integer division routines for RISC-V.

   Copyright (C) 2016-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

  .text
  .align 2

#if __riscv_xlen == 32
/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
# define __udivdi3 __udivsi3
# define __umoddi3 __umodsi3
# define __divdi3 __divsi3
# define __moddi3 __modsi3
#else
  .globl __udivsi3
__udivsi3:
  /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t.  */
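  /* Shifting both operands left by 32 clears any stale upper bits and
     makes (a0 << 32) / (a1 << 32) equal to the 32-bit unsigned
     quotient, so the 64-bit routine can be reused unchanged.  */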
  sll    a0, a0, 32
  sll    a1, a1, 32
  move   t0, ra
  jal    __udivdi3
  sext.w a0, a0
  jr     t0

  .globl __umodsi3
__umodsi3:
  /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); the remainder is in a1.  */
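  /* The sll/srl pairs zero-extend both operands, since the ABI keeps
     32-bit values sign-extended in 64-bit registers.  After the call
     the remainder is in a1 and is sign-extended into a0 for return.  */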
  sll    a0, a0, 32
  sll    a1, a1, 32
  srl    a0, a0, 32
  srl    a1, a1, 32
  move   t0, ra
  jal    __udivdi3
  sext.w a0, a1
  jr     t0

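  /* __modsi3 can simply alias __moddi3: the ABI keeps 32-bit arguments
     sign-extended in 64-bit registers, so the 64-bit remainder of the
     sign-extended operands is already the correctly sign-extended
     32-bit remainder.  */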
  .globl __modsi3
  __modsi3 = __moddi3

  .globl __divsi3
__divsi3:
  /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3.  */
  li    t0, -1
  beq   a1, t0, .L20
#endif

  .globl __divdi3
__divdi3:
  bltz  a0, .L10
  bltz  a1, .L11
  /* Since the quotient is positive, fall into __udivdi3.  */

  .globl __udivdi3
__udivdi3:
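  /* Unsigned shift-and-subtract division.  a1 holds the running
     remainder (initially the dividend), a2 the shifted divisor, a3 the
     quotient bit under test, and a0 the quotient being assembled.  The
     divisor is shifted left until it is at least the dividend or its
     top bit is set, then one quotient bit is produced per iteration on
     the way back down, subtracting whenever the divisor still fits.
     Returns the quotient in a0 and the remainder in a1; division by
     zero yields an all-ones quotient and leaves the dividend in a1.  */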
  mv    a2, a1
  mv    a1, a0
  li    a0, -1
  beqz  a2, .L5
  li    a3, 1
  bgeu  a2, a1, .L2
.L1:
  blez  a2, .L2
  slli  a2, a2, 1
  slli  a3, a3, 1
  bgtu  a1, a2, .L1
.L2:
  li    a0, 0
.L3:
  bltu  a1, a2, .L4
  sub   a1, a1, a2
  or    a0, a0, a3
.L4:
  srli  a3, a3, 1
  srli  a2, a2, 1
  bnez  a3, .L3
.L5:
  ret

  .globl __umoddi3
__umoddi3:
  /* Call __udivdi3(a0, a1), then return the remainder, which is in a1.  */
  move  t0, ra
  jal   __udivdi3
  move  a0, a1
  jr    t0

  /* Handle negative arguments to __divdi3.  */
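  /* C division truncates toward zero, so the quotient is computed on
     the absolute values with __udivdi3 and negated only when exactly
     one operand was negative.  */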
.L10:
  neg   a0, a0
  bgez  a1, .L12      /* Compute __udivdi3(-a0, a1), then negate the result.  */
  neg   a1, a1
  j     __udivdi3     /* Compute __udivdi3(-a0, -a1).  */
.L11:                 /* Compute __udivdi3(a0, -a1), then negate the result.  */
  neg   a1, a1
.L12:
  move  t0, ra
  jal   __udivdi3
  neg   a0, a0
  jr    t0

  .globl __moddi3
__moddi3:
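  /* The C remainder takes the sign of the dividend, so only a negative
     dividend requires negating the result; a negative divisor is simply
     replaced by its absolute value.  */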
  move   t0, ra
  bltz   a1, .L31
  bltz   a0, .L32
.L30:
  jal    __udivdi3    /* The dividend is not negative.  */
  move   a0, a1
  jr     t0
.L31:
  neg    a1, a1
  bgez   a0, .L30
.L32:
  neg    a0, a0
  jal    __udivdi3    /* The dividend is negative.  */
  neg    a0, a1
  jr     t0

#if __riscv_xlen == 64
  /* continuation of __divsi3 */
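  /* t0 is -1 here; shifting it left by 31 gives the sign-extended value
     of INT_MIN.  INT_MIN / -1 overflows, so a0 (INT_MIN) is returned
     unchanged; every other dividend takes the normal __divdi3 path.  */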
.L20:
  sll   t0, t0, 31
  bne   a0, t0, __divdi3
  ret
#endif