/* UltraSPARC 64 mpn_divexact_1 -- mpn by limb exact division.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000, 2001, 2003, 2019 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"


/*                 64-bit divisor   32-bit divisor
                    cycles/limb      cycles/limb
                     (approx)         (approx)
   Ultrasparc 2i:      110               70
*/


/* There are two key ideas here to reduce mulx's.  Firstly, when the
   divisor is 32 bits the high half of q*d can be calculated without the
   two 32x32->64 cross-products involving the high 32 bits of the divisor,
   those being zero of course.  Secondly, umul_ppmm_lowequal and
   umul_ppmm_half_lowequal save one mulx (each) knowing the low half of
   q*d is equal to the input limb l.

   For size==1, a simple udivx is used.  This is faster than calculating
   an inverse.

   For a 32-bit divisor and small sizes, an attempt was made at a simple
   udivx loop (two per 64-bit limb), but it turned out to be slower than
   mul-by-inverse.  At size==2 the inverse is about 260 cycles total
   compared to a udivx at 291.  Perhaps the latter would suit when size==2
   but the high 32 bits of the second limb are zero (saving one udivx),
   but it doesn't seem worth a special case just for that.  */
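
/* Illustration only, not part of GMP: a minimal sketch, in plain uint64_t
   arithmetic, of the mul-by-inverse step the loops below rely on.  The
   helper names are hypothetical.  For an odd divisor d with inverse
   d^-1 mod 2^64, q = l * inverse satisfies q*d == l mod 2^64, so when d
   divides l exactly, q is the true quotient limb and only the high half
   of q*d is needed to propagate a borrow.  Guarded out so it cannot
   affect the build.  */
#if 0
#include <stdint.h>
#include <assert.h>

/* One exact-division step: quotient limb from input limb l.  */
static uint64_t
divexact_step_demo (uint64_t l, uint64_t d, uint64_t inverse)
{
  uint64_t q = l * inverse;   /* quotient limb, mod 2^64 */
  assert (q * d == l);        /* low half of q*d always equals l */
  return q;
}

/* One way to obtain such an inverse (GMP's real interface is
   binvert_limb): Newton's iteration inv *= 2 - d*inv doubles the number
   of correct low bits per step; d itself is already correct to 3 bits
   for odd d, so five steps reach 64 bits.  */
static uint64_t
binvert_demo (uint64_t d)   /* d must be odd */
{
  uint64_t inv = d;        /*  3 bits */
  inv *= 2 - d * inv;      /*  6 bits */
  inv *= 2 - d * inv;      /* 12 bits */
  inv *= 2 - d * inv;      /* 24 bits */
  inv *= 2 - d * inv;      /* 48 bits */
  inv *= 2 - d * inv;      /* 96 >= 64 bits */
  return inv;
}
#endif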

void
mpn_divexact_1 (mp_ptr dst, mp_srcptr src, mp_size_t size, mp_limb_t divisor)
{
  mp_limb_t  inverse, s, s_next, c, l, ls, q;
  unsigned   rshift, lshift;
  mp_limb_t  lshift_mask;
  mp_limb_t  divisor_h;

  ASSERT (size >= 1);
  ASSERT (divisor != 0);
  ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size));
  ASSERT_MPN (src, size);
  ASSERT_LIMB (divisor);

  s = *src++;                 /* src low limb */
  size--;
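  /* Down to a single limb: one hardware divide (udivx) is cheaper here
     than setting up an inverse, per the notes above.  */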
  if (size == 0)
    {
      *dst = s / divisor;
      return;
    }

  if ((divisor & 1) == 0)
    {
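      /* Even divisor: strip its trailing zero bits so it becomes odd and
         hence invertible mod 2^64; the bits shifted out of each limb are
         folded in from the next limb in the loops below.  */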
      count_trailing_zeros (rshift, divisor);
      divisor >>= rshift;
      lshift = 64 - rshift;

      lshift_mask = MP_LIMB_T_MAX;
    }
  else
    {
      rshift = 0;

      /* rshift==0 means no shift, and s_next << 64 would be undefined, so
         use lshift==0 and mask that term out of the result instead */
      lshift = 0;
      lshift_mask = 0;
    }

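  /* inverse = divisor^-1 mod 2^64, valid since the divisor is odd at
     this point (e.g. by a Newton iteration as sketched above).  */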
  binvert_limb (inverse, divisor);

  c = 0;
  divisor_h = HIGH32 (divisor);

  if (divisor_h == 0)
    {
      /* 32-bit divisor */
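      /* Each iteration shifts the current limb, subtracts the borrow c,
         multiplies by the inverse to get a quotient limb, and needs only
         the high half of q*divisor as the next borrow; the cross-products
         with the zero high 32 bits of the divisor are skipped.  */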
      do
        {
          s_next = *src++;
          ls = (s >> rshift) | ((s_next << lshift) & lshift_mask);
          s = s_next;

          SUBC_LIMB (c, l, ls, c);

          q = l * inverse;
          *dst++ = q;

          umul_ppmm_half_lowequal (l, q, divisor, l);
          c += l;

          size--;
        }
      while (size != 0);

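      /* Last limb: nothing above to fold in, so just shift and subtract
         the final borrow; the division is exact, so no remainder is
         left.  */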
      ls = s >> rshift;
      l = ls - c;
      q = l * inverse;
      *dst = q;
    }
  else
    {
      /* 64-bit divisor */
      mp_limb_t  divisor_l = LOW32 (divisor);
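      /* Same loop as the 32-bit case, but the high half of q*divisor now
         needs both 32-bit halves of the divisor; umul_ppmm_lowequal still
         saves one mulx since the low half of q*divisor is known to equal
         l.  */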
      do
        {
          s_next = *src++;
          ls = (s >> rshift) | ((s_next << lshift) & lshift_mask);
          s = s_next;

          SUBC_LIMB (c, l, ls, c);

          q = l * inverse;
          *dst++ = q;

          umul_ppmm_lowequal (l, q, divisor, divisor_h, divisor_l, l);
          c += l;

          size--;
        }
      while (size != 0);

      ls = s >> rshift;
      l = ls - c;
      q = l * inverse;
      *dst = q;
    }
}