/* UltraSPARC 64 mpn_divexact_1 -- mpn by limb exact division.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000, 2001, 2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"


/*                 64-bit divisor   32-bit divisor
                    cycles/limb      cycles/limb
                     (approx)         (approx)
   Ultrasparc 2i:      110               70
*/


/* There are two key ideas here for reducing the number of mulx
   instructions.  First, when the divisor fits in 32 bits, the high half of
   q*d can be calculated without the two 32x32->64 cross products involving
   the high 32 bits of the divisor, those being zero of course.  Second,
   umul_ppmm_lowequal and umul_ppmm_half_lowequal each save one mulx by
   knowing the low half of q*d is equal to the input limb l.

   For size==1, a simple udivx is used.  This is faster than calculating an
   inverse.

   For a 32-bit divisor and small sizes, an attempt was made at a simple
   udivx loop (two per 64-bit limb), but it turned out to be slower than
   mul-by-inverse.  At size==2 the inverse takes about 260 cycles total,
   compared to 291 for udivx.  Perhaps udivx would win when size==2 and the
   high 32 bits of the second limb are zero (saving one udivx), but that
   doesn't seem worth a special case.  */
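
/* To see why mul-by-inverse gives exact quotients, here is a minimal
   single-limb sketch (illustrative only, not part of the build;
   divexact_limb is a hypothetical name):

       mp_limb_t
       divexact_limb (mp_limb_t n, mp_limb_t d)
       {
         mp_limb_t  i;
         binvert_limb (i, d);   -- i*d == 1 mod 2^64, d must be odd
         return n * i;          -- when d divides n, n*i == (n/d)*d*i
                                --                       == n/d mod 2^64
       }

   The loops below apply this limb by limb; the high half of each q*d is
   the amount to borrow from the next limb, which is what the lowequal
   macros compute cheaply.  */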

void
mpn_divexact_1 (mp_ptr dst, mp_srcptr src, mp_size_t size, mp_limb_t divisor)
{
  mp_limb_t  inverse, s, s_next, c, l, ls, q;
  unsigned   rshift, lshift;
  mp_limb_t  lshift_mask;
  mp_limb_t  divisor_h;

  ASSERT (size >= 1);
  ASSERT (divisor != 0);
  ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size));
  ASSERT_MPN (src, size);
  ASSERT_LIMB (divisor);

  s = *src++;                 /* src low limb */
  size--;
  if (size == 0)
    {
      /* single limb: one hardware divide beats computing an inverse */
      *dst = s / divisor;
      return;
    }

  /* Strip twos from an even divisor; the loops below shift each source
     limb right by rshift to compensate.  */
  if ((divisor & 1) == 0)
    {
      count_trailing_zeros (rshift, divisor);
      divisor >>= rshift;
    }
  else
    rshift = 0;

  binvert_limb (inverse, divisor);  /* inverse*divisor == 1 mod 2^64 */
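
  /* A sketch of how such an inverse is usually constructed (the real
     binvert_limb macro lives in gmp-impl.h): start from an approximation
     correct to a few low bits of 1/divisor mod 2^64, then apply Newton
     steps

         i = i * (2 - divisor * i);

     each of which doubles the number of correct low bits, since
     divisor*i == 1 + e*2^k implies divisor*i*(2 - divisor*i)
     == 1 - e^2*2^(2k).  */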

  lshift = 64 - rshift;

  /* rshift==0 would make lshift==64, i.e. no shift at all, so the s_next
     term must be masked out entirely in that case */
  lshift_mask = (rshift == 0 ? 0 : MP_LIMB_T_MAX);
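
  /* For instance, with rshift==4 the loops below reconstruct each shifted
     limb as ls = (s >> 4) | (s_next << 60), pulling the bottom four bits
     of the next limb into the top of this one; with rshift==0 the mask
     kills the s_next term and ls is simply s.  */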

  c = 0;                          /* running borrow */
  divisor_h = HIGH32 (divisor);   /* high 32 bits; zero for a 32-bit divisor */
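
  /* The per-limb recurrence (Jebelean-style exact division): subtract the
     running borrow c from the next shifted source limb, multiply by the
     inverse to obtain a quotient limb q, then fold the high half of
     q*divisor back into c.  Each q is exact because the low half of
     q*divisor equals the adjusted limb, by construction of the inverse.  */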

  if (divisor_h == 0)
    {
      /* 32-bit divisor */
      do
        {
          /* next source limb, shifted down by the stripped twos */
          s_next = *src++;
          ls = (s >> rshift) | ((s_next << lshift) & lshift_mask);
          s = s_next;

          /* l = ls - c, with c becoming the borrow out */
          SUBC_LIMB (c, l, ls, c);

          /* quotient limb; the low of q*divisor equals l */
          q = l * inverse;
          *dst++ = q;

          /* l = high of q*divisor, to be borrowed from the next limb */
          umul_ppmm_half_lowequal (l, q, divisor, l);
          c += l;

          size--;
        }
      while (size != 0);

      /* final limb: no borrow propagates beyond the top */
      ls = s >> rshift;
      l = ls - c;
      q = l * inverse;
      *dst = q;
    }
  else
    {
      /* 64-bit divisor */
      mp_limb_t  divisor_l = LOW32 (divisor);
      do
        {
          s_next = *src++;
          ls = (s >> rshift) | ((s_next << lshift) & lshift_mask);
          s = s_next;

          SUBC_LIMB (c, l, ls, c);

          q = l * inverse;
          *dst++ = q;

          /* as above, but the cross products with divisor_h are needed */
          umul_ppmm_lowequal (l, q, divisor, divisor_h, divisor_l, l);
          c += l;

          size--;
        }
      while (size != 0);

      ls = s >> rshift;
      l = ls - c;
      q = l * inverse;
      *dst = q;
    }
}
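
/* Usage sketch (illustrative): the division must be known to be exact;
   per the GMP documentation the result is otherwise undefined.

       mp_limb_t  np[2] = { 30, 0 };     -- two-limb operand, value 30
       mp_limb_t  qp[2];
       mpn_divexact_1 (qp, np, 2, 3);    -- qp holds {10, 0}

   dst may equal src (MPN_SAME_OR_SEPARATE_P above allows in-place use).  */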