/* Test fat binary setups.

Copyright 2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
#include "tests.h"

/* In this program we're aiming to pick up certain subtle problems that
   might creep into a fat binary.

   1. We want to ensure the application entry point routines like
      __gmpn_add_n dispatch to the correct field of __gmpn_cpuvec.

      Note that these routines are not exercised as a side effect of other
      tests (e.g. the mpz routines).  Internally the fields of __gmpn_cpuvec
      are used directly, so we need test code that explicitly calls the mpn
      functions, the way an application would.

   2. We want to ensure the initial __gmpn_cpuvec data has the initializer
      function pointers in the correct fields, and that each initializer
      function dispatches to its correct corresponding field once
      initialization has been done.

      Only one of the initializer routines executes in a normal program,
      since that routine sets all the pointers to actual mpn functions.  We
      forcibly reset __gmpn_cpuvec so that each one can be run.

   In both cases the data put through the functions is nothing special,
   just enough to verify that, for instance, an add_n really performs an
   add_n and hasn't mistakenly dispatched to sub_n or some other routine.

   The loop around each test exercises the initializer routine on the
   first iteration, and the dispatcher routine on the second.

   If the dispatcher and/or initializer routines are generated mechanically
   via macros (e.g. mpn/x86/fat/fat_entry.asm) then there shouldn't be too
   much risk of them going wrong, provided the structure layout is correctly
   expressed.  But if they're in C then it's good to guard against typos in
   what is rather repetitive code.  The initializer data for __gmpn_cpuvec
   in fat.c is always written by hand and is likewise a bit repetitive.  */


/* dummies when not a fat binary */
#if ! WANT_FAT_BINARY
struct cpuvec_t {
  int  initialized;
};
struct cpuvec_t __gmpn_cpuvec;
#define ITERATE_FAT_THRESHOLDS()  do { } while (0)
#endif

/* saved from program startup */
struct cpuvec_t  initial_cpuvec;

void
check_functions (void)
{
  mp_limb_t  wp[2], xp[2], yp[2], r;
  int  i;

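/* For reference, the dispatch pattern under test looks roughly like the
   sketch below.  This is only an illustration: the "fake_add_n" name is
   made up here, and the real routines live in fat.c and (for x86) in
   mpn/x86/fat/fat_entry.asm.  The application entry point jumps through
   the current cpuvec field,

       mp_limb_t
       __gmpn_add_n (mp_ptr wp, mp_srcptr xp, mp_srcptr yp, mp_size_t n)
       {
         return (*__gmpn_cpuvec.add_n) (wp, xp, yp, n);
       }

   and each field initially points at an initializer stub, which detects
   the CPU, fills every cpuvec field with the routines chosen for that
   CPU, then redoes the call,

       static mp_limb_t
       fake_add_n (mp_ptr wp, mp_srcptr xp, mp_srcptr yp, mp_size_t n)
       {
         __gmpn_cpuvec_init ();
         return (*__gmpn_cpuvec.add_n) (wp, xp, yp, n);
       }

   so the first call through any entry point triggers initialization and
   every later call goes straight to the selected routine.  */
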
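  /* For each function, reset __gmpn_cpuvec to its pristine
     pre-initialization state, then call the function twice: iteration
     i==0 goes through the initializer stub, i==1 through the installed
     dispatcher.  */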
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      yp[0] = 456;
      mpn_add_n (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 579);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      wp[0] = 456;
      r = mpn_addmul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(2));
      ASSERT_ALWAYS (wp[0] == 702);
      ASSERT_ALWAYS (r == 0);
    }

#if HAVE_NATIVE_mpn_copyd
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      xp[1] = 456;
      mpn_copyd (xp+1, xp, (mp_size_t) 1);
      ASSERT_ALWAYS (xp[1] == 123);
    }
#endif

#if HAVE_NATIVE_mpn_copyi
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      xp[1] = 456;
      mpn_copyi (xp, xp+1, (mp_size_t) 1);
      ASSERT_ALWAYS (xp[0] == 456);
    }
#endif

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 1605;
      mpn_divexact_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(5));
      ASSERT_ALWAYS (wp[0] == 321);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 1296;
      r = mpn_divexact_by3c (wp, xp, (mp_size_t) 1, CNST_LIMB(0));
      ASSERT_ALWAYS (wp[0] == 432);
      ASSERT_ALWAYS (r == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 287;
      r = mpn_divrem_1 (wp, (mp_size_t) 1, xp, (mp_size_t) 1, CNST_LIMB(7));
      ASSERT_ALWAYS (wp[1] == 41);
      ASSERT_ALWAYS (wp[0] == 0);
      ASSERT_ALWAYS (r == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 12;
      r = mpn_gcd_1 (xp, (mp_size_t) 1, CNST_LIMB(9));
      ASSERT_ALWAYS (r == 3);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x1001;
      mpn_lshift (wp, xp, (mp_size_t) 1, 1);
      ASSERT_ALWAYS (wp[0] == 0x2002);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 14;
      r = mpn_mod_1 (xp, (mp_size_t) 1, CNST_LIMB(4));
      ASSERT_ALWAYS (r == 2);
    }

#if (GMP_NUMB_BITS % 4) == 0
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      int  bits = (GMP_NUMB_BITS / 4) * 3;
      mp_limb_t  mod = (CNST_LIMB(1) << bits) - 1;
      mp_limb_t  want = GMP_NUMB_MAX % mod;
      xp[0] = GMP_NUMB_MAX;
      r = mpn_mod_34lsub1 (xp, (mp_size_t) 1);
      ASSERT_ALWAYS (r % mod == want);
    }
#endif

  /*   DECL_modexact_1c_odd ((*modexact_1c_odd)); */

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 14;
      r = mpn_mul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(4));
      ASSERT_ALWAYS (wp[0] == 56);
      ASSERT_ALWAYS (r == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      yp[0] = 7;
      mpn_mul_basecase (wp, xp, (mp_size_t) 1, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 35);
      ASSERT_ALWAYS (wp[1] == 0);
    }

#if HAVE_NATIVE_mpn_preinv_divrem_1 && GMP_NAIL_BITS == 0
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x101;
      r = mpn_preinv_divrem_1 (wp, (mp_size_t) 1, xp, (mp_size_t) 1,
                               GMP_LIMB_HIGHBIT,
                               refmpn_invert_limb (GMP_LIMB_HIGHBIT), 0);
      ASSERT_ALWAYS (wp[0] == 0x202);
      ASSERT_ALWAYS (wp[1] == 0);
      ASSERT_ALWAYS (r == 0);
    }
#endif

#if GMP_NAIL_BITS == 0
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = GMP_LIMB_HIGHBIT+123;
      r = mpn_preinv_mod_1 (xp, (mp_size_t) 1, GMP_LIMB_HIGHBIT,
                            refmpn_invert_limb (GMP_LIMB_HIGHBIT));
      ASSERT_ALWAYS (r == 123);
    }
#endif

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x8008;
      mpn_rshift (wp, xp, (mp_size_t) 1, 1);
      ASSERT_ALWAYS (wp[0] == 0x4004);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      mpn_sqr_basecase (wp, xp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 25);
      ASSERT_ALWAYS (wp[1] == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 999;
      yp[0] = 666;
      mpn_sub_n (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 333);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      wp[0] = 456;
      r = mpn_submul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(2));
      ASSERT_ALWAYS (wp[0] == 210);
      ASSERT_ALWAYS (r == 0);
    }
}

/* Expect the first use of each fat threshold to invoke the necessary
   initialization.  */
void
check_thresholds (void)
{
#define ITERATE(name,field)                                             \
  do {                                                                  \
    memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));   \
    ASSERT_ALWAYS (name != 0);                                          \
    ASSERT_ALWAYS (name == __gmpn_cpuvec.field);                        \
    ASSERT_ALWAYS (__gmpn_cpuvec.initialized);                          \
  } while (0)

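  /* In a fat build, ITERATE_FAT_THRESHOLDS is supplied by GMP's internal
     headers and applies ITERATE to each threshold held in __gmpn_cpuvec.
     A hypothetical expansion, shown only to illustrate the shape (the
     actual threshold list depends on the GMP version), would be

         ITERATE (MUL_TOOM22_THRESHOLD, mul_toom22_threshold);
         ITERATE (SQR_TOOM2_THRESHOLD,  sqr_toom2_threshold);

     Reading "name" goes through the fat threshold mechanism, so it must
     trigger initialization by itself and must come back equal to the
     value stored in the corresponding cpuvec field.  */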
  ITERATE_FAT_THRESHOLDS ();
}


int
main (void)
{
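  /* Save the pristine __gmpn_cpuvec before anything can have provoked
     the initializer; the checks above restore from this copy.  */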
  memcpy (&initial_cpuvec, &__gmpn_cpuvec, sizeof (__gmpn_cpuvec));

  tests_start ();

  check_functions ();
  check_thresholds ();

  tests_end ();
  exit (0);
}