/*	$NetBSD: merge.c,v 1.3 1995/12/28 08:52:28 thorpej Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "from: @(#)merge.c	8.2 (Berkeley) 2/14/94";
#else
static char *rcsid = "$NetBSD: merge.c,v 1.3 1995/12/28 08:52:28 thorpej Exp $";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */
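
/*
 * The merge step works out, for each output position, how far it can
 * keep copying from one input run before an element of the other run
 * must be taken.  Short distances are found by a linear scan; after six
 * linear steps in one scan the code switches to exponential (galloping)
 * search, probing at offsets size, 2*size, 4*size, ... until the
 * comparison changes sense and then binary searching the final interval
 * (see the LINEAR, EXPONENTIAL, FASTCASE and SLOWCASE labels below).
 * It drops back to linear scanning when a gallop terminates on its
 * first probe.
 */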

/*
 * #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */
#define NATURAL
#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static void setup __P((u_char *, u_char *, size_t, size_t, int (*)()));
static void insertionsort __P((u_char *, size_t, size_t, int (*)()));

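/*
 * Copy helpers for the merge: the ICOPY_* forms move whole ints at a
 * time and are used only when both the element size and the base
 * address are int-aligned (see iflag in mergesort() below); the
 * CCOPY_* forms copy byte by byte.  The *_LIST forms copy until src
 * reaches last; the *_ELT forms copy a single element of i bytes.
 */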
#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)
#define ICOPY_LIST(src, dst, last)				\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while (src < last)
#define ICOPY_ELT(src, dst, i)					\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)		\
	do					\
		*dst++ = *src++;		\
	while (src < last)
#define CCOPY_ELT(src, dst, i)			\
	do					\
		*dst++ = *src++;		\
	while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (u_char **)						\
	((u_char *)0 +							\
	    (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
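
/*
 * For example, with PSIZE == 8, EVAL(p) rounds p up to the next 8-byte
 * boundary, so the pointer linking one sorted run to the next can be
 * stored inside the run's own storage even when elements are smaller
 * than, or not aligned to, a pointer.  The size check in mergesort()
 * and the extra PSIZE bytes allocated for list2 are there so that this
 * rounded-up slot always falls within storage the sort owns.
 */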

/*
 * Arguments are as for qsort.
 */
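/*
 * Illustrative usage sketch (not part of the library; intcmp, v and the
 * use of err(3) from <err.h> are hypothetical): mergesort() is called
 * exactly like qsort(3), but reports failure by returning -1 with errno
 * set.
 *
 *	static int
 *	intcmp(const void *a, const void *b)
 *	{
 *		int x = *(const int *)a, y = *(const int *)b;
 *
 *		return (x < y ? -1 : x > y);
 *	}
 *
 *	int v[] = { 4, 1, 3, 2 };
 *
 *	if (mergesort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), intcmp) == -1)
 *		err(1, "mergesort");
 */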
int
mergesort(base, nmemb, size, cmp)
	void *base;
	size_t nmemb;
	register size_t size;
	int (*cmp) __P((const void *, const void *));
{
	register int i, sense;
	int big, iflag;
	register u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
	u_char *list2, *list1, *p2, *p, *last, **p1;

	if (size < PSIZE / 2) {		/* Pointers must fit into 2 * size. */
		errno = EINVAL;
		return (-1);
	}

	/*
	 * XXX
	 * Stupid subtraction for the Cray.
	 */
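	/*
	 * iflag selects the int-at-a-time copy macros; it is set only
	 * when both the element size and the base address are multiples
	 * of sizeof(int).
	 */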
	iflag = 0;
	if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
		iflag = 1;

	if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
		return (-1);

	list1 = base;
	setup(list1, list2, nmemb, size, cmp);
	last = list2 + nmemb * size;
	i = big = 0;
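	/*
	 * Merge passes: while more than one run remains, merge adjacent
	 * runs from list1 into list2, following the linked list of run
	 * heads threaded through list2, then exchange the roles of the
	 * two buffers for the next pass.
	 */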
	while (*EVAL(list2) != last) {
	    l2 = list1;
	    p1 = EVAL(list1);
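	    /*
	     * Each iteration of this loop merges the two consecutive runs
	     * starting at f1 and f2 (ending at l1 and l2) into the output
	     * at tp2; p1 remembers where the link for the merged run is
	     * to be stored.
	     */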
	    for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
	    	p2 = *EVAL(p2);
	    	f1 = l2;
	    	f2 = l1 = list1 + (p2 - list2);
	    	if (p2 != last)
	    		p2 = *EVAL(p2);
	    	l2 = list1 + (p2 - list2);
	    	while (f1 < l1 && f2 < l2) {
	    		if ((*cmp)(f1, f2) <= 0) {
	    			q = f2;
	    			b = f1, t = l1;
	    			sense = -1;
	    		} else {
	    			q = f1;
	    			b = f2, t = l2;
	    			sense = 0;
	    		}
	    		if (!big) {	/* here i = 0 */
LINEAR:	    			while ((b += size) < t && cmp(q, b) > sense)
	    				if (++i == 6) {
	    					big = 1;
	    					goto EXPONENTIAL;
	    				}
	    		} else {
EXPONENTIAL:	    		for (i = size; ; i <<= 1)
	    				if ((p = (b + i)) >= t) {
	    					if ((p = t - size) > b &&
						    (*cmp)(q, p) <= sense)
	    						t = p;
	    					else
	    						b = p;
	    					break;
	    				} else if ((*cmp)(q, p) <= sense) {
	    					t = p;
	    					if (i == size)
	    						big = 0;
	    					goto FASTCASE;
	    				} else
	    					b = p;
SLOWCASE:	    		while (t > b+size) {
	    				i = (((t - b) / size) >> 1) * size;
	    				if ((*cmp)(q, p = b + i) <= sense)
	    					t = p;
	    				else
	    					b = p;
	    			}
	    			goto COPY;
FASTCASE:	    		while (i > size)
	    				if ((*cmp)(q,
	    					p = b + (i >>= 1)) <= sense)
	    					t = p;
	    				else
	    					b = p;
COPY:	    			b = t;
	    		}
	    		i = size;
	    		if (q == f1) {
	    			if (iflag) {
	    				ICOPY_LIST(f2, tp2, b);
	    				ICOPY_ELT(f1, tp2, i);
	    			} else {
	    				CCOPY_LIST(f2, tp2, b);
	    				CCOPY_ELT(f1, tp2, i);
	    			}
	    		} else {
	    			if (iflag) {
	    				ICOPY_LIST(f1, tp2, b);
	    				ICOPY_ELT(f2, tp2, i);
	    			} else {
	    				CCOPY_LIST(f1, tp2, b);
	    				CCOPY_ELT(f2, tp2, i);
	    			}
	    		}
	    	}
	    	if (f2 < l2) {
	    		if (iflag)
	    			ICOPY_LIST(f2, tp2, l2);
	    		else
	    			CCOPY_LIST(f2, tp2, l2);
	    	} else if (f1 < l1) {
	    		if (iflag)
	    			ICOPY_LIST(f1, tp2, l1);
	    		else
	    			CCOPY_LIST(f1, tp2, l1);
	    	}
	    	*p1 = l2;
	    }
	    tp2 = list1;	/* swap list1, list2 */
	    list1 = list2;
	    list2 = tp2;
	    last = list2 + nmemb*size;
	}
	if (base == list2) {
		memmove(list2, list1, nmemb*size);
		list2 = list1;
	}
	free(list2);
	return (0);
}

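/*
 * Byte-wise element helpers shared by setup() and insertionsort():
 * swap() exchanges two elements of `size' bytes; reverse() reverses a
 * block of elements in place.  Both use the caller's locals i, s and
 * tmp (reverse() also uses size2).
 */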
#define	swap(a, b) {					\
		s = b;					\
		i = size;				\
		do {					\
			tmp = *a; *a++ = *s; *s++ = tmp; \
		} while (--i);				\
		a -= size;				\
	}
#define reverse(bot, top) {				\
	s = top;					\
	do {						\
		i = size;				\
		do {					\
			tmp = *bot; *bot++ = *s; *s++ = tmp; \
		} while (--i);				\
		s -= size2;				\
	} while (bot < s);				\
}

/*
 * Optional hybrid natural/pairwise first pass.  Consumes list1 in runs
 * of increasing order and records the runs as a linked list threaded
 * through list2.  Natural runs are looked for only after THRESHOLD/2
 * consecutive pairs compare with the same sense.  (Only used when
 * NATURAL is defined; otherwise simple pairwise merging is used.)
 */
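/*
 * The linked list built here is the one the merge passes above follow:
 * for a run beginning at list1 + k * size, the slot at
 * EVAL(list2 + k * size) holds the list2 address of the next run's
 * head, and the last link points to list2 + n * size.
 */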
static void
setup(list1, list2, n, size, cmp)
	size_t n, size;
	int (*cmp) __P((const void *, const void *));
	u_char *list1, *list2;
{
	int i, length, size2, tmp, sense;
	u_char *f1, *f2, *s, *l2, *last, *p2;

	size2 = size*2;
	if (n <= 5) {
		insertionsort(list1, n, size, cmp);
		*EVAL(list2) = (u_char*) list2 + n*size;
		return;
	}
	/*
	 * Avoid running pointers out of bounds; limit n to evens
	 * for simplicity.
	 */
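	/*
	 * i is 4 or 5, chosen so that the n - i elements handled by the
	 * first pass below form whole pairs.
	 */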
	i = 4 + (n & 1);
	insertionsort(list1 + (n - i) * size, i, size, cmp);
	last = list1 + size * (n - i);
	*EVAL(list2 + (last - list1)) = list2 + n * size;

#ifdef NATURAL
	p2 = list2;
	f1 = list1;
	sense = (cmp(f1, f1 + size) > 0);
	for (; f1 < last; sense = !sense) {
		length = 2;
					/* Find pairs with same sense. */
		for (f2 = f1 + size2; f2 < last; f2 += size2) {
			if ((cmp(f2, f2 + size) > 0) != sense)
				break;
			length += 2;
		}
		if (length < THRESHOLD) {		/* Pairwise merge */
			do {
				p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
				if (sense > 0)
					swap(f1, f1 + size);
			} while ((f1 += size2) < f2);
		} else {				/* Natural merge */
			l2 = f2;
			for (f2 = f1 + size2; f2 < l2; f2 += size2) {
				if ((cmp(f2 - size, f2) > 0) != sense) {
					p2 = *EVAL(p2) = f2 - list1 + list2;
					if (sense > 0)
						reverse(f1, f2 - size);
					f1 = f2;
				}
			}
			if (sense > 0)
				reverse(f1, f2 - size);
			f1 = f2;
			if (f2 < last || cmp(f2 - size, f2) > 0)
				p2 = *EVAL(p2) = f2 - list1 + list2;
			else
				p2 = *EVAL(p2) = list2 + n*size;
		}
	}
#else		/* pairwise merge only. */
	for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
		p2 = *EVAL(p2) = p2 + size2;
		if (cmp(f1, f1 + size) > 0)
			swap(f1, f1 + size);
	}
#endif /* NATURAL */
}


/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 or 5 elements.
 */
static void
insertionsort(a, n, size, cmp)
	u_char *a;
	size_t n, size;
	int (*cmp) __P((const void *, const void *));
{
	u_char *ai, *s, *t, *u, tmp;
	int i;

	/* n is unsigned; the n-- > 1 test makes n == 0 a no-op instead of wrapping. */
	for (ai = a + size; n-- > 1; ai += size)
		for (t = ai; t > a; t -= size) {
			u = t - size;
			if (cmp(u, t) <= 0)
				break;
			swap(u, t);
		}
}
354