xref: /csrg-svn/lib/libc/stdlib/radixsort.c (revision 45287)
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)radixsort.c	5.1 (Berkeley) 10/01/90";
#endif /* LIBC_SCCS and not lint */

#include <sys/types.h>
#include <sys/errno.h>
#include <limits.h>
#include <stdlib.h>
#include <stddef.h>

#define	NCHARS	(UCHAR_MAX + 1)

/*
 * shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 *
 * __rspartition is the cutoff point between further partitioning and
 * a shellsort; buckets larger than it are partitioned again, smaller
 * ones are shellsorted.  If it changes, check __rsshell_increments.
 * Both are exported, as the best values are data dependent.  Unrolling
 * this loop has not proven worthwhile.
 */
#define	NPARTITION	40
int __rspartition = NPARTITION;
int __rsshell_increments[] = { 4, 1, 0, 0, 0, 0, 0, 0 };
#define SHELLSORT { \
	register u_char ch, *s1, *s2; \
	register int incr, *incrp; \
	for (incrp = __rsshell_increments; incr = *incrp++;) \
		for (t1 = incr; t1 < nmemb; ++t1) \
			for (t2 = t1 - incr; t2 >= 0;) { \
				s1 = p[t2] + indx; \
				s2 = p[t2 + incr] + indx; \
				while ((ch = tr[*s1++]) == tr[*s2] && ch) \
					++s2; \
				if (ch > tr[*s2]) { \
					s1 = p[t2]; \
					p[t2] = p[t2 + incr]; \
					p[t2 + incr] = s1; \
					t2 -= incr; \
				} else \
					break; \
			} \
}
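
/*
 * Illustrative only (not part of the library): a standalone rendering of
 * the SHELLSORT macro above, equivalent to its expansion when p, indx,
 * nmemb, t1, t2 and tr are in scope.  The function name and the
 * RADIXSORT_EXAMPLE guard are hypothetical.
 */
#ifdef RADIXSORT_EXAMPLE
static void
example_shellsort(p, indx, nmemb, tr)
	u_char **p, *tr;
	int indx, nmemb;
{
	register u_char ch, *s1, *s2;
	register int incr, *incrp, t1, t2;

	for (incrp = __rsshell_increments; incr = *incrp++;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;) {
				/* compare the two keys from byte indx on */
				s1 = p[t2] + indx;
				s2 = p[t2 + incr] + indx;
				while ((ch = tr[*s1++]) == tr[*s2] && ch)
					++s2;
				if (ch > tr[*s2]) {
					/* out of order: swap the pointers */
					s1 = p[t2];
					p[t2] = p[t2 + incr];
					p[t2 + incr] = s1;
					t2 -= incr;
				} else
					break;
			}
}
#endif /* RADIXSORT_EXAMPLE */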

/*
 * stack points to context structures.  Each structure defines a
 * scheduled partitioning.  Radixsort exits when the stack is empty.
 *
 * The stack size is data dependent, and guessing is probably not
 * worthwhile.  The initial stack fits in 1K with four bytes left over
 * for malloc.  The initial size is exported, as the best value is
 * data, and possibly system, dependent.
 */
typedef struct _stack {
	u_char **bot;
	int indx, nmemb;
} CONTEXT;

int __radix_stacksize = (1024 - 4) / sizeof(CONTEXT);
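/*
 * Worked example for the initialization above, assuming 4-byte pointers
 * and 4-byte ints (as on the VAX-class machines of the period):
 * sizeof(CONTEXT) is 12, so the initial stack holds (1024 - 4) / 12 == 85
 * contexts, i.e. 1020 bytes, leaving the four bytes mentioned above for
 * malloc bookkeeping.
 */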
#define	STACKPUSH { \
	if (stackp == estack) { \
		t1 = stackp - stack; \
		stackp = stack; \
		if (!(stack = (CONTEXT *)realloc((char *)stack, \
		    (__radix_stacksize *= 2) * sizeof(CONTEXT)))) { \
			t1 = errno; \
			free((char *)l2); \
			if (stackp) \
				free((char *)stackp); \
			errno = t1; \
			return(-1); \
		} \
		stackp = stack + t1; \
		estack = stack + __radix_stacksize; \
	} \
	stackp->bot = p; \
	stackp->nmemb = nmemb; \
	stackp->indx = indx; \
	++stackp; \
}

#define	STACKPOP { \
	if (stackp == stack) \
		break; \
	--stackp; \
	bot = stackp->bot; \
	nmemb = stackp->nmemb; \
	indx = stackp->indx; \
}

/*
 * A variant of MSD radix sorting; see Knuth Vol. 3, page 177, and 5.2.5,
 * Ex. 10 and 12.  Also, "Three Partition Refinement Algorithms," Paige
 * and Tarjan, SIAM J. Comput. Vol. 16, No. 6, December 1987.
 *
 * This switches to a simple sort as soon as a bucket falls below a cutoff
 * point, rather than sorting the entire list after partitioning is
 * finished.
 *
 * This is pure MSD, rather than LSD applied after some number of MSD
 * passes, switching to the simple sort as soon as possible.  Takes time
 * linear in the total number of bytes in the strings.
 */
radixsort(l1, nmemb, tab, endbyte)
	u_char **l1, *tab, endbyte;
	register int nmemb;
{
	register int i, indx, t1, t2;
	register u_char **l2, **p, **bot, *tr;
	CONTEXT *estack, *stack, *stackp;
	int c[NCHARS + 1];
	u_char ltab[NCHARS];

	if (nmemb <= 1)
		return(0);

	/*
	 * There are two arrays, one provided by the user (l1) and a
	 * temporary one (l2).  Each pass partitions the data into the
	 * temporary array and then copies it back.  Using indx to track
	 * which array currently holds the data and swapping the two back
	 * and forth, thus avoiding the copy on every iteration, turned
	 * out to be no faster than the current implementation.
	 */
	if (!(l2 = (u_char **)malloc(sizeof(u_char *) * nmemb)))
		return(-1);

	/* initialize stack */
	stack = stackp = estack = NULL;

	/*
	 * tr references a table of sort weights; multiple entries may
	 * map to the same weight; EOS char must have the lowest weight.
	 */
	if (tab)
		tr = tab;
	else {
		tr = ltab;
		for (t1 = 0, t2 = endbyte; t1 < t2; ++t1)
			tr[t1] = t1 + 1;
		tr[t2] = 0;
		for (t1 = endbyte + 1; t1 < NCHARS; ++t1)
			tr[t1] = t1;
	}
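
	/*
	 * Worked example of the default table: with an endbyte of '\0'
	 * the first loop does nothing, tr[0] becomes 0, and every other
	 * byte keeps its own value as its weight.  With an endbyte of
	 * '\n' (0x0a), bytes 0x00-0x09 shift up to weights 1-10, '\n'
	 * gets weight 0, and bytes 0x0b-0xff keep their own values.
	 */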

	/* the first partitioning pass covers the entire array */
	bot = l1;
	indx = 0;

	for (;;) {
		/* clear bucket count array */
		bzero((char *)c, sizeof(c));

		/*
		 * compute number of items that sort to the same bucket
		 * for this index.
		 */
		for (p = bot, i = nmemb; i--;)
			++c[tr[(*p++)[indx]]];

		/*
		 * Sum the counts into c, dividing the temporary array
		 * into buckets for this index.  Afterwards c[i] holds
		 * the cumulative total of keys in buckets up to and
		 * including bucket i, and is later used as an index
		 * into that bucket.  c[NCHARS] holds the total number
		 * of elements, which determines how many elements the
		 * last bucket contains.
		 */
		for (i = 1; i <= NCHARS; ++i)
			c[i] += c[i - 1];

		/*
		 * Partition the elements into buckets; each count in c
		 * decrements through its bucket and ends up pointing to
		 * the first element of that bucket.
		 */
		for (i = nmemb; i--;) {
			--p;
			l2[--c[tr[(*p)[indx]]]] = *p;
		}

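		/*
		 * Worked example with hypothetical data: if nmemb is 6 and
		 * the weights seen at this index are 2, 0, 1, 1, 2, 1, the
		 * counts are c[0] = 1, c[1] = 3, c[2] = 2; after the running
		 * sum, c[0] = 1, c[1] = 4, c[2] = 6.  The backwards pass then
		 * fills l2[5..4] with the weight-2 keys, l2[3..1] with the
		 * weight-1 keys and l2[0] with the weight-0 key, leaving
		 * c[0] = 0, c[1] = 1, c[2] = 4, the start of each bucket.
		 */
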
		/* copy the partitioned elements back to the user's array */
		bcopy(l2, bot, nmemb * sizeof(u_char *));

		++indx;
		/*
		 * sort buckets as necessary; don't sort c[0], it's the
		 * EOS character bucket, and nothing can follow EOS.
		 */
		for (i = NCHARS - 1; i; i--) {
			if ((nmemb = c[i + 1] - (t1 = c[i])) < 2)
				continue;
			p = bot + t1;
			if (nmemb > __rspartition)
				STACKPUSH
			else
				SHELLSORT
		}
		/* break out when stack is empty */
		STACKPOP
	}

	free((char *)l2);
	free((char *)stack);
#ifdef STATS
	(void)fprintf(stderr, "max stack %u.\n", __radix_stacksize);
#endif
	return(0);
}
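
/*
 * Illustrative only (not part of the library): a minimal caller of
 * radixsort(), using the default weight table (tab == NULL) and a NUL
 * endbyte, so NUL-terminated strings sort by unsigned byte value.  The
 * RADIXSORT_EXAMPLE guard and the identifiers below are hypothetical.
 */
#ifdef RADIXSORT_EXAMPLE
#include <stdio.h>

main()
{
	static u_char *lines[] = {
		(u_char *)"melon", (u_char *)"apple", (u_char *)"banana",
	};
	int i;

	if (radixsort(lines, 3, (u_char *)NULL, '\0'))
		return(1);
	/* prints apple, banana, melon */
	for (i = 0; i < 3; ++i)
		(void)printf("%s\n", (char *)lines[i]);
	return(0);
}
#endif /* RADIXSORT_EXAMPLE */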