1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*	Copyright (c) 1988 AT&T	*/
23*0Sstevel@tonic-gate /*	  All Rights Reserved  	*/
24*0Sstevel@tonic-gate 
25*0Sstevel@tonic-gate 
26*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
27*0Sstevel@tonic-gate 
28*0Sstevel@tonic-gate /*
29*0Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
30*0Sstevel@tonic-gate  * Use is subject to license terms.
31*0Sstevel@tonic-gate  */
32*0Sstevel@tonic-gate 
33*0Sstevel@tonic-gate #pragma weak mallopt = _mallopt
34*0Sstevel@tonic-gate #pragma weak mallinfo = _mallinfo
35*0Sstevel@tonic-gate #pragma weak cfree = _cfree
36*0Sstevel@tonic-gate #pragma weak memalign = _memalign
37*0Sstevel@tonic-gate #pragma weak valloc = _valloc
38*0Sstevel@tonic-gate 
39*0Sstevel@tonic-gate #include <sys/types.h>
40*0Sstevel@tonic-gate 
41*0Sstevel@tonic-gate #ifndef debug
42*0Sstevel@tonic-gate #define	NDEBUG
43*0Sstevel@tonic-gate #endif
44*0Sstevel@tonic-gate 
45*0Sstevel@tonic-gate #include <stdlib.h>
46*0Sstevel@tonic-gate #include <string.h>
47*0Sstevel@tonic-gate #include "assert.h"
48*0Sstevel@tonic-gate #include "malloc.h"
49*0Sstevel@tonic-gate #include "mallint.h"
50*0Sstevel@tonic-gate #include <thread.h>
51*0Sstevel@tonic-gate #include <synch.h>
52*0Sstevel@tonic-gate #include <unistd.h>
53*0Sstevel@tonic-gate #include <limits.h>
54*0Sstevel@tonic-gate 
55*0Sstevel@tonic-gate static mutex_t mlock = DEFAULTMUTEX;
56*0Sstevel@tonic-gate static ssize_t freespace(struct holdblk *);
57*0Sstevel@tonic-gate static void *malloc_unlocked(size_t, int);
58*0Sstevel@tonic-gate static void *realloc_unlocked(void *, size_t);
59*0Sstevel@tonic-gate static void free_unlocked(void *);
60*0Sstevel@tonic-gate static void *morecore(size_t);
61*0Sstevel@tonic-gate 
62*0Sstevel@tonic-gate /*
63*0Sstevel@tonic-gate  * use level memory allocater (malloc, free, realloc)
64*0Sstevel@tonic-gate  *
65*0Sstevel@tonic-gate  *	-malloc, free, realloc and mallopt form a memory allocator
66*0Sstevel@tonic-gate  *	similar to malloc, free, and realloc.  The routines
67*0Sstevel@tonic-gate  *	here are much faster than the original, with slightly worse
68*0Sstevel@tonic-gate  *	space usage (a few percent difference on most input).  They
69*0Sstevel@tonic-gate  *	do not have the property that data in freed blocks is left
70*0Sstevel@tonic-gate  *	untouched until the space is reallocated.
71*0Sstevel@tonic-gate  *
72*0Sstevel@tonic-gate  *	-Memory is kept in the "arena", a singly linked list of blocks.
73*0Sstevel@tonic-gate  *	These blocks are of 3 types.
74*0Sstevel@tonic-gate  *		1. A free block.  This is a block not in use by the
75*0Sstevel@tonic-gate  *		   user.  It has a 3 word header. (See description
76*0Sstevel@tonic-gate  *		   of the free queue.)
77*0Sstevel@tonic-gate  *		2. An allocated block.  This is a block the user has
78*0Sstevel@tonic-gate  *		   requested.  It has only a 1 word header, pointing
79*0Sstevel@tonic-gate  *		   to the next block of any sort.
80*0Sstevel@tonic-gate  *		3. A permanently allocated block.  This covers space
81*0Sstevel@tonic-gate  *		   aquired by the user directly through sbrk().  It
82*0Sstevel@tonic-gate  *		   has a 1 word header, as does 2.
83*0Sstevel@tonic-gate  *	Blocks of type 1 have the lower bit of the pointer to the
84*0Sstevel@tonic-gate  *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
85*0Sstevel@tonic-gate  *	to mark them busy.
86*0Sstevel@tonic-gate  *
87*0Sstevel@tonic-gate  *	-Unallocated blocks are kept on an unsorted doubly linked
88*0Sstevel@tonic-gate  *	free list.
89*0Sstevel@tonic-gate  *
90*0Sstevel@tonic-gate  *	-Memory is allocated in blocks, with sizes specified by the
91*0Sstevel@tonic-gate  *	user.  A circular first-fit startegy is used, with a roving
92*0Sstevel@tonic-gate  *	head of the free queue, which prevents bunching of small
93*0Sstevel@tonic-gate  *	blocks at the head of the queue.
94*0Sstevel@tonic-gate  *
95*0Sstevel@tonic-gate  *	-Compaction is performed at free time of any blocks immediately
96*0Sstevel@tonic-gate  *	following the freed block.  The freed block will be combined
97*0Sstevel@tonic-gate  *	with a preceding block during the search phase of malloc.
98*0Sstevel@tonic-gate  *	Since a freed block is added at the front of the free queue,
99*0Sstevel@tonic-gate  *	which is moved to the end of the queue if considered and
100*0Sstevel@tonic-gate  *	rejected during the search, fragmentation only occurs if
101*0Sstevel@tonic-gate  *	a block with a contiguious preceding block that is free is
102*0Sstevel@tonic-gate  *	freed and reallocated on the next call to malloc.  The
103*0Sstevel@tonic-gate  *	time savings of this strategy is judged to be worth the
104*0Sstevel@tonic-gate  *	occasional waste of memory.
105*0Sstevel@tonic-gate  *
106*0Sstevel@tonic-gate  *	-Small blocks (of size < MAXSIZE)  are not allocated directly.
107*0Sstevel@tonic-gate  *	A large "holding" block is allocated via a recursive call to
108*0Sstevel@tonic-gate  *	malloc.  This block contains a header and ?????? small blocks.
109*0Sstevel@tonic-gate  *	Holding blocks for a given size of small block (rounded to the
110*0Sstevel@tonic-gate  *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
111*0Sstevel@tonic-gate  *	holding block with an unused small block is in front of any without.
112*0Sstevel@tonic-gate  *	A list of free blocks is kept within the holding block.
113*0Sstevel@tonic-gate  */
114*0Sstevel@tonic-gate 
/*
 *	description of arena, free queue, holding blocks etc.
 *
 * New compiler and linker does not guarentee order of initialized data.
 * Define freeptr as arena[2-3] to guarentee it follows arena in memory.
 * Later code depends on this order.
 */

static struct header arena[4] = {
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0}
	};
				/*
				 * the second word is a minimal block to
				 * start the arena. The first is a busy
				 * block to be pointed to by the last block.
				 */

#define	freeptr (arena + 2)
				/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;   /* pointer to array of head pointers */
				    /* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocs are used
 */
static int numlblks = NUMLBLKS;	/* little blocks per holding block */
				/* (not referenced in this chunk; */
				/* presumably tuned via mallopt) */
static int minhead = MINHEAD;	/* header size for ordinary (large) blocks */
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;	/* number of small-block size classes */
static unsigned int maxfast = MAXFAST;	/* largest request served from a */
					/* holding block (fastct * grain) */
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;	/* rounding granularity for small requests */
155*0Sstevel@tonic-gate 
#ifdef debug
static int case1count = 0;

/*
 * checkq() - debug-only sanity check of the doubly linked free queue.
 *
 * Walks the queue forward from freeptr[0] to the tail sentinel
 * freeptr[1], then backward to the head, asserting at every step that
 * a node's neighbor links are mutually consistent (prev->next == node
 * and next->prev == node).
 */
static void
checkq(void)
{
	struct header *node = &freeptr[0];

	/*
	 * Forward pass.  The start node is the head sentinel, which is
	 * never the tail sentinel, so a do/while is safe here.
	 */
	do {
		node = node->nextfree;
		assert(node->prevfree->nextfree == node);
	} while (node != &freeptr[1]);

	/* backward pass: mirror of the above */
	do {
		node = node->prevfree;
		assert(node->nextfree->prevfree == node);
	} while (node != &freeptr[0]);
}
#endif
181*0Sstevel@tonic-gate 
182*0Sstevel@tonic-gate 
183*0Sstevel@tonic-gate /*
184*0Sstevel@tonic-gate  * malloc(nbytes) - give a user nbytes to use
185*0Sstevel@tonic-gate  */
186*0Sstevel@tonic-gate 
187*0Sstevel@tonic-gate void *
188*0Sstevel@tonic-gate malloc(size_t nbytes)
189*0Sstevel@tonic-gate {
190*0Sstevel@tonic-gate 	void *ret;
191*0Sstevel@tonic-gate 
192*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
193*0Sstevel@tonic-gate 	ret = malloc_unlocked(nbytes, 0);
194*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
195*0Sstevel@tonic-gate 	return (ret);
196*0Sstevel@tonic-gate }
197*0Sstevel@tonic-gate 
/*
 * Use malloc_unlocked() to get the address to start with; Given this
 * address, find out the closest address that aligns with the request
 * and return that address after doing some house keeping (refer to the
 * ascii art below).
 *
 * Returns NULL when size is 0, alignment is 0 or not a power of two,
 * or the padded request overflows size_t.
 */
void *
_memalign(size_t alignment, size_t size)
{
	void *alloc_buf;
	struct header *hd;
	size_t alloc_size;
	uintptr_t fr;
	static int realloc;	/* retry counter; NOTE(review): shadows the */
				/* libc realloc() name and is bumped without */
				/* holding mlock — racy as a statistic */

	/* alignment must be a nonzero power of two */
	if (size == 0 || alignment == 0 ||
		(alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	/* ordinary malloc already guarantees ALIGNSZ alignment */
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	/* over-allocate so an aligned address surely lies inside */
	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	/* nosmall=1: we need a real block header we can split below */
	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	/* round up to the next multiple of alignment */
	fr = (fr + alignment - 1) / alignment * alignment;

	/* already aligned: nothing to carve off */
	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * we hit an edge case, where the space ahead of aligned
		 * address is not sufficient to hold 'header' and hence we
		 * can't free it. So double the allocation request.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			/*
			 * with alignment*2 of slack the next aligned
			 * address is guaranteed to leave room for a
			 * header in front of it
			 */
			fr = fr + alignment;
		}
	}

	/*
	 *	+-------+		+-------+
	 *  +---| <a>   |		| <a>   |--+
	 *  |   +-------+<--alloc_buf-->+-------+  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|	 hd-->  +-------+  |
	 *  |   |	|	    +---|  <b>  |<-+
	 *  |   |	|	    |   +-------+<--- fr
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   +-------+	    |   +-------+
	 *  +-->|  next |	    +-->|  next |
	 *	+-------+		+-------+
	 *
	 * Split the block: install a busy header just below fr, chain
	 * it to the original block's successor, then free the leading
	 * fragment (alloc_buf).  The aligned tail stays allocated.
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
297*0Sstevel@tonic-gate 
/*
 * _valloc(size) - allocate size bytes aligned on a page boundary.
 *
 * The page size is fetched from sysconf() on the first call and
 * cached in a function-local static for all later calls.  A
 * zero-length request yields NULL.
 */
void *
_valloc(size_t size)
{
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	/* look up the page size once, then reuse the cached value */
	if (pagesize == 0)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}
310*0Sstevel@tonic-gate 
/*
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
 *
 * Caller holds mlock (see malloc()).  nbytes is the user's request;
 * nosmall != 0 forces an ordinary arena block even for a small
 * request — _memalign() uses this because it needs a real block
 * header it can split.  Returns a pointer to at least nbytes of
 * usable space, or NULL on failure or for a zero-byte request.
 */
static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND))  {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				/* room left in the virgin tail region */
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				/* this was the last slot; retire holdblk */
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if ((char *)newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				/* first holding block of this size class */
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			/* roving first-fit search of the free queue */
			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					/* absorb a run of free successors */
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
				    (void) write(2, "Case 1 hit more that once."
					" brk or sbrk?\n", 41);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???  	newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found of free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	/* returned block must hold nbytes, with < MINBLKSZ of slack */
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
634*0Sstevel@tonic-gate 
635*0Sstevel@tonic-gate /*
636*0Sstevel@tonic-gate  * free(ptr) - free block that user thinks starts at ptr
637*0Sstevel@tonic-gate  *
638*0Sstevel@tonic-gate  *	input - ptr-1 contains the block header.
639*0Sstevel@tonic-gate  *		If the header points forward, we have a normal
640*0Sstevel@tonic-gate  *			block pointing to the next block
641*0Sstevel@tonic-gate  *		if the header points backward, we have a small
642*0Sstevel@tonic-gate  *			block from a holding block.
643*0Sstevel@tonic-gate  *		In both cases, the busy bit must be set
644*0Sstevel@tonic-gate  */
645*0Sstevel@tonic-gate 
646*0Sstevel@tonic-gate void
647*0Sstevel@tonic-gate free(void *ptr)
648*0Sstevel@tonic-gate {
649*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
650*0Sstevel@tonic-gate 	free_unlocked(ptr);
651*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
652*0Sstevel@tonic-gate }
653*0Sstevel@tonic-gate 
654*0Sstevel@tonic-gate /*
655*0Sstevel@tonic-gate  * free_unlocked(ptr) - Do the real work for free()
656*0Sstevel@tonic-gate  */
657*0Sstevel@tonic-gate 
658*0Sstevel@tonic-gate void
659*0Sstevel@tonic-gate free_unlocked(void *ptr)
660*0Sstevel@tonic-gate {
661*0Sstevel@tonic-gate 	struct holdblk *holdblk;	/* block holding blk */
662*0Sstevel@tonic-gate 	struct holdblk *oldhead;	/* former head of the hold block */
663*0Sstevel@tonic-gate 					/* queue containing blk's holder */
664*0Sstevel@tonic-gate 
665*0Sstevel@tonic-gate 	if (ptr == NULL)
666*0Sstevel@tonic-gate 		return;
667*0Sstevel@tonic-gate 	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
668*0Sstevel@tonic-gate 		struct lblk	*lblk;	/* pointer to freed block */
669*0Sstevel@tonic-gate 		ssize_t		offset;	/* choice of header lists */
670*0Sstevel@tonic-gate 
671*0Sstevel@tonic-gate 		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
672*0Sstevel@tonic-gate 		assert((struct header *)lblk < arenaend);
673*0Sstevel@tonic-gate 		assert((struct header *)lblk > arena);
674*0Sstevel@tonic-gate 		/* allow twits (e.g. awk) to free a block twice */
675*0Sstevel@tonic-gate 		holdblk = lblk->header.holder;
676*0Sstevel@tonic-gate 		if (!TESTBUSY(holdblk))
677*0Sstevel@tonic-gate 			return;
678*0Sstevel@tonic-gate 		holdblk = (struct holdblk *)CLRALL(holdblk);
679*0Sstevel@tonic-gate 		/* put lblk on its hold block's free list */
680*0Sstevel@tonic-gate 		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
681*0Sstevel@tonic-gate 		holdblk->lfreeq = lblk;
682*0Sstevel@tonic-gate 		/* move holdblk to head of queue, if its not already there */
683*0Sstevel@tonic-gate 		offset = holdblk->blksz / grain;
684*0Sstevel@tonic-gate 		oldhead = holdhead[offset];
685*0Sstevel@tonic-gate 		if (oldhead != holdblk) {
686*0Sstevel@tonic-gate 			/* first take out of current spot */
687*0Sstevel@tonic-gate 			holdhead[offset] = holdblk;
688*0Sstevel@tonic-gate 			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
689*0Sstevel@tonic-gate 			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
690*0Sstevel@tonic-gate 			/* now add at front */
691*0Sstevel@tonic-gate 			holdblk->nexthblk = oldhead;
692*0Sstevel@tonic-gate 			holdblk->prevhblk = oldhead->prevhblk;
693*0Sstevel@tonic-gate 			oldhead->prevhblk = holdblk;
694*0Sstevel@tonic-gate 			holdblk->prevhblk->nexthblk = holdblk;
695*0Sstevel@tonic-gate 		}
696*0Sstevel@tonic-gate 	} else {
697*0Sstevel@tonic-gate 		struct header *blk;	/* real start of block */
698*0Sstevel@tonic-gate 		struct header *next;	/* next = blk->nextblk */
699*0Sstevel@tonic-gate 		struct header *nextnext;	/* block after next */
700*0Sstevel@tonic-gate 
701*0Sstevel@tonic-gate 		blk = (struct header *)((char *)ptr - minhead);
702*0Sstevel@tonic-gate 		next = blk->nextblk;
703*0Sstevel@tonic-gate 		/* take care of twits (e.g. awk) who return blocks twice */
704*0Sstevel@tonic-gate 		if (!TESTBUSY(next))
705*0Sstevel@tonic-gate 			return;
706*0Sstevel@tonic-gate 		blk->nextblk = next = CLRBUSY(next);
707*0Sstevel@tonic-gate 		ADDFREEQ(blk);
708*0Sstevel@tonic-gate 		/* see if we can compact */
709*0Sstevel@tonic-gate 		if (!TESTBUSY(nextnext = next->nextblk)) {
710*0Sstevel@tonic-gate 			do {
711*0Sstevel@tonic-gate 				DELFREEQ(next);
712*0Sstevel@tonic-gate 				next = nextnext;
713*0Sstevel@tonic-gate 			} while (!TESTBUSY(nextnext = next->nextblk));
714*0Sstevel@tonic-gate 			if (next == arenaend) lastblk = blk;
715*0Sstevel@tonic-gate 			blk->nextblk = next;
716*0Sstevel@tonic-gate 		}
717*0Sstevel@tonic-gate 	}
718*0Sstevel@tonic-gate 	CHECKQ
719*0Sstevel@tonic-gate }
720*0Sstevel@tonic-gate 
721*0Sstevel@tonic-gate 
722*0Sstevel@tonic-gate /*
723*0Sstevel@tonic-gate  * realloc(ptr, size) - give the user a block of size "size", with
724*0Sstevel@tonic-gate  *			    the contents pointed to by ptr.  Free ptr.
725*0Sstevel@tonic-gate  */
726*0Sstevel@tonic-gate 
727*0Sstevel@tonic-gate void *
728*0Sstevel@tonic-gate realloc(void *ptr, size_t size)
729*0Sstevel@tonic-gate {
730*0Sstevel@tonic-gate 	void	*retval;
731*0Sstevel@tonic-gate 
732*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
733*0Sstevel@tonic-gate 	retval = realloc_unlocked(ptr, size);
734*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
735*0Sstevel@tonic-gate 	return (retval);
736*0Sstevel@tonic-gate }
737*0Sstevel@tonic-gate 
738*0Sstevel@tonic-gate 
739*0Sstevel@tonic-gate /*
740*0Sstevel@tonic-gate  * realloc_unlocked(ptr, size) - Do the real work for realloc()
741*0Sstevel@tonic-gate  */
742*0Sstevel@tonic-gate 
743*0Sstevel@tonic-gate static void *
744*0Sstevel@tonic-gate realloc_unlocked(void *ptr, size_t size)
745*0Sstevel@tonic-gate {
746*0Sstevel@tonic-gate 	struct header *blk;	/* block ptr is contained in */
747*0Sstevel@tonic-gate 	size_t trusize;	/* block size as allocater sees it */
748*0Sstevel@tonic-gate 	char *newptr;			/* pointer to user's new block */
749*0Sstevel@tonic-gate 	size_t cpysize;	/* amount to copy */
750*0Sstevel@tonic-gate 	struct header *next;	/* block after blk */
751*0Sstevel@tonic-gate 
752*0Sstevel@tonic-gate 	if (ptr == NULL)
753*0Sstevel@tonic-gate 		return (malloc_unlocked(size, 0));
754*0Sstevel@tonic-gate 
755*0Sstevel@tonic-gate 	if (size == 0) {
756*0Sstevel@tonic-gate 		free_unlocked(ptr);
757*0Sstevel@tonic-gate 		return (NULL);
758*0Sstevel@tonic-gate 	}
759*0Sstevel@tonic-gate 
760*0Sstevel@tonic-gate 	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
761*0Sstevel@tonic-gate 	    header.holder)) {
762*0Sstevel@tonic-gate 		/*
763*0Sstevel@tonic-gate 		 * we have a special small block which can't be expanded
764*0Sstevel@tonic-gate 		 *
765*0Sstevel@tonic-gate 		 * This makes the assumption that even if the user is
766*0Sstevel@tonic-gate 		 * reallocating a free block, malloc doesn't alter the contents
767*0Sstevel@tonic-gate 		 * of small blocks
768*0Sstevel@tonic-gate 		 */
769*0Sstevel@tonic-gate 		newptr = malloc_unlocked(size, 0);
770*0Sstevel@tonic-gate 		if (newptr == NULL)
771*0Sstevel@tonic-gate 			return (NULL);
772*0Sstevel@tonic-gate 		/* this isn't to save time--its to protect the twits */
773*0Sstevel@tonic-gate 		if ((char *)ptr != newptr) {
774*0Sstevel@tonic-gate 			struct lblk *lblk;
775*0Sstevel@tonic-gate 			lblk = (struct lblk *)((char *)ptr - MINHEAD);
776*0Sstevel@tonic-gate 			cpysize = ((struct holdblk *)
777*0Sstevel@tonic-gate 			    CLRALL(lblk->header.holder))->blksz;
778*0Sstevel@tonic-gate 			cpysize = (size > cpysize) ? cpysize : size;
779*0Sstevel@tonic-gate 			(void) memcpy(newptr, ptr, cpysize);
780*0Sstevel@tonic-gate 			free_unlocked(ptr);
781*0Sstevel@tonic-gate 		}
782*0Sstevel@tonic-gate 	} else {
783*0Sstevel@tonic-gate 		blk = (struct header *)((char *)ptr - minhead);
784*0Sstevel@tonic-gate 		next = blk->nextblk;
785*0Sstevel@tonic-gate 		/*
786*0Sstevel@tonic-gate 		 * deal with twits who reallocate free blocks
787*0Sstevel@tonic-gate 		 *
788*0Sstevel@tonic-gate 		 * if they haven't reset minblk via getopt, that's
789*0Sstevel@tonic-gate 		 * their problem
790*0Sstevel@tonic-gate 		 */
791*0Sstevel@tonic-gate 		if (!TESTBUSY(next)) {
792*0Sstevel@tonic-gate 			DELFREEQ(blk);
793*0Sstevel@tonic-gate 			blk->nextblk = SETBUSY(next);
794*0Sstevel@tonic-gate 		}
795*0Sstevel@tonic-gate 		next = CLRBUSY(next);
796*0Sstevel@tonic-gate 		/* make blk as big as possible */
797*0Sstevel@tonic-gate 		if (!TESTBUSY(next->nextblk)) {
798*0Sstevel@tonic-gate 			do {
799*0Sstevel@tonic-gate 				DELFREEQ(next);
800*0Sstevel@tonic-gate 				next = next->nextblk;
801*0Sstevel@tonic-gate 			} while (!TESTBUSY(next->nextblk));
802*0Sstevel@tonic-gate 			blk->nextblk = SETBUSY(next);
803*0Sstevel@tonic-gate 			if (next >= arenaend) lastblk = blk;
804*0Sstevel@tonic-gate 		}
805*0Sstevel@tonic-gate 		/* get size we really need */
806*0Sstevel@tonic-gate 		trusize = size+minhead;
807*0Sstevel@tonic-gate 		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
808*0Sstevel@tonic-gate 		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
809*0Sstevel@tonic-gate 		/* see if we have enough */
810*0Sstevel@tonic-gate 		/* this isn't really the copy size, but I need a register */
811*0Sstevel@tonic-gate 		cpysize = (char *)next - (char *)blk;
812*0Sstevel@tonic-gate 		if (cpysize >= trusize) {
813*0Sstevel@tonic-gate 			/* carve out the size we need */
814*0Sstevel@tonic-gate 			struct header *newblk;	/* remainder */
815*0Sstevel@tonic-gate 
816*0Sstevel@tonic-gate 			if (cpysize - trusize >= MINBLKSZ) {
817*0Sstevel@tonic-gate 				/*
818*0Sstevel@tonic-gate 				 * carve out the right size block
819*0Sstevel@tonic-gate 				 * newblk will be the remainder
820*0Sstevel@tonic-gate 				 */
821*0Sstevel@tonic-gate 				newblk = (struct header *)((char *)blk +
822*0Sstevel@tonic-gate 				    trusize);
823*0Sstevel@tonic-gate 				newblk->nextblk = next;
824*0Sstevel@tonic-gate 				blk->nextblk = SETBUSY(newblk);
825*0Sstevel@tonic-gate 				/* at this point, next is invalid */
826*0Sstevel@tonic-gate 				ADDFREEQ(newblk);
827*0Sstevel@tonic-gate 				/* if blk was lastblk, make newblk lastblk */
828*0Sstevel@tonic-gate 				if (blk == lastblk)
829*0Sstevel@tonic-gate 					lastblk = newblk;
830*0Sstevel@tonic-gate 			}
831*0Sstevel@tonic-gate 			newptr = ptr;
832*0Sstevel@tonic-gate 		} else {
833*0Sstevel@tonic-gate 			/* bite the bullet, and call malloc */
834*0Sstevel@tonic-gate 			cpysize = (size > cpysize) ? cpysize : size;
835*0Sstevel@tonic-gate 			newptr = malloc_unlocked(size, 0);
836*0Sstevel@tonic-gate 			if (newptr == NULL)
837*0Sstevel@tonic-gate 				return (NULL);
838*0Sstevel@tonic-gate 			(void) memcpy(newptr, ptr, cpysize);
839*0Sstevel@tonic-gate 			free_unlocked(ptr);
840*0Sstevel@tonic-gate 		}
841*0Sstevel@tonic-gate 	}
842*0Sstevel@tonic-gate 	return (newptr);
843*0Sstevel@tonic-gate }
844*0Sstevel@tonic-gate 
845*0Sstevel@tonic-gate 
846*0Sstevel@tonic-gate /* LINTLIBRARY */
847*0Sstevel@tonic-gate /*
848*0Sstevel@tonic-gate  * calloc - allocate and clear memory block
849*0Sstevel@tonic-gate  */
850*0Sstevel@tonic-gate 
851*0Sstevel@tonic-gate void *
852*0Sstevel@tonic-gate calloc(size_t num, size_t size)
853*0Sstevel@tonic-gate {
854*0Sstevel@tonic-gate 	char *mp;
855*0Sstevel@tonic-gate 
856*0Sstevel@tonic-gate 	num *= size;
857*0Sstevel@tonic-gate 	mp = malloc(num);
858*0Sstevel@tonic-gate 	if (mp == NULL)
859*0Sstevel@tonic-gate 		return (NULL);
860*0Sstevel@tonic-gate 	(void) memset(mp, 0, num);
861*0Sstevel@tonic-gate 	return (mp);
862*0Sstevel@tonic-gate }
863*0Sstevel@tonic-gate 
864*0Sstevel@tonic-gate 
865*0Sstevel@tonic-gate /*
866*0Sstevel@tonic-gate  * Mallopt - set options for allocation
867*0Sstevel@tonic-gate  *
868*0Sstevel@tonic-gate  *	Mallopt provides for control over the allocation algorithm.
869*0Sstevel@tonic-gate  *	The cmds available are:
870*0Sstevel@tonic-gate  *
871*0Sstevel@tonic-gate  *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
872*0Sstevel@tonic-gate  *		 largest small, quickly allocated block.  Maxfast
873*0Sstevel@tonic-gate  *		 may be set to 0 to disable fast allocation entirely.
874*0Sstevel@tonic-gate  *
875*0Sstevel@tonic-gate  *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
876*0Sstevel@tonic-gate  *		 small blocks per holding block.  Value must be
877*0Sstevel@tonic-gate  *		 greater than 1.
878*0Sstevel@tonic-gate  *
879*0Sstevel@tonic-gate  *	M_GRAIN  Set grain to value.  The sizes of all blocks
880*0Sstevel@tonic-gate  *		 smaller than maxfast are considered to be rounded
881*0Sstevel@tonic-gate  *		 up to the nearest multiple of grain. The default
882*0Sstevel@tonic-gate  *		 value of grain is the smallest number of bytes
883*0Sstevel@tonic-gate  *		 which will allow alignment of any data type.    Grain
884*0Sstevel@tonic-gate  *		 will be rounded up to a multiple of its default,
885*0Sstevel@tonic-gate  *		 and maxsize will be rounded up to a multiple of
886*0Sstevel@tonic-gate  *		 grain.  Value must be greater than 0.
887*0Sstevel@tonic-gate  *
888*0Sstevel@tonic-gate  *	M_KEEP   Retain data in freed block until the next malloc,
889*0Sstevel@tonic-gate  *		 realloc, or calloc.  Value is ignored.
890*0Sstevel@tonic-gate  *		 This option is provided only for compatibility with
891*0Sstevel@tonic-gate  *		 the old version of malloc, and is not recommended.
892*0Sstevel@tonic-gate  *
893*0Sstevel@tonic-gate  *	returns - 0, upon successful completion
894*0Sstevel@tonic-gate  *		 1, if malloc has previously been called or
895*0Sstevel@tonic-gate  *		    if value or cmd have illegal values
896*0Sstevel@tonic-gate  */
897*0Sstevel@tonic-gate 
898*0Sstevel@tonic-gate int
899*0Sstevel@tonic-gate _mallopt(int cmd, int value)
900*0Sstevel@tonic-gate {
901*0Sstevel@tonic-gate 	/* disallow changes once a small block is allocated */
902*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
903*0Sstevel@tonic-gate 	if (change) {
904*0Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
905*0Sstevel@tonic-gate 		return (1);
906*0Sstevel@tonic-gate 	}
907*0Sstevel@tonic-gate 	switch (cmd) {
908*0Sstevel@tonic-gate 	case M_MXFAST:
909*0Sstevel@tonic-gate 		if (value < 0) {
910*0Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
911*0Sstevel@tonic-gate 			return (1);
912*0Sstevel@tonic-gate 		}
913*0Sstevel@tonic-gate 		fastct = (value + grain - 1) / grain;
914*0Sstevel@tonic-gate 		maxfast = grain*fastct;
915*0Sstevel@tonic-gate 		break;
916*0Sstevel@tonic-gate 	case M_NLBLKS:
917*0Sstevel@tonic-gate 		if (value <= 1) {
918*0Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
919*0Sstevel@tonic-gate 			return (1);
920*0Sstevel@tonic-gate 		}
921*0Sstevel@tonic-gate 		numlblks = value;
922*0Sstevel@tonic-gate 		break;
923*0Sstevel@tonic-gate 	case M_GRAIN:
924*0Sstevel@tonic-gate 		if (value <= 0) {
925*0Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
926*0Sstevel@tonic-gate 			return (1);
927*0Sstevel@tonic-gate 		}
928*0Sstevel@tonic-gate 
929*0Sstevel@tonic-gate 		/* round grain up to a multiple of ALIGNSZ */
930*0Sstevel@tonic-gate 		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
931*0Sstevel@tonic-gate 
932*0Sstevel@tonic-gate 		/* reduce fastct appropriately */
933*0Sstevel@tonic-gate 		fastct = (maxfast + grain - 1) / grain;
934*0Sstevel@tonic-gate 		maxfast = grain * fastct;
935*0Sstevel@tonic-gate 		break;
936*0Sstevel@tonic-gate 	case M_KEEP:
937*0Sstevel@tonic-gate 		if (change && holdhead != NULL) {
938*0Sstevel@tonic-gate 			mutex_unlock(&mlock);
939*0Sstevel@tonic-gate 			return (1);
940*0Sstevel@tonic-gate 		}
941*0Sstevel@tonic-gate 		minhead = HEADSZ;
942*0Sstevel@tonic-gate 		break;
943*0Sstevel@tonic-gate 	default:
944*0Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
945*0Sstevel@tonic-gate 		return (1);
946*0Sstevel@tonic-gate 	}
947*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
948*0Sstevel@tonic-gate 	return (0);
949*0Sstevel@tonic-gate }
950*0Sstevel@tonic-gate 
951*0Sstevel@tonic-gate /*
952*0Sstevel@tonic-gate  * mallinfo-provide information about space usage
953*0Sstevel@tonic-gate  *
954*0Sstevel@tonic-gate  *	input - none (the function takes no arguments)
956*0Sstevel@tonic-gate  *
957*0Sstevel@tonic-gate  *	output - a structure containing a description of
958*0Sstevel@tonic-gate  *		 space usage, defined in malloc.h
959*0Sstevel@tonic-gate  */
960*0Sstevel@tonic-gate 
961*0Sstevel@tonic-gate struct mallinfo
962*0Sstevel@tonic-gate _mallinfo(void)
963*0Sstevel@tonic-gate {
964*0Sstevel@tonic-gate 	struct header *blk, *next;	/* ptr to ordinary blocks */
965*0Sstevel@tonic-gate 	struct holdblk *hblk;		/* ptr to holding blocks */
966*0Sstevel@tonic-gate 	struct mallinfo inf;		/* return value */
967*0Sstevel@tonic-gate 	int	i;			/* the ubiquitous counter */
968*0Sstevel@tonic-gate 	ssize_t size;			/* size of a block */
969*0Sstevel@tonic-gate 	ssize_t fsp;			/* free space in 1 hold block */
970*0Sstevel@tonic-gate 
971*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
972*0Sstevel@tonic-gate 	(void) memset(&inf, 0, sizeof (struct mallinfo));
973*0Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
974*0Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
975*0Sstevel@tonic-gate 		return (inf);
976*0Sstevel@tonic-gate 	}
977*0Sstevel@tonic-gate 	blk = CLRBUSY(arena[1].nextblk);
978*0Sstevel@tonic-gate 	/* return total space used */
979*0Sstevel@tonic-gate 	inf.arena = (char *)arenaend - (char *)blk;
980*0Sstevel@tonic-gate 
981*0Sstevel@tonic-gate 	/*
982*0Sstevel@tonic-gate 	 * loop through arena, counting # of blocks, and
983*0Sstevel@tonic-gate 	 * and space used by blocks
984*0Sstevel@tonic-gate 	 */
985*0Sstevel@tonic-gate 	next = CLRBUSY(blk->nextblk);
986*0Sstevel@tonic-gate 	while (next != &(arena[1])) {
987*0Sstevel@tonic-gate 		inf.ordblks++;
988*0Sstevel@tonic-gate 		size = (char *)next - (char *)blk;
989*0Sstevel@tonic-gate 		if (TESTBUSY(blk->nextblk)) {
990*0Sstevel@tonic-gate 			inf.uordblks += size;
991*0Sstevel@tonic-gate 			inf.keepcost += HEADSZ-MINHEAD;
992*0Sstevel@tonic-gate 		} else {
993*0Sstevel@tonic-gate 			inf.fordblks += size;
994*0Sstevel@tonic-gate 		}
995*0Sstevel@tonic-gate 		blk = next;
996*0Sstevel@tonic-gate 		next = CLRBUSY(blk->nextblk);
997*0Sstevel@tonic-gate 	}
998*0Sstevel@tonic-gate 
999*0Sstevel@tonic-gate 	/*
1000*0Sstevel@tonic-gate 	 * if any holding block have been allocated
1001*0Sstevel@tonic-gate 	 * then examine space in holding blks
1002*0Sstevel@tonic-gate 	 */
1003*0Sstevel@tonic-gate 	if (change && holdhead != NULL) {
1004*0Sstevel@tonic-gate 		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
1005*0Sstevel@tonic-gate 			hblk = holdhead[i];
1006*0Sstevel@tonic-gate 			/* do only if chain not empty */
1007*0Sstevel@tonic-gate 			if (hblk != HGROUND) {
1008*0Sstevel@tonic-gate 				size = hblk->blksz +
1009*0Sstevel@tonic-gate 				    sizeof (struct lblk) - sizeof (int);
1010*0Sstevel@tonic-gate 				do {	/* loop thru 1 hold blk chain */
1011*0Sstevel@tonic-gate 					inf.hblks++;
1012*0Sstevel@tonic-gate 					fsp = freespace(hblk);
1013*0Sstevel@tonic-gate 					inf.fsmblks += fsp;
1014*0Sstevel@tonic-gate 					inf.usmblks += numlblks*size - fsp;
1015*0Sstevel@tonic-gate 					inf.smblks += numlblks;
1016*0Sstevel@tonic-gate 					hblk = hblk->nexthblk;
1017*0Sstevel@tonic-gate 				} while (hblk != holdhead[i]);
1018*0Sstevel@tonic-gate 			}
1019*0Sstevel@tonic-gate 		}
1020*0Sstevel@tonic-gate 	}
1021*0Sstevel@tonic-gate 	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
1022*0Sstevel@tonic-gate 	/* holding block were counted in ordblks, so subtract off */
1023*0Sstevel@tonic-gate 	inf.ordblks -= inf.hblks;
1024*0Sstevel@tonic-gate 	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
1025*0Sstevel@tonic-gate 	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
1026*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1027*0Sstevel@tonic-gate 	return (inf);
1028*0Sstevel@tonic-gate }
1029*0Sstevel@tonic-gate 
1030*0Sstevel@tonic-gate 
1031*0Sstevel@tonic-gate /*
1032*0Sstevel@tonic-gate  * freespace - calc. how much space is used in the free
1033*0Sstevel@tonic-gate  *		    small blocks in a given holding block
1034*0Sstevel@tonic-gate  *
1035*0Sstevel@tonic-gate  *	input - hblk = given holding block
1036*0Sstevel@tonic-gate  *
1037*0Sstevel@tonic-gate  *	returns space used in free small blocks of hblk
1038*0Sstevel@tonic-gate  */
1039*0Sstevel@tonic-gate 
1040*0Sstevel@tonic-gate static ssize_t
1041*0Sstevel@tonic-gate freespace(struct holdblk *holdblk)
1042*0Sstevel@tonic-gate {
1043*0Sstevel@tonic-gate 	struct lblk *lblk;
1044*0Sstevel@tonic-gate 	ssize_t space = 0;
1045*0Sstevel@tonic-gate 	ssize_t size;
1046*0Sstevel@tonic-gate 	struct lblk *unused;
1047*0Sstevel@tonic-gate 
1048*0Sstevel@tonic-gate 	lblk = CLRSMAL(holdblk->lfreeq);
1049*0Sstevel@tonic-gate 	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
1050*0Sstevel@tonic-gate 	unused = CLRSMAL(holdblk->unused);
1051*0Sstevel@tonic-gate 	/* follow free chain */
1052*0Sstevel@tonic-gate 	while ((lblk != LGROUND) && (lblk != unused)) {
1053*0Sstevel@tonic-gate 		space += size;
1054*0Sstevel@tonic-gate 		lblk = CLRSMAL(lblk->header.nextfree);
1055*0Sstevel@tonic-gate 	}
1056*0Sstevel@tonic-gate 	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
1057*0Sstevel@tonic-gate 	return (space);
1058*0Sstevel@tonic-gate }
1059*0Sstevel@tonic-gate 
1060*0Sstevel@tonic-gate static void *
1061*0Sstevel@tonic-gate morecore(size_t bytes)
1062*0Sstevel@tonic-gate {
1063*0Sstevel@tonic-gate 	void * ret;
1064*0Sstevel@tonic-gate 
1065*0Sstevel@tonic-gate 	if (bytes > LONG_MAX) {
1066*0Sstevel@tonic-gate 		intptr_t wad;
1067*0Sstevel@tonic-gate 		/*
1068*0Sstevel@tonic-gate 		 * The request size is too big. We need to do this in
1069*0Sstevel@tonic-gate 		 * chunks. Sbrk only takes an int for an arg.
1070*0Sstevel@tonic-gate 		 */
1071*0Sstevel@tonic-gate 		if (bytes == ULONG_MAX)
1072*0Sstevel@tonic-gate 			return ((void *)-1);
1073*0Sstevel@tonic-gate 
1074*0Sstevel@tonic-gate 		ret = sbrk(0);
1075*0Sstevel@tonic-gate 		wad = LONG_MAX;
1076*0Sstevel@tonic-gate 		while (wad > 0) {
1077*0Sstevel@tonic-gate 			if (sbrk(wad) == (void *)-1) {
1078*0Sstevel@tonic-gate 				if (ret != sbrk(0))
1079*0Sstevel@tonic-gate 					(void) sbrk(-LONG_MAX);
1080*0Sstevel@tonic-gate 				return ((void *)-1);
1081*0Sstevel@tonic-gate 			}
1082*0Sstevel@tonic-gate 			bytes -= LONG_MAX;
1083*0Sstevel@tonic-gate 			wad = bytes;
1084*0Sstevel@tonic-gate 		}
1085*0Sstevel@tonic-gate 	} else
1086*0Sstevel@tonic-gate 		ret = sbrk(bytes);
1087*0Sstevel@tonic-gate 
1088*0Sstevel@tonic-gate 	return (ret);
1089*0Sstevel@tonic-gate }
1090*0Sstevel@tonic-gate 
1091*0Sstevel@tonic-gate #ifdef debug
1092*0Sstevel@tonic-gate int
1093*0Sstevel@tonic-gate check_arena(void)
1094*0Sstevel@tonic-gate {
1095*0Sstevel@tonic-gate 	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */
1096*0Sstevel@tonic-gate 
1097*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
1098*0Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
1099*0Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
1100*0Sstevel@tonic-gate 		return (-1);
1101*0Sstevel@tonic-gate 	}
1102*0Sstevel@tonic-gate 	blk = arena + 1;
1103*0Sstevel@tonic-gate 
1104*0Sstevel@tonic-gate 	/* loop through arena, checking */
1105*0Sstevel@tonic-gate 	blk = (struct header *)CLRALL(blk->nextblk);
1106*0Sstevel@tonic-gate 	next = (struct header *)CLRALL(blk->nextblk);
1107*0Sstevel@tonic-gate 	while (next != arena + 1) {
1108*0Sstevel@tonic-gate 		assert(blk >= arena + 1);
1109*0Sstevel@tonic-gate 		assert(blk <= lastblk);
1110*0Sstevel@tonic-gate 		assert(next >= blk + 1);
1111*0Sstevel@tonic-gate 		assert(((uintptr_t)((struct header *)blk->nextblk) &
1112*0Sstevel@tonic-gate 		    (4 | SMAL)) == 0);
1113*0Sstevel@tonic-gate 
1114*0Sstevel@tonic-gate 		if (TESTBUSY(blk->nextblk) == 0) {
1115*0Sstevel@tonic-gate 			assert(blk->nextfree >= freeptr);
1116*0Sstevel@tonic-gate 			assert(blk->prevfree >= freeptr);
1117*0Sstevel@tonic-gate 			assert(blk->nextfree <= lastblk);
1118*0Sstevel@tonic-gate 			assert(blk->prevfree <= lastblk);
1119*0Sstevel@tonic-gate 			assert(((uintptr_t)((struct header *)blk->nextfree) &
1120*0Sstevel@tonic-gate 			    7) == 0);
1121*0Sstevel@tonic-gate 			assert(((uintptr_t)((struct header *)blk->prevfree) &
1122*0Sstevel@tonic-gate 			    7) == 0 || blk->prevfree == freeptr);
1123*0Sstevel@tonic-gate 		}
1124*0Sstevel@tonic-gate 		blk = next;
1125*0Sstevel@tonic-gate 		next = CLRBUSY(blk->nextblk);
1126*0Sstevel@tonic-gate 	}
1127*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1128*0Sstevel@tonic-gate 	return (0);
1129*0Sstevel@tonic-gate }
1130*0Sstevel@tonic-gate 
1131*0Sstevel@tonic-gate #define	RSTALLOC	1
1132*0Sstevel@tonic-gate #endif
1133*0Sstevel@tonic-gate 
1134*0Sstevel@tonic-gate #ifdef RSTALLOC
1135*0Sstevel@tonic-gate /*
1136*0Sstevel@tonic-gate  * rstalloc - reset alloc routines
1137*0Sstevel@tonic-gate  *
1138*0Sstevel@tonic-gate  *	description -	return allocated memory and reset
1139*0Sstevel@tonic-gate  *			allocation pointers.
1140*0Sstevel@tonic-gate  *
1141*0Sstevel@tonic-gate  *	Warning - This is for debugging purposes only.
1142*0Sstevel@tonic-gate  *		  It will return all memory allocated after
1143*0Sstevel@tonic-gate  *		  the first call to malloc, even if some
1144*0Sstevel@tonic-gate  *		  of it was fetched by a user's sbrk().
1145*0Sstevel@tonic-gate  */
1146*0Sstevel@tonic-gate 
1147*0Sstevel@tonic-gate void
1148*0Sstevel@tonic-gate rstalloc(void)
1149*0Sstevel@tonic-gate {
1150*0Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
1151*0Sstevel@tonic-gate 	minhead = MINHEAD;
1152*0Sstevel@tonic-gate 	grain = ALIGNSZ;
1153*0Sstevel@tonic-gate 	numlblks = NUMLBLKS;
1154*0Sstevel@tonic-gate 	fastct = FASTCT;
1155*0Sstevel@tonic-gate 	maxfast = MAXFAST;
1156*0Sstevel@tonic-gate 	change = 0;
1157*0Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
1158*0Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
1159*0Sstevel@tonic-gate 		return;
1160*0Sstevel@tonic-gate 	}
1161*0Sstevel@tonic-gate 	brk(CLRBUSY(arena[1].nextblk));
1162*0Sstevel@tonic-gate 	freeptr[0].nextfree = GROUND;
1163*0Sstevel@tonic-gate #ifdef debug
1164*0Sstevel@tonic-gate 	case1count = 0;
1165*0Sstevel@tonic-gate #endif
1166*0Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1167*0Sstevel@tonic-gate }
1168*0Sstevel@tonic-gate #endif	/* RSTALLOC */
1169*0Sstevel@tonic-gate 
1170*0Sstevel@tonic-gate /*
1171*0Sstevel@tonic-gate  * cfree is an undocumented, obsolete function
1172*0Sstevel@tonic-gate  */
1173*0Sstevel@tonic-gate 
1174*0Sstevel@tonic-gate /* ARGSUSED */
1175*0Sstevel@tonic-gate void
1176*0Sstevel@tonic-gate _cfree(char *p, unsigned num, unsigned size)
1177*0Sstevel@tonic-gate {
1178*0Sstevel@tonic-gate 	free(p);
1179*0Sstevel@tonic-gate }
1180