10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
52658Sraf  * Common Development and Distribution License (the "License").
62658Sraf  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
212658Sraf 
22*3866Sraf /*
23*3866Sraf  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24*3866Sraf  * Use is subject to license terms.
25*3866Sraf  */
26*3866Sraf 
270Sstevel@tonic-gate /*	Copyright (c) 1988 AT&T	*/
280Sstevel@tonic-gate /*	  All Rights Reserved  	*/
290Sstevel@tonic-gate 
300Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
310Sstevel@tonic-gate 
32*3866Sraf #include <c_synonyms.h>
330Sstevel@tonic-gate #include <sys/types.h>
340Sstevel@tonic-gate 
350Sstevel@tonic-gate #ifndef debug
360Sstevel@tonic-gate #define	NDEBUG
370Sstevel@tonic-gate #endif
380Sstevel@tonic-gate 
390Sstevel@tonic-gate #include <stdlib.h>
400Sstevel@tonic-gate #include <string.h>
410Sstevel@tonic-gate #include "assert.h"
420Sstevel@tonic-gate #include "malloc.h"
430Sstevel@tonic-gate #include "mallint.h"
440Sstevel@tonic-gate #include <thread.h>
45*3866Sraf #include <pthread.h>
460Sstevel@tonic-gate #include <synch.h>
470Sstevel@tonic-gate #include <unistd.h>
480Sstevel@tonic-gate #include <limits.h>
490Sstevel@tonic-gate 
static mutex_t mlock = DEFAULTMUTEX;	/* guards all allocator state below */
static ssize_t freespace(struct holdblk *);	/* defined later in this file */
static void *malloc_unlocked(size_t, int);	/* allocator core; mlock held */
static void *realloc_unlocked(void *, size_t);	/* realloc core; mlock held */
static void free_unlocked(void *);		/* free core; mlock held */
static void *morecore(size_t);	/* grows the arena; returns (void *)-1 on failure */
560Sstevel@tonic-gate 
570Sstevel@tonic-gate /*
 * user-level memory allocator (malloc, free, realloc)
590Sstevel@tonic-gate  *
600Sstevel@tonic-gate  *	-malloc, free, realloc and mallopt form a memory allocator
610Sstevel@tonic-gate  *	similar to malloc, free, and realloc.  The routines
620Sstevel@tonic-gate  *	here are much faster than the original, with slightly worse
630Sstevel@tonic-gate  *	space usage (a few percent difference on most input).  They
640Sstevel@tonic-gate  *	do not have the property that data in freed blocks is left
650Sstevel@tonic-gate  *	untouched until the space is reallocated.
660Sstevel@tonic-gate  *
670Sstevel@tonic-gate  *	-Memory is kept in the "arena", a singly linked list of blocks.
680Sstevel@tonic-gate  *	These blocks are of 3 types.
690Sstevel@tonic-gate  *		1. A free block.  This is a block not in use by the
700Sstevel@tonic-gate  *		   user.  It has a 3 word header. (See description
710Sstevel@tonic-gate  *		   of the free queue.)
720Sstevel@tonic-gate  *		2. An allocated block.  This is a block the user has
730Sstevel@tonic-gate  *		   requested.  It has only a 1 word header, pointing
740Sstevel@tonic-gate  *		   to the next block of any sort.
750Sstevel@tonic-gate  *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
770Sstevel@tonic-gate  *		   has a 1 word header, as does 2.
780Sstevel@tonic-gate  *	Blocks of type 1 have the lower bit of the pointer to the
790Sstevel@tonic-gate  *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
800Sstevel@tonic-gate  *	to mark them busy.
810Sstevel@tonic-gate  *
820Sstevel@tonic-gate  *	-Unallocated blocks are kept on an unsorted doubly linked
830Sstevel@tonic-gate  *	free list.
840Sstevel@tonic-gate  *
850Sstevel@tonic-gate  *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
870Sstevel@tonic-gate  *	head of the free queue, which prevents bunching of small
880Sstevel@tonic-gate  *	blocks at the head of the queue.
890Sstevel@tonic-gate  *
900Sstevel@tonic-gate  *	-Compaction is performed at free time of any blocks immediately
910Sstevel@tonic-gate  *	following the freed block.  The freed block will be combined
920Sstevel@tonic-gate  *	with a preceding block during the search phase of malloc.
930Sstevel@tonic-gate  *	Since a freed block is added at the front of the free queue,
940Sstevel@tonic-gate  *	which is moved to the end of the queue if considered and
950Sstevel@tonic-gate  *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding block that is free is
970Sstevel@tonic-gate  *	freed and reallocated on the next call to malloc.  The
980Sstevel@tonic-gate  *	time savings of this strategy is judged to be worth the
990Sstevel@tonic-gate  *	occasional waste of memory.
1000Sstevel@tonic-gate  *
1010Sstevel@tonic-gate  *	-Small blocks (of size < MAXSIZE)  are not allocated directly.
1020Sstevel@tonic-gate  *	A large "holding" block is allocated via a recursive call to
1030Sstevel@tonic-gate  *	malloc.  This block contains a header and ?????? small blocks.
1040Sstevel@tonic-gate  *	Holding blocks for a given size of small block (rounded to the
1050Sstevel@tonic-gate  *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
1060Sstevel@tonic-gate  *	holding block with an unused small block is in front of any without.
1070Sstevel@tonic-gate  *	A list of free blocks is kept within the holding block.
1080Sstevel@tonic-gate  */
1090Sstevel@tonic-gate 
1100Sstevel@tonic-gate /*
1110Sstevel@tonic-gate  *	description of arena, free queue, holding blocks etc.
1120Sstevel@tonic-gate  *
 * New compilers and linkers do not guarantee the order of initialized data.
 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
1150Sstevel@tonic-gate  * Later code depends on this order.
1160Sstevel@tonic-gate  */
1170Sstevel@tonic-gate 
static struct header arena[4] = {
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0}
	};
				/*
				 * the second word is a minimal block to
				 * start the arena. The first is a busy
				 * block to be pointed to by the last block.
				 */

/* arena[2] and arena[3] double as the first and last entry of the free list */
#define	freeptr (arena + 2)
				/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;   /* pointer to array of head pointers */
				    /* to holding block chains */
1360Sstevel@tonic-gate /*
1370Sstevel@tonic-gate  * In order to save time calculating indices, the array is 1 too
1380Sstevel@tonic-gate  * large, and the first element is unused
1390Sstevel@tonic-gate  *
 * Variables controlling algorithm, esp. how holding blocks are used
1410Sstevel@tonic-gate  */
static int numlblks = NUMLBLKS;	/* presumably little blocks per holding */
				/* block — not used in this chunk; confirm */
static int minhead = MINHEAD;	/* header size of an ordinary (large) block */
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;	/* number of distinct small-block sizes, */
				/* i.e. entries in holdhead[] (plus one) */
static unsigned int maxfast = MAXFAST;	/* largest request served from a */
					/* holding block; 0 disables them */
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;	/* rounding granularity for small requests */
1500Sstevel@tonic-gate 
1510Sstevel@tonic-gate #ifdef debug
1520Sstevel@tonic-gate static int case1count = 0;
1530Sstevel@tonic-gate 
1540Sstevel@tonic-gate static void
1550Sstevel@tonic-gate checkq(void)
1560Sstevel@tonic-gate {
1570Sstevel@tonic-gate 	register struct header *p;
1580Sstevel@tonic-gate 
1590Sstevel@tonic-gate 	p = &freeptr[0];
1600Sstevel@tonic-gate 
1610Sstevel@tonic-gate 	/* check forward */
1620Sstevel@tonic-gate 	/*CSTYLED*/
1630Sstevel@tonic-gate 	while (p != &freeptr[1]) {
1640Sstevel@tonic-gate 		p = p->nextfree;
1650Sstevel@tonic-gate 		assert(p->prevfree->nextfree == p);
1660Sstevel@tonic-gate 	}
1670Sstevel@tonic-gate 
1680Sstevel@tonic-gate 	/* check backward */
1690Sstevel@tonic-gate 	/*CSTYLED*/
1700Sstevel@tonic-gate 	while (p != &freeptr[0]) {
1710Sstevel@tonic-gate 		p = p->prevfree;
1720Sstevel@tonic-gate 		assert(p->nextfree->prevfree == p);
1730Sstevel@tonic-gate 	}
1740Sstevel@tonic-gate }
1750Sstevel@tonic-gate #endif
1760Sstevel@tonic-gate 
1770Sstevel@tonic-gate 
1780Sstevel@tonic-gate /*
1790Sstevel@tonic-gate  * malloc(nbytes) - give a user nbytes to use
1800Sstevel@tonic-gate  */
1810Sstevel@tonic-gate 
1820Sstevel@tonic-gate void *
1830Sstevel@tonic-gate malloc(size_t nbytes)
1840Sstevel@tonic-gate {
1850Sstevel@tonic-gate 	void *ret;
1860Sstevel@tonic-gate 
1870Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
1880Sstevel@tonic-gate 	ret = malloc_unlocked(nbytes, 0);
1890Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1900Sstevel@tonic-gate 	return (ret);
1910Sstevel@tonic-gate }
1920Sstevel@tonic-gate 
1930Sstevel@tonic-gate /*
1940Sstevel@tonic-gate  * Use malloc_unlocked() to get the address to start with; Given this
1950Sstevel@tonic-gate  * address, find out the closest address that aligns with the request
1960Sstevel@tonic-gate  * and return that address after doing some house keeping (refer to the
1970Sstevel@tonic-gate  * ascii art below).
1980Sstevel@tonic-gate  */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;	/* raw over-allocated buffer */
	struct header *hd;	/* header carved in front of aligned addr */
	size_t alloc_size;	/* size + slack needed to guarantee fit */
	uintptr_t fr;		/* the aligned address we will return */
	static int realloc;	/* count of edge-case re-allocations; */
				/* NOTE(review): shadows libc realloc() name */

	/* alignment must be a nonzero power of two; size must be nonzero */
	if (size == 0 || alignment == 0 ||
		(alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	/* ordinary malloc() already guarantees ALIGNSZ alignment */
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	/* over-allocate so an aligned address must exist inside the buffer */
	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	/* nosmall=1: force an ordinary block so headers can be spliced */
	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	/* round up to the next multiple of alignment */
	fr = (fr + alignment - 1) / alignment * alignment;

	/* already aligned: nothing to trim */
	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * we hit an edge case, where the space ahead of aligned
		 * address is not sufficient to hold 'header' and hence we
		 * can't free it. So double the allocation request.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			/* with 2*alignment slack the next aligned */
			/* address is guaranteed to be in range */
			fr = fr + alignment;
		}
	}

	/*
	 * Split the block: <a> is the original header, <b> the new one
	 * placed just before the aligned address fr; the leading part is
	 * then freed, leaving the caller the aligned tail.
	 *
	 *	+-------+		+-------+
	 *  +---| <a>   |		| <a>   |--+
	 *  |   +-------+<--alloc_buf-->+-------+  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|	 hd-->  +-------+  |
	 *  |   |	|	    +---|  <b>  |<-+
	 *  |   |	|	    |   +-------+<--- fr
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   +-------+	    |   +-------+
	 *  +-->|  next |	    +-->|  next |
	 *	+-------+		+-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	/* new header <b> inherits <a>'s nextblk; <a> now points at <b>, */
	/* marked busy so free(alloc_buf) releases only the leading part */
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
2920Sstevel@tonic-gate 
/*
 * valloc(size) - allocate size bytes aligned on a page boundary.
 * The page size is queried once from sysconf() and cached; the real
 * work is done by memalign().  Returns NULL for size == 0.
 */
void *
valloc(size_t size)
{
	static unsigned pagesize;	/* cached page size, 0 until queried */

	if (size == 0)
		return (NULL);

	if (pagesize == 0)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}
3050Sstevel@tonic-gate 
3060Sstevel@tonic-gate /*
3070Sstevel@tonic-gate  * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
3080Sstevel@tonic-gate  */
3090Sstevel@tonic-gate 
/*
 * Caller must hold mlock.  nosmall != 0 forces an ordinary (large) block
 * even when nbytes would fit a holding-block small allocation — used by
 * memalign(), which needs to splice real headers.  Returns NULL for
 * nbytes == 0 or when memory cannot be obtained.
 */
static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND))  {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				/* room for one more beyond this one */
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				/* holdblk is now full; unlink from queue */
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if ((char *)newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			/* roving first-fit search, coalescing as we go */
			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				/*
				 * NOTE(review): the string is 40 chars;
				 * length 41 also writes its trailing NUL.
				 * Also "that" should read "than". Confirm
				 * before changing this historic text.
				 */
				if (case1count++ > 0)
				    (void) write(2, "Case 1 hit more that once."
					" brk or sbrk?\n", 41);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???  	newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
6290Sstevel@tonic-gate 
/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *			block pointing to the next block
 *		if the header points backward, we have a small
 *			block from a holding block.
 *		In both cases, the busy bit must be set
 *
 *	Thread-safe entry point: serializes on mlock and defers
 *	to free_unlocked() for the actual work.
 */

void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}
6480Sstevel@tonic-gate 
/*
 * free_unlocked(ptr) - Do the real work for free()
 *
 *	Caller must hold mlock.  ptr == NULL is a no-op.
 *	A block whose busy bit is already clear is silently ignored
 *	(tolerates double free).
 */

void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	/* SMAL tag in the header distinguishes small from ordinary blocks */
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk	*lblk;	/* pointer to freed block */
		ssize_t		offset;	/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if its not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		/* ordinary block: return it and coalesce forward */
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact with following free blocks */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			/* absorbed up to the arena end: blk is now last */
			if (next == arenaend) lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}
7150Sstevel@tonic-gate 
7160Sstevel@tonic-gate 
7170Sstevel@tonic-gate /*
7180Sstevel@tonic-gate  * realloc(ptr, size) - give the user a block of size "size", with
7190Sstevel@tonic-gate  *			    the contents pointed to by ptr.  Free ptr.
7200Sstevel@tonic-gate  */
7210Sstevel@tonic-gate 
7220Sstevel@tonic-gate void *
7230Sstevel@tonic-gate realloc(void *ptr, size_t size)
7240Sstevel@tonic-gate {
7250Sstevel@tonic-gate 	void	*retval;
7260Sstevel@tonic-gate 
7270Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
7280Sstevel@tonic-gate 	retval = realloc_unlocked(ptr, size);
7290Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
7300Sstevel@tonic-gate 	return (retval);
7310Sstevel@tonic-gate }
7320Sstevel@tonic-gate 
7330Sstevel@tonic-gate 
/*
 * realloc_unlocked(ptr) - Do the real work for realloc()
 *
 *	Caller must hold mlock.
 *	ptr == NULL degenerates to malloc_unlocked(size).
 *	size == 0 frees ptr and returns NULL.
 *	Tries to grow the block in place (absorbing following free
 *	blocks) before falling back to allocate-copy-free.
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;	/* block size as allocater sees it */
	char *newptr;			/* pointer to user's new block */
	size_t cpysize;	/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--its to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;
			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			/* copy no more than the old small block held */
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minblk via getopt
		 * (presumably meaning mallopt(M_KEEP)), that's
		 * their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible by absorbing free successors */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend) lastblk = blk;
		}
		/* get size we really need: user size + header, aligned */
		trusize = size+minhead;
		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			/*
			 * NOTE(review): cpysize here is the whole block
			 * length including the minhead header, so when
			 * size > cpysize - minhead this memcpy reads up
			 * to minhead bytes past the user data (into the
			 * next block's header, still inside the arena).
			 * Looks benign but worth confirming.
			 */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}
8390Sstevel@tonic-gate 
8400Sstevel@tonic-gate 
8410Sstevel@tonic-gate /* LINTLIBRARY */
/*
 * calloc - allocate and clear memory block
 *
 *	input - num; number of elements
 *		size; size of each element in bytes
 *
 *	returns - pointer to num*size bytes of zeroed memory, or
 *		  NULL on allocation failure or if num*size would
 *		  overflow size_t (the historical code did not check
 *		  the multiplication and could return an undersized
 *		  block).
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;
	size_t total;

	/* guard against num*size wrapping around size_t */
	if (size != 0 && num > (size_t)-1 / size)
		return (NULL);
	total = num * size;
	mp = malloc(total);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, total);
	return (mp);
}
8580Sstevel@tonic-gate 
8590Sstevel@tonic-gate 
8600Sstevel@tonic-gate /*
8610Sstevel@tonic-gate  * Mallopt - set options for allocation
8620Sstevel@tonic-gate  *
8630Sstevel@tonic-gate  *	Mallopt provides for control over the allocation algorithm.
8640Sstevel@tonic-gate  *	The cmds available are:
8650Sstevel@tonic-gate  *
8660Sstevel@tonic-gate  *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
8670Sstevel@tonic-gate  *		 largest small, quickly allocated block.  Maxfast
8680Sstevel@tonic-gate  *		 may be set to 0 to disable fast allocation entirely.
8690Sstevel@tonic-gate  *
8700Sstevel@tonic-gate  *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
8710Sstevel@tonic-gate  *		 small blocks per holding block.  Value must be
8720Sstevel@tonic-gate  *		 greater than 0.
8730Sstevel@tonic-gate  *
8740Sstevel@tonic-gate  *	M_GRAIN  Set grain to value.  The sizes of all blocks
8750Sstevel@tonic-gate  *		 smaller than maxfast are considered to be rounded
8760Sstevel@tonic-gate  *		 up to the nearest multiple of grain. The default
8770Sstevel@tonic-gate  *		 value of grain is the smallest number of bytes
8780Sstevel@tonic-gate  *		 which will allow alignment of any data type.    Grain
8790Sstevel@tonic-gate  *		 will be rounded up to a multiple of its default,
8800Sstevel@tonic-gate  *		 and maxsize will be rounded up to a multiple of
8810Sstevel@tonic-gate  *		 grain.  Value must be greater than 0.
8820Sstevel@tonic-gate  *
8830Sstevel@tonic-gate  *	M_KEEP   Retain data in freed block until the next malloc,
8840Sstevel@tonic-gate  *		 realloc, or calloc.  Value is ignored.
8850Sstevel@tonic-gate  *		 This option is provided only for compatibility with
8860Sstevel@tonic-gate  *		 the old version of malloc, and is not recommended.
8870Sstevel@tonic-gate  *
8880Sstevel@tonic-gate  *	returns - 0, upon successful completion
8890Sstevel@tonic-gate  *		 1, if malloc has previously been called or
8900Sstevel@tonic-gate  *		    if value or cmd have illegal values
8910Sstevel@tonic-gate  */
8920Sstevel@tonic-gate 
8930Sstevel@tonic-gate int
8942658Sraf mallopt(int cmd, int value)
8950Sstevel@tonic-gate {
8960Sstevel@tonic-gate 	/* disallow changes once a small block is allocated */
8970Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
8980Sstevel@tonic-gate 	if (change) {
8990Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
9000Sstevel@tonic-gate 		return (1);
9010Sstevel@tonic-gate 	}
9020Sstevel@tonic-gate 	switch (cmd) {
9030Sstevel@tonic-gate 	case M_MXFAST:
9040Sstevel@tonic-gate 		if (value < 0) {
9050Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
9060Sstevel@tonic-gate 			return (1);
9070Sstevel@tonic-gate 		}
9080Sstevel@tonic-gate 		fastct = (value + grain - 1) / grain;
9090Sstevel@tonic-gate 		maxfast = grain*fastct;
9100Sstevel@tonic-gate 		break;
9110Sstevel@tonic-gate 	case M_NLBLKS:
9120Sstevel@tonic-gate 		if (value <= 1) {
9130Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
9140Sstevel@tonic-gate 			return (1);
9150Sstevel@tonic-gate 		}
9160Sstevel@tonic-gate 		numlblks = value;
9170Sstevel@tonic-gate 		break;
9180Sstevel@tonic-gate 	case M_GRAIN:
9190Sstevel@tonic-gate 		if (value <= 0) {
9200Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
9210Sstevel@tonic-gate 			return (1);
9220Sstevel@tonic-gate 		}
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 		/* round grain up to a multiple of ALIGNSZ */
9250Sstevel@tonic-gate 		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
9260Sstevel@tonic-gate 
9270Sstevel@tonic-gate 		/* reduce fastct appropriately */
9280Sstevel@tonic-gate 		fastct = (maxfast + grain - 1) / grain;
9290Sstevel@tonic-gate 		maxfast = grain * fastct;
9300Sstevel@tonic-gate 		break;
9310Sstevel@tonic-gate 	case M_KEEP:
9320Sstevel@tonic-gate 		if (change && holdhead != NULL) {
9330Sstevel@tonic-gate 			mutex_unlock(&mlock);
9340Sstevel@tonic-gate 			return (1);
9350Sstevel@tonic-gate 		}
9360Sstevel@tonic-gate 		minhead = HEADSZ;
9370Sstevel@tonic-gate 		break;
9380Sstevel@tonic-gate 	default:
9390Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
9400Sstevel@tonic-gate 		return (1);
9410Sstevel@tonic-gate 	}
9420Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
9430Sstevel@tonic-gate 	return (0);
9440Sstevel@tonic-gate }
9450Sstevel@tonic-gate 
/*
 * mallinfo-provide information about space usage
 *
 *	input - max; mallinfo will return the size of the
 *		largest block < max.
 *
 *	output - a structure containing a description
 *		 of space usage, defined in malloc.h
 */

struct mallinfo
mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int	i;			/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	/* all-zero result if malloc has never been called */
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * and space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ-MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding block have been allocated
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				/* bytes per small block, incl. overhead */
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks*size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding block were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
	return (inf);
}
10240Sstevel@tonic-gate 
10250Sstevel@tonic-gate 
/*
 * freespace - calc. how much space is used in the free
 *		    small blocks in a given holding block
 *
 *	input - hblk = given holding block
 *
 *	returns space used in free small blocks of hblk
 *		(the free-list entries plus the never-carved tail)
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;		/* bytes per small block, incl. overhead */
	struct lblk *unused;	/* start of the never-allocated region */

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	/* add the tail of the holding block that was never handed out */
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}
10540Sstevel@tonic-gate 
/*
 * morecore(bytes) - obtain more heap from the system via sbrk()
 *
 *	input - bytes; amount of additional heap wanted
 *
 *	returns the previous break on success, (void *)-1 on
 *	failure (matching sbrk()'s convention).
 */
static void *
morecore(size_t bytes)
{
	void * ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;
		/*
		 * The request size is too big. We need to do this in
		 * chunks. Sbrk only takes an int for an arg.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		/* remember the old break so a partial grab can be undone */
		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				/*
				 * NOTE(review): this backs out only one
				 * LONG_MAX chunk, and only when the break
				 * has moved since we sampled it -- looks
				 * like a best-effort unwind; confirm.
				 */
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			/*
			 * After subtracting, a remainder < LONG_MAX wraps
			 * to a huge unsigned value whose signed view is
			 * negative, terminating the loop after the final
			 * chunk.  NOTE(review): for requests larger than
			 * 2*LONG_MAX the signed view of the remainder can
			 * go negative early, ending the loop before the
			 * whole request is obtained -- confirm intended.
			 */
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}
10850Sstevel@tonic-gate 
10860Sstevel@tonic-gate #ifdef debug
/*
 * check_arena - debug-only consistency walk of the arena
 *
 *	returns - 0 if the walk completes, -1 if malloc has never
 *		  been called (empty arena).
 *	Corruption is reported by assert(), not by return value.
 */
int
check_arena(void)
{
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		/* every block lies within the arena and moves forward */
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		/* no stray tag bits other than BUSY may be set */
		assert(((uintptr_t)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			/* free blocks must have sane queue links */
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}
11250Sstevel@tonic-gate 
11260Sstevel@tonic-gate #define	RSTALLOC	1
11270Sstevel@tonic-gate #endif
11280Sstevel@tonic-gate 
11290Sstevel@tonic-gate #ifdef RSTALLOC
11300Sstevel@tonic-gate /*
11310Sstevel@tonic-gate  * rstalloc - reset alloc routines
11320Sstevel@tonic-gate  *
11330Sstevel@tonic-gate  *	description -	return allocated memory and reset
11340Sstevel@tonic-gate  *			allocation pointers.
11350Sstevel@tonic-gate  *
11360Sstevel@tonic-gate  *	Warning - This is for debugging purposes only.
11370Sstevel@tonic-gate  *		  It will return all memory allocated after
11380Sstevel@tonic-gate  *		  the first call to malloc, even if some
11390Sstevel@tonic-gate  *		  of it was fetched by a user's sbrk().
11400Sstevel@tonic-gate  */
11410Sstevel@tonic-gate 
11420Sstevel@tonic-gate void
11430Sstevel@tonic-gate rstalloc(void)
11440Sstevel@tonic-gate {
11450Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
11460Sstevel@tonic-gate 	minhead = MINHEAD;
11470Sstevel@tonic-gate 	grain = ALIGNSZ;
11480Sstevel@tonic-gate 	numlblks = NUMLBLKS;
11490Sstevel@tonic-gate 	fastct = FASTCT;
11500Sstevel@tonic-gate 	maxfast = MAXFAST;
11510Sstevel@tonic-gate 	change = 0;
11520Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
11530Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
11540Sstevel@tonic-gate 		return;
11550Sstevel@tonic-gate 	}
11560Sstevel@tonic-gate 	brk(CLRBUSY(arena[1].nextblk));
11570Sstevel@tonic-gate 	freeptr[0].nextfree = GROUND;
11580Sstevel@tonic-gate #ifdef debug
11590Sstevel@tonic-gate 	case1count = 0;
11600Sstevel@tonic-gate #endif
11610Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
11620Sstevel@tonic-gate }
11630Sstevel@tonic-gate #endif	/* RSTALLOC */
11640Sstevel@tonic-gate 
/*
 * cfree is an undocumented, obsolete function
 *
 *	num and size are accepted for historical compatibility
 *	and ignored; the block is simply freed.
 */

/* ARGSUSED1 */
void
cfree(void *p, size_t num, size_t size)
{
	free(p);
}
1175*3866Sraf 
1176*3866Sraf static void
1177*3866Sraf malloc_prepare()
1178*3866Sraf {
1179*3866Sraf 	(void) mutex_lock(&mlock);
1180*3866Sraf }
1181*3866Sraf 
1182*3866Sraf static void
1183*3866Sraf malloc_release()
1184*3866Sraf {
1185*3866Sraf 	(void) mutex_unlock(&mlock);
1186*3866Sraf }
1187*3866Sraf 
#pragma init(malloc_init)
/*
 * malloc_init - run automatically at library load (#pragma init).
 * Registers fork handlers so mlock is held across fork(), keeping
 * the allocator state consistent in the child process.
 */
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}
1194