10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*2658Sraf  * Common Development and Distribution License (the "License").
6*2658Sraf  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
21*2658Sraf 
220Sstevel@tonic-gate /*	Copyright (c) 1988 AT&T	*/
230Sstevel@tonic-gate /*	  All Rights Reserved  	*/
240Sstevel@tonic-gate 
250Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
260Sstevel@tonic-gate 
270Sstevel@tonic-gate /*
28*2658Sraf  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
290Sstevel@tonic-gate  * Use is subject to license terms.
300Sstevel@tonic-gate  */
310Sstevel@tonic-gate 
320Sstevel@tonic-gate #include <sys/types.h>
330Sstevel@tonic-gate 
340Sstevel@tonic-gate #ifndef debug
350Sstevel@tonic-gate #define	NDEBUG
360Sstevel@tonic-gate #endif
370Sstevel@tonic-gate 
380Sstevel@tonic-gate #include <stdlib.h>
390Sstevel@tonic-gate #include <string.h>
400Sstevel@tonic-gate #include "assert.h"
410Sstevel@tonic-gate #include "malloc.h"
420Sstevel@tonic-gate #include "mallint.h"
430Sstevel@tonic-gate #include <thread.h>
440Sstevel@tonic-gate #include <synch.h>
450Sstevel@tonic-gate #include <unistd.h>
460Sstevel@tonic-gate #include <limits.h>
470Sstevel@tonic-gate 
480Sstevel@tonic-gate static mutex_t mlock = DEFAULTMUTEX;
490Sstevel@tonic-gate static ssize_t freespace(struct holdblk *);
500Sstevel@tonic-gate static void *malloc_unlocked(size_t, int);
510Sstevel@tonic-gate static void *realloc_unlocked(void *, size_t);
520Sstevel@tonic-gate static void free_unlocked(void *);
530Sstevel@tonic-gate static void *morecore(size_t);
540Sstevel@tonic-gate 
550Sstevel@tonic-gate /*
 * user-level memory allocator (malloc, free, realloc)
570Sstevel@tonic-gate  *
580Sstevel@tonic-gate  *	-malloc, free, realloc and mallopt form a memory allocator
590Sstevel@tonic-gate  *	similar to malloc, free, and realloc.  The routines
600Sstevel@tonic-gate  *	here are much faster than the original, with slightly worse
610Sstevel@tonic-gate  *	space usage (a few percent difference on most input).  They
620Sstevel@tonic-gate  *	do not have the property that data in freed blocks is left
630Sstevel@tonic-gate  *	untouched until the space is reallocated.
640Sstevel@tonic-gate  *
650Sstevel@tonic-gate  *	-Memory is kept in the "arena", a singly linked list of blocks.
660Sstevel@tonic-gate  *	These blocks are of 3 types.
670Sstevel@tonic-gate  *		1. A free block.  This is a block not in use by the
680Sstevel@tonic-gate  *		   user.  It has a 3 word header. (See description
690Sstevel@tonic-gate  *		   of the free queue.)
700Sstevel@tonic-gate  *		2. An allocated block.  This is a block the user has
710Sstevel@tonic-gate  *		   requested.  It has only a 1 word header, pointing
720Sstevel@tonic-gate  *		   to the next block of any sort.
730Sstevel@tonic-gate  *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
750Sstevel@tonic-gate  *		   has a 1 word header, as does 2.
760Sstevel@tonic-gate  *	Blocks of type 1 have the lower bit of the pointer to the
770Sstevel@tonic-gate  *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
780Sstevel@tonic-gate  *	to mark them busy.
790Sstevel@tonic-gate  *
800Sstevel@tonic-gate  *	-Unallocated blocks are kept on an unsorted doubly linked
810Sstevel@tonic-gate  *	free list.
820Sstevel@tonic-gate  *
830Sstevel@tonic-gate  *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
850Sstevel@tonic-gate  *	head of the free queue, which prevents bunching of small
860Sstevel@tonic-gate  *	blocks at the head of the queue.
870Sstevel@tonic-gate  *
880Sstevel@tonic-gate  *	-Compaction is performed at free time of any blocks immediately
890Sstevel@tonic-gate  *	following the freed block.  The freed block will be combined
900Sstevel@tonic-gate  *	with a preceding block during the search phase of malloc.
910Sstevel@tonic-gate  *	Since a freed block is added at the front of the free queue,
920Sstevel@tonic-gate  *	which is moved to the end of the queue if considered and
930Sstevel@tonic-gate  *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding block that is free is
950Sstevel@tonic-gate  *	freed and reallocated on the next call to malloc.  The
960Sstevel@tonic-gate  *	time savings of this strategy is judged to be worth the
970Sstevel@tonic-gate  *	occasional waste of memory.
980Sstevel@tonic-gate  *
990Sstevel@tonic-gate  *	-Small blocks (of size < MAXSIZE)  are not allocated directly.
1000Sstevel@tonic-gate  *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and a number of small blocks.
1020Sstevel@tonic-gate  *	Holding blocks for a given size of small block (rounded to the
1030Sstevel@tonic-gate  *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
1040Sstevel@tonic-gate  *	holding block with an unused small block is in front of any without.
1050Sstevel@tonic-gate  *	A list of free blocks is kept within the holding block.
1060Sstevel@tonic-gate  */
1070Sstevel@tonic-gate 
1080Sstevel@tonic-gate /*
1090Sstevel@tonic-gate  *	description of arena, free queue, holding blocks etc.
1100Sstevel@tonic-gate  *
 * New compiler and linker does not guarantee order of initialized data.
 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
1130Sstevel@tonic-gate  * Later code depends on this order.
1140Sstevel@tonic-gate  */
1150Sstevel@tonic-gate 
static struct header arena[4] = {
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0}
	};
				/*
				 * arena[1] is a minimal block to
				 * start the arena.  arena[0] is a busy
				 * block to be pointed to by the last block.
				 */

#define	freeptr (arena + 2)
				/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;   /* pointer to array of head pointers */
				    /* to holding block chains */
/*
 * In order to save time calculating indices, the holdhead array is 1 too
 * large, and the first element is unused.
 *
 * Variables controlling the algorithm, esp. how holding blocks are used;
 * presumably tunable via mallopt() until `change' is set — mallopt is
 * not visible in this chunk, so confirm against the rest of the file.
 */
static int numlblks = NUMLBLKS;	/* little blocks per holding block */
static int minhead = MINHEAD;	/* min header size for a large block */
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;	/* number of small-block size classes */
static unsigned int maxfast = MAXFAST;
/* maxfast: largest request served from a holding block; */
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;	/* small requests are rounded up to grain */
1480Sstevel@tonic-gate 
1490Sstevel@tonic-gate #ifdef debug
1500Sstevel@tonic-gate static int case1count = 0;
1510Sstevel@tonic-gate 
1520Sstevel@tonic-gate static void
1530Sstevel@tonic-gate checkq(void)
1540Sstevel@tonic-gate {
1550Sstevel@tonic-gate 	register struct header *p;
1560Sstevel@tonic-gate 
1570Sstevel@tonic-gate 	p = &freeptr[0];
1580Sstevel@tonic-gate 
1590Sstevel@tonic-gate 	/* check forward */
1600Sstevel@tonic-gate 	/*CSTYLED*/
1610Sstevel@tonic-gate 	while (p != &freeptr[1]) {
1620Sstevel@tonic-gate 		p = p->nextfree;
1630Sstevel@tonic-gate 		assert(p->prevfree->nextfree == p);
1640Sstevel@tonic-gate 	}
1650Sstevel@tonic-gate 
1660Sstevel@tonic-gate 	/* check backward */
1670Sstevel@tonic-gate 	/*CSTYLED*/
1680Sstevel@tonic-gate 	while (p != &freeptr[0]) {
1690Sstevel@tonic-gate 		p = p->prevfree;
1700Sstevel@tonic-gate 		assert(p->nextfree->prevfree == p);
1710Sstevel@tonic-gate 	}
1720Sstevel@tonic-gate }
1730Sstevel@tonic-gate #endif
1740Sstevel@tonic-gate 
1750Sstevel@tonic-gate 
1760Sstevel@tonic-gate /*
1770Sstevel@tonic-gate  * malloc(nbytes) - give a user nbytes to use
1780Sstevel@tonic-gate  */
1790Sstevel@tonic-gate 
1800Sstevel@tonic-gate void *
1810Sstevel@tonic-gate malloc(size_t nbytes)
1820Sstevel@tonic-gate {
1830Sstevel@tonic-gate 	void *ret;
1840Sstevel@tonic-gate 
1850Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
1860Sstevel@tonic-gate 	ret = malloc_unlocked(nbytes, 0);
1870Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1880Sstevel@tonic-gate 	return (ret);
1890Sstevel@tonic-gate }
1900Sstevel@tonic-gate 
1910Sstevel@tonic-gate /*
1920Sstevel@tonic-gate  * Use malloc_unlocked() to get the address to start with; Given this
1930Sstevel@tonic-gate  * address, find out the closest address that aligns with the request
1940Sstevel@tonic-gate  * and return that address after doing some house keeping (refer to the
1950Sstevel@tonic-gate  * ascii art below).
1960Sstevel@tonic-gate  */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;	/* over-sized raw allocation */
	struct header *hd;	/* header placed just below aligned addr */
	size_t alloc_size;	/* size actually requested */
	uintptr_t fr;		/* the aligned address to return */
	static int realloc;	/* retry counter (statistic only); */
				/* NOTE(review): shadows realloc(3C) here */

	/* alignment must be a non-zero power of two; size non-zero */
	if (size == 0 || alignment == 0 ||
		(alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	/* plain malloc() already returns ALIGNSZ-aligned memory */
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	/* over-allocate so some aligned address must fall inside the block */
	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	/* nosmall=1: the block must be a normal (splittable) arena block */
	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	/* round up to the next multiple of alignment */
	fr = (fr + alignment - 1) / alignment * alignment;

	/* lucky: already aligned, nothing to carve off */
	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * we hit an edge case, where the space ahead of aligned
		 * address is not sufficient to hold 'header' and hence we
		 * can't free it. So double the allocation request.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		/* still too close: skip to the next aligned address, */
		/* which the doubled padding guarantees is in range */
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			fr = fr + alignment;
		}
	}

	/*
	 * Split the block: <a> is the original header, <b> the new one
	 * carved in just below the aligned address fr.
	 *
	 *	+-------+		+-------+
	 *  +---| <a>   |		| <a>   |--+
	 *  |   +-------+<--alloc_buf-->+-------+  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|	 hd-->  +-------+  |
	 *  |   |	|	    +---|  <b>  |<-+
	 *  |   |	|	    |   +-------+<--- fr
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   +-------+	    |   +-------+
	 *  +-->|  next |	    +-->|  next |
	 *	+-------+		+-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	/* new block <b> inherits <a>'s successor; <a> now points at <b> */
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	/* give the leading fragment back to the arena */
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
2900Sstevel@tonic-gate 
/*
 * valloc(size) - allocate size bytes aligned on a page boundary.
 *
 * Returns NULL for size == 0 or on allocation failure.  The page size
 * is looked up once and cached; a failing sysconf() is NOT cached, so a
 * transient failure does not poison every later call (the original code
 * stored sysconf's -1 into the static unsigned, making valloc fail
 * forever after).
 */
void *
valloc(size_t size)
{
	static unsigned pagesize;
	long pgsz;

	if (size == 0)
		return (NULL);

	if (!pagesize) {
		pgsz = sysconf(_SC_PAGESIZE);
		if (pgsz <= 0)
			return (NULL);	/* don't cache a failure */
		pagesize = (unsigned)pgsz;
	}

	return (memalign(pagesize, size));
}
3030Sstevel@tonic-gate 
3040Sstevel@tonic-gate /*
3050Sstevel@tonic-gate  * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
3060Sstevel@tonic-gate  */
3070Sstevel@tonic-gate 
static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */
			/* nosmall != 0 forces an ordinary (splittable) */
			/* arena block even for small nbytes (memalign) */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena: two busy sentinel blocks */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND))  {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				/* room for another fresh little block */
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				/* holding block now full; rotate it out */
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if ((char *)newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				/* first holding block of this size class */
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					/* absorb every free block that */
					/* immediately follows this one */
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				/*
				 * NOTE(review): the literal below is 40
				 * bytes but 41 are written; also "that"
				 * should read "than" — confirm and fix.
				 */
				if (case1count++ > 0)
				    (void) write(2, "Case 1 hit more that once."
					" brk or sbrk?\n", 41);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???  	newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found of free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
6270Sstevel@tonic-gate 
6280Sstevel@tonic-gate /*
6290Sstevel@tonic-gate  * free(ptr) - free block that user thinks starts at ptr
6300Sstevel@tonic-gate  *
6310Sstevel@tonic-gate  *	input - ptr-1 contains the block header.
6320Sstevel@tonic-gate  *		If the header points forward, we have a normal
6330Sstevel@tonic-gate  *			block pointing to the next block
6340Sstevel@tonic-gate  *		if the header points backward, we have a small
6350Sstevel@tonic-gate  *			block from a holding block.
6360Sstevel@tonic-gate  *		In both cases, the busy bit must be set
6370Sstevel@tonic-gate  */
6380Sstevel@tonic-gate 
6390Sstevel@tonic-gate void
6400Sstevel@tonic-gate free(void *ptr)
6410Sstevel@tonic-gate {
6420Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
6430Sstevel@tonic-gate 	free_unlocked(ptr);
6440Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
6450Sstevel@tonic-gate }
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate /*
6480Sstevel@tonic-gate  * free_unlocked(ptr) - Do the real work for free()
6490Sstevel@tonic-gate  */
6500Sstevel@tonic-gate 
6510Sstevel@tonic-gate void
6520Sstevel@tonic-gate free_unlocked(void *ptr)
6530Sstevel@tonic-gate {
6540Sstevel@tonic-gate 	struct holdblk *holdblk;	/* block holding blk */
6550Sstevel@tonic-gate 	struct holdblk *oldhead;	/* former head of the hold block */
6560Sstevel@tonic-gate 					/* queue containing blk's holder */
6570Sstevel@tonic-gate 
6580Sstevel@tonic-gate 	if (ptr == NULL)
6590Sstevel@tonic-gate 		return;
6600Sstevel@tonic-gate 	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
6610Sstevel@tonic-gate 		struct lblk	*lblk;	/* pointer to freed block */
6620Sstevel@tonic-gate 		ssize_t		offset;	/* choice of header lists */
6630Sstevel@tonic-gate 
6640Sstevel@tonic-gate 		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
6650Sstevel@tonic-gate 		assert((struct header *)lblk < arenaend);
6660Sstevel@tonic-gate 		assert((struct header *)lblk > arena);
6670Sstevel@tonic-gate 		/* allow twits (e.g. awk) to free a block twice */
6680Sstevel@tonic-gate 		holdblk = lblk->header.holder;
6690Sstevel@tonic-gate 		if (!TESTBUSY(holdblk))
6700Sstevel@tonic-gate 			return;
6710Sstevel@tonic-gate 		holdblk = (struct holdblk *)CLRALL(holdblk);
6720Sstevel@tonic-gate 		/* put lblk on its hold block's free list */
6730Sstevel@tonic-gate 		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
6740Sstevel@tonic-gate 		holdblk->lfreeq = lblk;
6750Sstevel@tonic-gate 		/* move holdblk to head of queue, if its not already there */
6760Sstevel@tonic-gate 		offset = holdblk->blksz / grain;
6770Sstevel@tonic-gate 		oldhead = holdhead[offset];
6780Sstevel@tonic-gate 		if (oldhead != holdblk) {
6790Sstevel@tonic-gate 			/* first take out of current spot */
6800Sstevel@tonic-gate 			holdhead[offset] = holdblk;
6810Sstevel@tonic-gate 			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
6820Sstevel@tonic-gate 			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
6830Sstevel@tonic-gate 			/* now add at front */
6840Sstevel@tonic-gate 			holdblk->nexthblk = oldhead;
6850Sstevel@tonic-gate 			holdblk->prevhblk = oldhead->prevhblk;
6860Sstevel@tonic-gate 			oldhead->prevhblk = holdblk;
6870Sstevel@tonic-gate 			holdblk->prevhblk->nexthblk = holdblk;
6880Sstevel@tonic-gate 		}
6890Sstevel@tonic-gate 	} else {
6900Sstevel@tonic-gate 		struct header *blk;	/* real start of block */
6910Sstevel@tonic-gate 		struct header *next;	/* next = blk->nextblk */
6920Sstevel@tonic-gate 		struct header *nextnext;	/* block after next */
6930Sstevel@tonic-gate 
6940Sstevel@tonic-gate 		blk = (struct header *)((char *)ptr - minhead);
6950Sstevel@tonic-gate 		next = blk->nextblk;
6960Sstevel@tonic-gate 		/* take care of twits (e.g. awk) who return blocks twice */
6970Sstevel@tonic-gate 		if (!TESTBUSY(next))
6980Sstevel@tonic-gate 			return;
6990Sstevel@tonic-gate 		blk->nextblk = next = CLRBUSY(next);
7000Sstevel@tonic-gate 		ADDFREEQ(blk);
7010Sstevel@tonic-gate 		/* see if we can compact */
7020Sstevel@tonic-gate 		if (!TESTBUSY(nextnext = next->nextblk)) {
7030Sstevel@tonic-gate 			do {
7040Sstevel@tonic-gate 				DELFREEQ(next);
7050Sstevel@tonic-gate 				next = nextnext;
7060Sstevel@tonic-gate 			} while (!TESTBUSY(nextnext = next->nextblk));
7070Sstevel@tonic-gate 			if (next == arenaend) lastblk = blk;
7080Sstevel@tonic-gate 			blk->nextblk = next;
7090Sstevel@tonic-gate 		}
7100Sstevel@tonic-gate 	}
7110Sstevel@tonic-gate 	CHECKQ
7120Sstevel@tonic-gate }
7130Sstevel@tonic-gate 
7140Sstevel@tonic-gate 
7150Sstevel@tonic-gate /*
7160Sstevel@tonic-gate  * realloc(ptr, size) - give the user a block of size "size", with
7170Sstevel@tonic-gate  *			    the contents pointed to by ptr.  Free ptr.
7180Sstevel@tonic-gate  */
7190Sstevel@tonic-gate 
7200Sstevel@tonic-gate void *
7210Sstevel@tonic-gate realloc(void *ptr, size_t size)
7220Sstevel@tonic-gate {
7230Sstevel@tonic-gate 	void	*retval;
7240Sstevel@tonic-gate 
7250Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
7260Sstevel@tonic-gate 	retval = realloc_unlocked(ptr, size);
7270Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
7280Sstevel@tonic-gate 	return (retval);
7290Sstevel@tonic-gate }
7300Sstevel@tonic-gate 
7310Sstevel@tonic-gate 
7320Sstevel@tonic-gate /*
7330Sstevel@tonic-gate  * realloc_unlocked(ptr) - Do the real work for realloc()
7340Sstevel@tonic-gate  */
7350Sstevel@tonic-gate 
7360Sstevel@tonic-gate static void *
7370Sstevel@tonic-gate realloc_unlocked(void *ptr, size_t size)
7380Sstevel@tonic-gate {
7390Sstevel@tonic-gate 	struct header *blk;	/* block ptr is contained in */
7400Sstevel@tonic-gate 	size_t trusize;	/* block size as allocater sees it */
7410Sstevel@tonic-gate 	char *newptr;			/* pointer to user's new block */
7420Sstevel@tonic-gate 	size_t cpysize;	/* amount to copy */
7430Sstevel@tonic-gate 	struct header *next;	/* block after blk */
7440Sstevel@tonic-gate 
7450Sstevel@tonic-gate 	if (ptr == NULL)
7460Sstevel@tonic-gate 		return (malloc_unlocked(size, 0));
7470Sstevel@tonic-gate 
7480Sstevel@tonic-gate 	if (size == 0) {
7490Sstevel@tonic-gate 		free_unlocked(ptr);
7500Sstevel@tonic-gate 		return (NULL);
7510Sstevel@tonic-gate 	}
7520Sstevel@tonic-gate 
7530Sstevel@tonic-gate 	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
7540Sstevel@tonic-gate 	    header.holder)) {
7550Sstevel@tonic-gate 		/*
7560Sstevel@tonic-gate 		 * we have a special small block which can't be expanded
7570Sstevel@tonic-gate 		 *
7580Sstevel@tonic-gate 		 * This makes the assumption that even if the user is
7590Sstevel@tonic-gate 		 * reallocating a free block, malloc doesn't alter the contents
7600Sstevel@tonic-gate 		 * of small blocks
7610Sstevel@tonic-gate 		 */
7620Sstevel@tonic-gate 		newptr = malloc_unlocked(size, 0);
7630Sstevel@tonic-gate 		if (newptr == NULL)
7640Sstevel@tonic-gate 			return (NULL);
7650Sstevel@tonic-gate 		/* this isn't to save time--its to protect the twits */
7660Sstevel@tonic-gate 		if ((char *)ptr != newptr) {
7670Sstevel@tonic-gate 			struct lblk *lblk;
7680Sstevel@tonic-gate 			lblk = (struct lblk *)((char *)ptr - MINHEAD);
7690Sstevel@tonic-gate 			cpysize = ((struct holdblk *)
7700Sstevel@tonic-gate 			    CLRALL(lblk->header.holder))->blksz;
7710Sstevel@tonic-gate 			cpysize = (size > cpysize) ? cpysize : size;
7720Sstevel@tonic-gate 			(void) memcpy(newptr, ptr, cpysize);
7730Sstevel@tonic-gate 			free_unlocked(ptr);
7740Sstevel@tonic-gate 		}
7750Sstevel@tonic-gate 	} else {
7760Sstevel@tonic-gate 		blk = (struct header *)((char *)ptr - minhead);
7770Sstevel@tonic-gate 		next = blk->nextblk;
7780Sstevel@tonic-gate 		/*
7790Sstevel@tonic-gate 		 * deal with twits who reallocate free blocks
7800Sstevel@tonic-gate 		 *
7810Sstevel@tonic-gate 		 * if they haven't reset minblk via getopt, that's
7820Sstevel@tonic-gate 		 * their problem
7830Sstevel@tonic-gate 		 */
7840Sstevel@tonic-gate 		if (!TESTBUSY(next)) {
7850Sstevel@tonic-gate 			DELFREEQ(blk);
7860Sstevel@tonic-gate 			blk->nextblk = SETBUSY(next);
7870Sstevel@tonic-gate 		}
7880Sstevel@tonic-gate 		next = CLRBUSY(next);
7890Sstevel@tonic-gate 		/* make blk as big as possible */
7900Sstevel@tonic-gate 		if (!TESTBUSY(next->nextblk)) {
7910Sstevel@tonic-gate 			do {
7920Sstevel@tonic-gate 				DELFREEQ(next);
7930Sstevel@tonic-gate 				next = next->nextblk;
7940Sstevel@tonic-gate 			} while (!TESTBUSY(next->nextblk));
7950Sstevel@tonic-gate 			blk->nextblk = SETBUSY(next);
7960Sstevel@tonic-gate 			if (next >= arenaend) lastblk = blk;
7970Sstevel@tonic-gate 		}
7980Sstevel@tonic-gate 		/* get size we really need */
7990Sstevel@tonic-gate 		trusize = size+minhead;
8000Sstevel@tonic-gate 		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
8010Sstevel@tonic-gate 		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
8020Sstevel@tonic-gate 		/* see if we have enough */
8030Sstevel@tonic-gate 		/* this isn't really the copy size, but I need a register */
8040Sstevel@tonic-gate 		cpysize = (char *)next - (char *)blk;
8050Sstevel@tonic-gate 		if (cpysize >= trusize) {
8060Sstevel@tonic-gate 			/* carve out the size we need */
8070Sstevel@tonic-gate 			struct header *newblk;	/* remainder */
8080Sstevel@tonic-gate 
8090Sstevel@tonic-gate 			if (cpysize - trusize >= MINBLKSZ) {
8100Sstevel@tonic-gate 				/*
8110Sstevel@tonic-gate 				 * carve out the right size block
8120Sstevel@tonic-gate 				 * newblk will be the remainder
8130Sstevel@tonic-gate 				 */
8140Sstevel@tonic-gate 				newblk = (struct header *)((char *)blk +
8150Sstevel@tonic-gate 				    trusize);
8160Sstevel@tonic-gate 				newblk->nextblk = next;
8170Sstevel@tonic-gate 				blk->nextblk = SETBUSY(newblk);
8180Sstevel@tonic-gate 				/* at this point, next is invalid */
8190Sstevel@tonic-gate 				ADDFREEQ(newblk);
8200Sstevel@tonic-gate 				/* if blk was lastblk, make newblk lastblk */
8210Sstevel@tonic-gate 				if (blk == lastblk)
8220Sstevel@tonic-gate 					lastblk = newblk;
8230Sstevel@tonic-gate 			}
8240Sstevel@tonic-gate 			newptr = ptr;
8250Sstevel@tonic-gate 		} else {
8260Sstevel@tonic-gate 			/* bite the bullet, and call malloc */
8270Sstevel@tonic-gate 			cpysize = (size > cpysize) ? cpysize : size;
8280Sstevel@tonic-gate 			newptr = malloc_unlocked(size, 0);
8290Sstevel@tonic-gate 			if (newptr == NULL)
8300Sstevel@tonic-gate 				return (NULL);
8310Sstevel@tonic-gate 			(void) memcpy(newptr, ptr, cpysize);
8320Sstevel@tonic-gate 			free_unlocked(ptr);
8330Sstevel@tonic-gate 		}
8340Sstevel@tonic-gate 	}
8350Sstevel@tonic-gate 	return (newptr);
8360Sstevel@tonic-gate }
8370Sstevel@tonic-gate 
8380Sstevel@tonic-gate 
8390Sstevel@tonic-gate /* LINTLIBRARY */
8400Sstevel@tonic-gate /*
8410Sstevel@tonic-gate  * calloc - allocate and clear memory block
8420Sstevel@tonic-gate  */
8430Sstevel@tonic-gate 
8440Sstevel@tonic-gate void *
8450Sstevel@tonic-gate calloc(size_t num, size_t size)
8460Sstevel@tonic-gate {
8470Sstevel@tonic-gate 	char *mp;
8480Sstevel@tonic-gate 
8490Sstevel@tonic-gate 	num *= size;
8500Sstevel@tonic-gate 	mp = malloc(num);
8510Sstevel@tonic-gate 	if (mp == NULL)
8520Sstevel@tonic-gate 		return (NULL);
8530Sstevel@tonic-gate 	(void) memset(mp, 0, num);
8540Sstevel@tonic-gate 	return (mp);
8550Sstevel@tonic-gate }
8560Sstevel@tonic-gate 
8570Sstevel@tonic-gate 
8580Sstevel@tonic-gate /*
8590Sstevel@tonic-gate  * Mallopt - set options for allocation
8600Sstevel@tonic-gate  *
8610Sstevel@tonic-gate  *	Mallopt provides for control over the allocation algorithm.
8620Sstevel@tonic-gate  *	The cmds available are:
8630Sstevel@tonic-gate  *
8640Sstevel@tonic-gate  *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
8650Sstevel@tonic-gate  *		 largest small, quickly allocated block.  Maxfast
8660Sstevel@tonic-gate  *		 may be set to 0 to disable fast allocation entirely.
8670Sstevel@tonic-gate  *
8680Sstevel@tonic-gate  *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
8690Sstevel@tonic-gate  *		 small blocks per holding block.  Value must be
8700Sstevel@tonic-gate  *		 greater than 0.
8710Sstevel@tonic-gate  *
8720Sstevel@tonic-gate  *	M_GRAIN  Set grain to value.  The sizes of all blocks
8730Sstevel@tonic-gate  *		 smaller than maxfast are considered to be rounded
8740Sstevel@tonic-gate  *		 up to the nearest multiple of grain. The default
8750Sstevel@tonic-gate  *		 value of grain is the smallest number of bytes
8760Sstevel@tonic-gate  *		 which will allow alignment of any data type.    Grain
8770Sstevel@tonic-gate  *		 will be rounded up to a multiple of its default,
8780Sstevel@tonic-gate  *		 and maxsize will be rounded up to a multiple of
8790Sstevel@tonic-gate  *		 grain.  Value must be greater than 0.
8800Sstevel@tonic-gate  *
8810Sstevel@tonic-gate  *	M_KEEP   Retain data in freed block until the next malloc,
8820Sstevel@tonic-gate  *		 realloc, or calloc.  Value is ignored.
8830Sstevel@tonic-gate  *		 This option is provided only for compatibility with
8840Sstevel@tonic-gate  *		 the old version of malloc, and is not recommended.
8850Sstevel@tonic-gate  *
8860Sstevel@tonic-gate  *	returns - 0, upon successful completion
8870Sstevel@tonic-gate  *		 1, if malloc has previously been called or
8880Sstevel@tonic-gate  *		    if value or cmd have illegal values
8890Sstevel@tonic-gate  */
8900Sstevel@tonic-gate 
8910Sstevel@tonic-gate int
892*2658Sraf mallopt(int cmd, int value)
8930Sstevel@tonic-gate {
8940Sstevel@tonic-gate 	/* disallow changes once a small block is allocated */
8950Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
8960Sstevel@tonic-gate 	if (change) {
8970Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
8980Sstevel@tonic-gate 		return (1);
8990Sstevel@tonic-gate 	}
9000Sstevel@tonic-gate 	switch (cmd) {
9010Sstevel@tonic-gate 	case M_MXFAST:
9020Sstevel@tonic-gate 		if (value < 0) {
9030Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
9040Sstevel@tonic-gate 			return (1);
9050Sstevel@tonic-gate 		}
9060Sstevel@tonic-gate 		fastct = (value + grain - 1) / grain;
9070Sstevel@tonic-gate 		maxfast = grain*fastct;
9080Sstevel@tonic-gate 		break;
9090Sstevel@tonic-gate 	case M_NLBLKS:
9100Sstevel@tonic-gate 		if (value <= 1) {
9110Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
9120Sstevel@tonic-gate 			return (1);
9130Sstevel@tonic-gate 		}
9140Sstevel@tonic-gate 		numlblks = value;
9150Sstevel@tonic-gate 		break;
9160Sstevel@tonic-gate 	case M_GRAIN:
9170Sstevel@tonic-gate 		if (value <= 0) {
9180Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
9190Sstevel@tonic-gate 			return (1);
9200Sstevel@tonic-gate 		}
9210Sstevel@tonic-gate 
9220Sstevel@tonic-gate 		/* round grain up to a multiple of ALIGNSZ */
9230Sstevel@tonic-gate 		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
9240Sstevel@tonic-gate 
9250Sstevel@tonic-gate 		/* reduce fastct appropriately */
9260Sstevel@tonic-gate 		fastct = (maxfast + grain - 1) / grain;
9270Sstevel@tonic-gate 		maxfast = grain * fastct;
9280Sstevel@tonic-gate 		break;
9290Sstevel@tonic-gate 	case M_KEEP:
9300Sstevel@tonic-gate 		if (change && holdhead != NULL) {
9310Sstevel@tonic-gate 			mutex_unlock(&mlock);
9320Sstevel@tonic-gate 			return (1);
9330Sstevel@tonic-gate 		}
9340Sstevel@tonic-gate 		minhead = HEADSZ;
9350Sstevel@tonic-gate 		break;
9360Sstevel@tonic-gate 	default:
9370Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
9380Sstevel@tonic-gate 		return (1);
9390Sstevel@tonic-gate 	}
9400Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
9410Sstevel@tonic-gate 	return (0);
9420Sstevel@tonic-gate }
9430Sstevel@tonic-gate 
9440Sstevel@tonic-gate /*
9450Sstevel@tonic-gate  * mallinfo-provide information about space usage
9460Sstevel@tonic-gate  *
9470Sstevel@tonic-gate  *	input - max; mallinfo will return the size of the
9480Sstevel@tonic-gate  *		largest block < max.
9490Sstevel@tonic-gate  *
9500Sstevel@tonic-gate  *	output - a structure containing a description of
9510Sstevel@tonic-gate  *		 of space usage, defined in malloc.h
9520Sstevel@tonic-gate  */
9530Sstevel@tonic-gate 
9540Sstevel@tonic-gate struct mallinfo
955*2658Sraf mallinfo(void)
9560Sstevel@tonic-gate {
9570Sstevel@tonic-gate 	struct header *blk, *next;	/* ptr to ordinary blocks */
9580Sstevel@tonic-gate 	struct holdblk *hblk;		/* ptr to holding blocks */
9590Sstevel@tonic-gate 	struct mallinfo inf;		/* return value */
9600Sstevel@tonic-gate 	int	i;			/* the ubiquitous counter */
9610Sstevel@tonic-gate 	ssize_t size;			/* size of a block */
9620Sstevel@tonic-gate 	ssize_t fsp;			/* free space in 1 hold block */
9630Sstevel@tonic-gate 
9640Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
9650Sstevel@tonic-gate 	(void) memset(&inf, 0, sizeof (struct mallinfo));
9660Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
9670Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
9680Sstevel@tonic-gate 		return (inf);
9690Sstevel@tonic-gate 	}
9700Sstevel@tonic-gate 	blk = CLRBUSY(arena[1].nextblk);
9710Sstevel@tonic-gate 	/* return total space used */
9720Sstevel@tonic-gate 	inf.arena = (char *)arenaend - (char *)blk;
9730Sstevel@tonic-gate 
9740Sstevel@tonic-gate 	/*
9750Sstevel@tonic-gate 	 * loop through arena, counting # of blocks, and
9760Sstevel@tonic-gate 	 * and space used by blocks
9770Sstevel@tonic-gate 	 */
9780Sstevel@tonic-gate 	next = CLRBUSY(blk->nextblk);
9790Sstevel@tonic-gate 	while (next != &(arena[1])) {
9800Sstevel@tonic-gate 		inf.ordblks++;
9810Sstevel@tonic-gate 		size = (char *)next - (char *)blk;
9820Sstevel@tonic-gate 		if (TESTBUSY(blk->nextblk)) {
9830Sstevel@tonic-gate 			inf.uordblks += size;
9840Sstevel@tonic-gate 			inf.keepcost += HEADSZ-MINHEAD;
9850Sstevel@tonic-gate 		} else {
9860Sstevel@tonic-gate 			inf.fordblks += size;
9870Sstevel@tonic-gate 		}
9880Sstevel@tonic-gate 		blk = next;
9890Sstevel@tonic-gate 		next = CLRBUSY(blk->nextblk);
9900Sstevel@tonic-gate 	}
9910Sstevel@tonic-gate 
9920Sstevel@tonic-gate 	/*
9930Sstevel@tonic-gate 	 * if any holding block have been allocated
9940Sstevel@tonic-gate 	 * then examine space in holding blks
9950Sstevel@tonic-gate 	 */
9960Sstevel@tonic-gate 	if (change && holdhead != NULL) {
9970Sstevel@tonic-gate 		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
9980Sstevel@tonic-gate 			hblk = holdhead[i];
9990Sstevel@tonic-gate 			/* do only if chain not empty */
10000Sstevel@tonic-gate 			if (hblk != HGROUND) {
10010Sstevel@tonic-gate 				size = hblk->blksz +
10020Sstevel@tonic-gate 				    sizeof (struct lblk) - sizeof (int);
10030Sstevel@tonic-gate 				do {	/* loop thru 1 hold blk chain */
10040Sstevel@tonic-gate 					inf.hblks++;
10050Sstevel@tonic-gate 					fsp = freespace(hblk);
10060Sstevel@tonic-gate 					inf.fsmblks += fsp;
10070Sstevel@tonic-gate 					inf.usmblks += numlblks*size - fsp;
10080Sstevel@tonic-gate 					inf.smblks += numlblks;
10090Sstevel@tonic-gate 					hblk = hblk->nexthblk;
10100Sstevel@tonic-gate 				} while (hblk != holdhead[i]);
10110Sstevel@tonic-gate 			}
10120Sstevel@tonic-gate 		}
10130Sstevel@tonic-gate 	}
10140Sstevel@tonic-gate 	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
10150Sstevel@tonic-gate 	/* holding block were counted in ordblks, so subtract off */
10160Sstevel@tonic-gate 	inf.ordblks -= inf.hblks;
10170Sstevel@tonic-gate 	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
10180Sstevel@tonic-gate 	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
10190Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
10200Sstevel@tonic-gate 	return (inf);
10210Sstevel@tonic-gate }
10220Sstevel@tonic-gate 
10230Sstevel@tonic-gate 
10240Sstevel@tonic-gate /*
10250Sstevel@tonic-gate  * freespace - calc. how much space is used in the free
10260Sstevel@tonic-gate  *		    small blocks in a given holding block
10270Sstevel@tonic-gate  *
10280Sstevel@tonic-gate  *	input - hblk = given holding block
10290Sstevel@tonic-gate  *
10300Sstevel@tonic-gate  *	returns space used in free small blocks of hblk
10310Sstevel@tonic-gate  */
10320Sstevel@tonic-gate 
10330Sstevel@tonic-gate static ssize_t
10340Sstevel@tonic-gate freespace(struct holdblk *holdblk)
10350Sstevel@tonic-gate {
10360Sstevel@tonic-gate 	struct lblk *lblk;
10370Sstevel@tonic-gate 	ssize_t space = 0;
10380Sstevel@tonic-gate 	ssize_t size;
10390Sstevel@tonic-gate 	struct lblk *unused;
10400Sstevel@tonic-gate 
10410Sstevel@tonic-gate 	lblk = CLRSMAL(holdblk->lfreeq);
10420Sstevel@tonic-gate 	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
10430Sstevel@tonic-gate 	unused = CLRSMAL(holdblk->unused);
10440Sstevel@tonic-gate 	/* follow free chain */
10450Sstevel@tonic-gate 	while ((lblk != LGROUND) && (lblk != unused)) {
10460Sstevel@tonic-gate 		space += size;
10470Sstevel@tonic-gate 		lblk = CLRSMAL(lblk->header.nextfree);
10480Sstevel@tonic-gate 	}
10490Sstevel@tonic-gate 	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
10500Sstevel@tonic-gate 	return (space);
10510Sstevel@tonic-gate }
10520Sstevel@tonic-gate 
10530Sstevel@tonic-gate static void *
10540Sstevel@tonic-gate morecore(size_t bytes)
10550Sstevel@tonic-gate {
10560Sstevel@tonic-gate 	void * ret;
10570Sstevel@tonic-gate 
10580Sstevel@tonic-gate 	if (bytes > LONG_MAX) {
10590Sstevel@tonic-gate 		intptr_t wad;
10600Sstevel@tonic-gate 		/*
10610Sstevel@tonic-gate 		 * The request size is too big. We need to do this in
10620Sstevel@tonic-gate 		 * chunks. Sbrk only takes an int for an arg.
10630Sstevel@tonic-gate 		 */
10640Sstevel@tonic-gate 		if (bytes == ULONG_MAX)
10650Sstevel@tonic-gate 			return ((void *)-1);
10660Sstevel@tonic-gate 
10670Sstevel@tonic-gate 		ret = sbrk(0);
10680Sstevel@tonic-gate 		wad = LONG_MAX;
10690Sstevel@tonic-gate 		while (wad > 0) {
10700Sstevel@tonic-gate 			if (sbrk(wad) == (void *)-1) {
10710Sstevel@tonic-gate 				if (ret != sbrk(0))
10720Sstevel@tonic-gate 					(void) sbrk(-LONG_MAX);
10730Sstevel@tonic-gate 				return ((void *)-1);
10740Sstevel@tonic-gate 			}
10750Sstevel@tonic-gate 			bytes -= LONG_MAX;
10760Sstevel@tonic-gate 			wad = bytes;
10770Sstevel@tonic-gate 		}
10780Sstevel@tonic-gate 	} else
10790Sstevel@tonic-gate 		ret = sbrk(bytes);
10800Sstevel@tonic-gate 
10810Sstevel@tonic-gate 	return (ret);
10820Sstevel@tonic-gate }
10830Sstevel@tonic-gate 
10840Sstevel@tonic-gate #ifdef debug
10850Sstevel@tonic-gate int
10860Sstevel@tonic-gate check_arena(void)
10870Sstevel@tonic-gate {
10880Sstevel@tonic-gate 	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */
10890Sstevel@tonic-gate 
10900Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
10910Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
10920Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
10930Sstevel@tonic-gate 		return (-1);
10940Sstevel@tonic-gate 	}
10950Sstevel@tonic-gate 	blk = arena + 1;
10960Sstevel@tonic-gate 
10970Sstevel@tonic-gate 	/* loop through arena, checking */
10980Sstevel@tonic-gate 	blk = (struct header *)CLRALL(blk->nextblk);
10990Sstevel@tonic-gate 	next = (struct header *)CLRALL(blk->nextblk);
11000Sstevel@tonic-gate 	while (next != arena + 1) {
11010Sstevel@tonic-gate 		assert(blk >= arena + 1);
11020Sstevel@tonic-gate 		assert(blk <= lastblk);
11030Sstevel@tonic-gate 		assert(next >= blk + 1);
11040Sstevel@tonic-gate 		assert(((uintptr_t)((struct header *)blk->nextblk) &
11050Sstevel@tonic-gate 		    (4 | SMAL)) == 0);
11060Sstevel@tonic-gate 
11070Sstevel@tonic-gate 		if (TESTBUSY(blk->nextblk) == 0) {
11080Sstevel@tonic-gate 			assert(blk->nextfree >= freeptr);
11090Sstevel@tonic-gate 			assert(blk->prevfree >= freeptr);
11100Sstevel@tonic-gate 			assert(blk->nextfree <= lastblk);
11110Sstevel@tonic-gate 			assert(blk->prevfree <= lastblk);
11120Sstevel@tonic-gate 			assert(((uintptr_t)((struct header *)blk->nextfree) &
11130Sstevel@tonic-gate 			    7) == 0);
11140Sstevel@tonic-gate 			assert(((uintptr_t)((struct header *)blk->prevfree) &
11150Sstevel@tonic-gate 			    7) == 0 || blk->prevfree == freeptr);
11160Sstevel@tonic-gate 		}
11170Sstevel@tonic-gate 		blk = next;
11180Sstevel@tonic-gate 		next = CLRBUSY(blk->nextblk);
11190Sstevel@tonic-gate 	}
11200Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
11210Sstevel@tonic-gate 	return (0);
11220Sstevel@tonic-gate }
11230Sstevel@tonic-gate 
11240Sstevel@tonic-gate #define	RSTALLOC	1
11250Sstevel@tonic-gate #endif
11260Sstevel@tonic-gate 
11270Sstevel@tonic-gate #ifdef RSTALLOC
11280Sstevel@tonic-gate /*
11290Sstevel@tonic-gate  * rstalloc - reset alloc routines
11300Sstevel@tonic-gate  *
11310Sstevel@tonic-gate  *	description -	return allocated memory and reset
11320Sstevel@tonic-gate  *			allocation pointers.
11330Sstevel@tonic-gate  *
11340Sstevel@tonic-gate  *	Warning - This is for debugging purposes only.
11350Sstevel@tonic-gate  *		  It will return all memory allocated after
11360Sstevel@tonic-gate  *		  the first call to malloc, even if some
11370Sstevel@tonic-gate  *		  of it was fetched by a user's sbrk().
11380Sstevel@tonic-gate  */
11390Sstevel@tonic-gate 
11400Sstevel@tonic-gate void
11410Sstevel@tonic-gate rstalloc(void)
11420Sstevel@tonic-gate {
11430Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
11440Sstevel@tonic-gate 	minhead = MINHEAD;
11450Sstevel@tonic-gate 	grain = ALIGNSZ;
11460Sstevel@tonic-gate 	numlblks = NUMLBLKS;
11470Sstevel@tonic-gate 	fastct = FASTCT;
11480Sstevel@tonic-gate 	maxfast = MAXFAST;
11490Sstevel@tonic-gate 	change = 0;
11500Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
11510Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
11520Sstevel@tonic-gate 		return;
11530Sstevel@tonic-gate 	}
11540Sstevel@tonic-gate 	brk(CLRBUSY(arena[1].nextblk));
11550Sstevel@tonic-gate 	freeptr[0].nextfree = GROUND;
11560Sstevel@tonic-gate #ifdef debug
11570Sstevel@tonic-gate 	case1count = 0;
11580Sstevel@tonic-gate #endif
11590Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
11600Sstevel@tonic-gate }
11610Sstevel@tonic-gate #endif	/* RSTALLOC */
11620Sstevel@tonic-gate 
11630Sstevel@tonic-gate /*
11640Sstevel@tonic-gate  * cfree is an undocumented, obsolete function
11650Sstevel@tonic-gate  */
11660Sstevel@tonic-gate 
1167*2658Sraf /* ARGSUSED1 */
11680Sstevel@tonic-gate void
1169*2658Sraf cfree(void *p, size_t num, size_t size)
11700Sstevel@tonic-gate {
11710Sstevel@tonic-gate 	free(p);
11720Sstevel@tonic-gate }
1173