xref: /onnv-gate/usr/src/lib/watchmalloc/common/malloc.c (revision 6812:febeba71273d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 *	Memory management: malloc(), realloc(), free(), memalign().
 *
 *	The following #-parameters may be redefined:
 *	GETCORE: a function to get more core memory.
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is ((char *)(-1)).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 *	This algorithm is based on a best fit strategy with lists of
 *	free elts maintained in a self-adjusting binary tree. Each list
 *	contains all elts of the same size. The tree is ordered by size.
 *	For results on self-adjusting trees, see the paper:
 *		Self-Adjusting Binary Search Trees,
 *		DD Sleator & RE Tarjan, JACM 1985.
 *
 *	The header of a block contains the size of the data part in bytes.
 *	Since the size of a block is always 0 mod 4, the low two bits of
 *	the header are free and used as follows:
 *
 *		BIT0:	1 for busy (block is in use), 0 for free.
 *		BIT1:	if the block is busy, this bit is 1 if the
 *			preceding block in contiguous memory is free.
 *			Otherwise, it is always 0.
 */
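/*
 * Worked example, assuming the conventional values BIT0 == 0x1 and
 * BIT1 == 0x2 from mallint.h: a busy 24-byte block whose preceding
 * neighbor is free carries the header word (24 | BIT0 | BIT1) == 0x1b,
 * and CLRBITS01() recovers the plain size, 24.
 */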

#include "mallint.h"

static	mutex_t	__watch_malloc_lock = DEFAULTMUTEX;

static	TREE	*Root;		/* root of the free tree */
static	TREE	*Bottom;	/* the last free chunk in the arena */
static	char	*Baddr;		/* current high address of the arena */

static	void	t_delete(TREE *);
static	void	t_splay(TREE *);
static	void	realfree(void *);
static	void	*malloc_unlocked(size_t);
static	void	free_unlocked(void *);
static	TREE	*morecore(size_t);

static	void	protect(TREE *);
static	void	unprotect(TREE *);

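/*
 * protect() and unprotect(), defined later in this file, add and remove
 * /proc watched-area coverage (see the prwatch_t control structure and
 * wflags below) over a block, so that stray application references to
 * malloc metadata or to freed memory trigger watchpoint traps.  The
 * routines below unprotect() a block before touching it and protect()
 * it again when they are done with it.
 */
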
#define	FREEPAT	0
#define	LIVEPAT	1

/*
 * Patterns to be copied into freed blocks and allocated blocks.
 * 0xdeadbeef and 0xbaddcafe are invalid pointer values in all programs.
 */
static	uint64_t	patterns[2] = {
	0xdeadbeefdeadbeefULL,	/* pattern in a freed block */
	0xbaddcafebaddcafeULL	/* pattern in an allocated block */
};

static void
copy_pattern(int pat, TREE *tp)
{
	uint64_t pattern = patterns[pat];
	size_t sz = SIZE(tp) / sizeof (uint64_t);
	/* LINTED improper alignment */
	uint64_t *datap = (uint64_t *)DATA(tp);

	while (sz--)
		*datap++ = pattern;
}

/*
 * Keep lists of small blocks, FIFO order
 * (allocate from the head, free to the tail).
 */
static	TREE	*List[MINSIZE/WORDSIZE-1];
static	TREE	*Last[MINSIZE/WORDSIZE-1];
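/*
 * List[i] and Last[i] are the head and tail of the queue of free small
 * blocks whose data size is (i + 1) * WORDSIZE, matching the index
 * computation "i = size / WORDSIZE - 1" in smalloc() below.
 */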

/* number of blocks to get at one time */
#define	NPS	(WORDSIZE*8)

static void *
smalloc(size_t size)
{
	TREE	*tp;
	size_t	i;

	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */
	if (size == 0)
		size = WORDSIZE;

	/* list to use */
	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {
		TREE	*np;
		int	n;
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((np = malloc_unlocked((size + WORDSIZE)*NPS)) == NULL)
			return (NULL);

		/* make them into a linked list */
		for (n = 0, List[i] = np; n < NPS; ++n) {
			tp = np;
			SIZE(tp) = size;
			copy_pattern(FREEPAT, tp);
			if (n == NPS - 1) {
				Last[i] = tp;
				np = NULL;
			} else {
				/* LINTED improper alignment */
				np = NEXT(tp);
			}
			AFTER(tp) = np;
			protect(tp);
		}
	}

	/* allocate from the head of the queue */
	tp = List[i];
	unprotect(tp);
	if ((List[i] = AFTER(tp)) == NULL)
		Last[i] = NULL;
	copy_pattern(LIVEPAT, tp);
	SETBIT0(SIZE(tp));
	protect(tp);
	return (DATA(tp));
}

void *
malloc(size_t size)
{
	void	*ret;
	(void) mutex_lock(&__watch_malloc_lock);
	ret = malloc_unlocked(size);
	(void) mutex_unlock(&__watch_malloc_lock);
	return (ret);
}

static void *
malloc_unlocked(size_t size)
{
	size_t	n;
	TREE	*tp, *sp, *tmp;

	COUNT(nmalloc);
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}
	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* small blocks */
	if (size < MINSIZE)
		return (smalloc(size));

	/* search for an elt of the right size */
	sp = NULL;
	n = 0;
	if (Root) {
		tp = Root;
		for (;;) {
			unprotect(tp);
			if (SIZE(tp) >= size) {	/* branch left */
				if (n == 0 || n >= SIZE(tp)) {
					sp = tp;
					n = SIZE(tp);
				}
				if ((tmp = LEFT(tp)) != NULL) {
					protect(tp);
					tp = tmp;
				} else {
					protect(tp);
					break;
				}
			} else {		/* branch right */
				if ((tmp = RIGHT(tp)) != NULL) {
					protect(tp);
					tp = tmp;
				} else {
					protect(tp);
					break;
				}
			}
		}

		if (sp) {
			unprotect(sp);
			t_delete(sp);
		} else if (tp != Root) {
			/* make the searched-to element the root */
			unprotect(tp);
			t_splay(tp);
			protect(tp);
			Root = tp;
		}
	}

	/* if found none fitted in the tree */
	if (sp == NULL) {
		if (Bottom) {
			unprotect(Bottom);
			if (size <= SIZE(Bottom)) {
				sp = Bottom;
				CLRBITS01(SIZE(sp));
			} else {
				protect(Bottom);
				if ((sp = morecore(size)) == NULL)
					return (NULL);
			}
		} else {
			if ((sp = morecore(size)) == NULL)
				return (NULL);
		}
	}

	/* tell the forward neighbor that we're busy */
	/* LINTED improper alignment */
	tmp = NEXT(sp);
	unprotect(tmp);
	CLRBIT1(SIZE(tmp));
	ASSERT(ISBIT0(SIZE(tmp)));
	protect(tmp);

leftover:
	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		n -= WORDSIZE;
		SIZE(sp) = size;
		/* LINTED improper alignment */
		tp = NEXT(sp);
		SIZE(tp) = n | BIT0;
		realfree(DATA(tp));
	} else if (BOTTOM(sp))
		Bottom = NULL;

	/* return the allocated space */
	copy_pattern(LIVEPAT, sp);
	SIZE(sp) |= BIT0;
	protect(sp);
	return (DATA(sp));
}

/*
 *	realloc().
 *	If the block size is increasing, we try forward merging first.
 *	This is not best-fit but it avoids some data recopying.
 */
void *
realloc(void *old, size_t size)
{
	TREE	*tp, *np;
	size_t	ts;
	char	*new;

	COUNT(nrealloc);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* pointer to the block */
	(void) mutex_lock(&__watch_malloc_lock);
	if (old == NULL) {
		new = malloc_unlocked(size);
		(void) mutex_unlock(&__watch_malloc_lock);
		return (new);
	}

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* LINTED improper alignment */
	tp = BLOCK(old);
	unprotect(tp);
	ts = SIZE(tp);

	/* if the block was freed, data has been destroyed. */
	if (!ISBIT0(ts)) {
		/* XXX; complain here! */
		protect(tp);
		(void) mutex_unlock(&__watch_malloc_lock);
		errno = EINVAL;
		return (NULL);
	}

	CLRBITS01(SIZE(tp));
	if (size == SIZE(tp)) {	/* nothing to do */
		SIZE(tp) = ts;
		protect(tp);
		(void) mutex_unlock(&__watch_malloc_lock);
		return (old);
	}

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		if (size == 0) {
			SETOLD01(SIZE(tp), ts);
			free_unlocked(old);
			(void) mutex_unlock(&__watch_malloc_lock);
			return (NULL);
		}
		goto call_malloc;
	}

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		/* LINTED improper alignment */
		np = NEXT(tp);
		unprotect(np);
		if (ISBIT0(SIZE(np)))
			protect(np);
		else {
			TREE *tmp;
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			if (np != Bottom)
				t_delete(np);
			else
				Bottom = NULL;
			/* LINTED improper alignment */
			tmp = NEXT(np);
			unprotect(tmp);
			CLRBIT1(SIZE(tmp));
			protect(tmp);
		}

		/* not enough & at TRUE end of memory, try extending core */
		if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
			Bottom = tp;
			protect(Bottom);
			if ((tp = morecore(size)) == NULL) {
				tp = Bottom;
				Bottom = NULL;
				unprotect(tp);
			}
		}
	}

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		size_t n;
chop_big:
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			n -= WORDSIZE;
			SIZE(tp) = size;
			/* LINTED improper alignment */
			np = NEXT(tp);
			SIZE(np) = n | BIT0;
			realfree(DATA(np));
		} else if (BOTTOM(tp))
			Bottom = NULL;

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		protect(tp);
		(void) mutex_unlock(&__watch_malloc_lock);
		return (old);
	}

call_malloc:	/* call malloc to get a new block */
	SETOLD01(SIZE(tp), ts);
	if ((new = malloc_unlocked(size)) != NULL) {
		CLRBITS01(ts);
		if (ts > size)
			ts = size;
		(void) memcpy(new, old, ts);
		free_unlocked(old);
		(void) mutex_unlock(&__watch_malloc_lock);
		return (new);
	}

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	CLRBITS01(SIZE(tp));
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp))		/* case 1. */ {
			SETOLD01(SIZE(tp), ts);
			protect(tp);
			(void) mutex_unlock(&__watch_malloc_lock);
			return (old);
		} else if (size < MINSIZE)	/* case 2. */ {
			size = MINSIZE;
			goto call_malloc;
		}
	} else if (size < MINSIZE)		/* case 3. */ {
		size = MINSIZE;
		goto chop_big;
	} else if (ISBIT1(ts)) {
		np = LAST(tp);
		unprotect(np);
		if ((SIZE(np) + SIZE(tp) + WORDSIZE) >= size) {
			ASSERT(!ISBIT0(SIZE(np)));
			t_delete(np);
			SIZE(np) += SIZE(tp) + WORDSIZE;
			/*
			 * Since the copy may overlap, use memmove().
			 */
			(void) memmove(DATA(np), old, SIZE(tp));
			old = DATA(np);
			tp = np;
			CLRBIT1(ts);
			goto chop_big;
		}
		protect(np);
	}
	SETOLD01(SIZE(tp), ts);
	protect(tp);
	(void) mutex_unlock(&__watch_malloc_lock);
	/* malloc() sets errno */
	return (NULL);
}

/*
 *	realfree().
 *	Coalescing of adjacent free blocks is done first.
 *	Then, the new free block is leaf-inserted into the free tree
 *	without splaying. This strategy does not guarantee the amortized
 *	O(nlogn) behaviour for the insert/delete/find set of operations
 *	on the tree. In practice, however, free is much more infrequent
 *	than malloc/realloc and the tree searches performed by these
 *	functions adequately keep the tree in balance.
 */
static void
realfree(void *old)
{
	TREE	*tp, *sp, *np, *tmp;
	size_t	ts, size;

	COUNT(nfree);

	/* pointer to the block */
	/* LINTED improper alignment */
	tp = BLOCK(old);
	unprotect(tp);
	ts = SIZE(tp);
	if (!ISBIT0(ts)) {	/* block is not busy; previously freed? */
		protect(tp);	/* force a watchpoint trap */
		CLRBIT0(SIZE(tp));
		return;
	}
	CLRBITS01(SIZE(tp));
	copy_pattern(FREEPAT, tp);

	/* small block, return it to the tail of its queue */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = NULL;
		protect(tp);
		if (List[ts] == NULL) {
			List[ts] = tp;
			Last[ts] = tp;
		} else {
			sp = Last[ts];
			unprotect(sp);
			AFTER(sp) = tp;
			protect(sp);
			Last[ts] = tp;
		}
		return;
	}

	/* see if coalescing with next block is warranted */
	/* LINTED improper alignment */
	np = NEXT(tp);
	unprotect(np);
	if (ISBIT0(SIZE(np)))
		protect(np);
	else {
		if (np != Bottom)
			t_delete(np);
		SIZE(tp) += SIZE(np) + WORDSIZE;
	}

	/* the same with the preceding block */
	if (ISBIT1(ts)) {
		np = LAST(tp);
		unprotect(np);
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		tp = np;
	}

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* set bottom block, or insert in the free tree */
	if (BOTTOM(tp))
		Bottom = tp;
	else {
		/* search for the place to insert */
		if (Root) {
			size = SIZE(tp);
			np = Root;
			for (;;) {
				unprotect(np);
				if (SIZE(np) > size) {
					if ((tmp = LEFT(np)) != NULL) {
						protect(np);
						np = tmp;
					} else {
						LEFT(np) = tp;
						PARENT(tp) = np;
						protect(np);
						break;
					}
				} else if (SIZE(np) < size) {
					if ((tmp = RIGHT(np)) != NULL) {
						protect(np);
						np = tmp;
					} else {
						RIGHT(np) = tp;
						PARENT(tp) = np;
						protect(np);
						break;
					}
				} else {
					if ((sp = PARENT(np)) != NULL) {
						unprotect(sp);
						if (np == LEFT(sp))
							LEFT(sp) = tp;
						else
							RIGHT(sp) = tp;
						PARENT(tp) = sp;
						protect(sp);
					} else
						Root = tp;

					/* insert to head of list */
					if ((sp = LEFT(np)) != NULL) {
						unprotect(sp);
						PARENT(sp) = tp;
						protect(sp);
					}
					LEFT(tp) = sp;

					if ((sp = RIGHT(np)) != NULL) {
						unprotect(sp);
						PARENT(sp) = tp;
						protect(sp);
					}
					RIGHT(tp) = sp;

					/* doubly link list */
					LINKFOR(tp) = np;
					LINKBAK(np) = tp;
					SETNOTREE(np);
					protect(np);

					break;
				}
			}
		} else {
			Root = tp;
		}
	}

	/*
	 * Tell next block that this one is free.
	 * The first WORD of the next block contains self's address.
	 */
	/* LINTED improper alignment */
	tmp = NEXT(tp);
	unprotect(tmp);
	/* LINTED improper alignment */
	*(SELFP(tp)) = tp;
	SETBIT1(SIZE(tmp));
	ASSERT(ISBIT0(SIZE(tmp)));
	protect(tmp);
	protect(tp);
}

/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
static TREE *
morecore(size_t size)
{
	TREE	*tp;
	size_t	n, offset, requestsize;
	char	*addr;

	/* compute new amount of memory to get */
	tp = Bottom;
	n = size + 2 * WORDSIZE;
	addr = GETCORE(0);

	if (addr == ERRCORE)
		/* errno set by GETCORE sbrk */
		return (NULL);

	/* need to pad size out so that addr is aligned */
	if ((((size_t)addr) % ALIGN) != 0)
		offset = ALIGN - (size_t)addr % ALIGN;
	else
		offset = 0;

	if (tp)
		unprotect(tp);

	/* if not segmented memory, what we need may be smaller */
	if (addr == Baddr) {
		n -= WORDSIZE;
		if (tp != NULL)
			n -= SIZE(tp);
	}

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	requestsize = n + offset;

	/* check if the requestsize could overflow in GETCORE */
	if (requestsize > MAX_MALLOC - (size_t)addr) {
		if (tp)
			protect(tp);
		errno = ENOMEM;
		return (NULL);
	}

	if (requestsize > MAX_GETCORE) {
		intptr_t	delta;
		/*
		 * the value required is too big for GETCORE() to deal with
		 * in one go, so use GETCORE() at most 2 times instead.
		 * Argument to GETCORE() must be multiple of ALIGN.
		 * If not, GETCORE(-MAX_GETCORE) will not return brk point
		 * to previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */
		delta = MAX_GETCORE;
		while (delta > 0) {
			if (GETCORE(delta) == ERRCORE) {
				if (tp)
					protect(tp);
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
				return (NULL);
			}
			requestsize -= MAX_GETCORE;
			delta = requestsize;
		}
	} else if (GETCORE(requestsize) == ERRCORE) {
		if (tp)
			protect(tp);
		return (NULL);
	}

	/* contiguous memory */
	if (addr == Baddr) {
		ASSERT(offset == 0);
		if (tp) {
			addr = ((char *)tp);
			n += SIZE(tp) + 2 * WORDSIZE;
		} else {
			addr = Baddr - WORDSIZE;
			n += WORDSIZE;
		}
	} else {
		addr += offset;
	}

	/* new bottom address */
	Baddr = addr + n;

	/* new bottom block */
	/* LINTED improper alignment */
	tp = ((TREE *)addr);
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	/* LINTED improper alignment */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
	}

	return (tp);
}

/*
 * Utility function to avoid protecting a tree node twice.
 * Return true if tp is in the NULL-terminated array of tree nodes.
 */
static int
in_list(TREE *tp, TREE **npp)
{
	TREE *sp;

	while ((sp = *npp++) != NULL)
		if (tp == sp)
			return (1);
	return (0);
}

/*
 * Tree rotation functions (BU: bottom-up, TD: top-down).
 * All functions are entered with the arguments unprotected.
 * They must return in the same condition, with all other elements
 * that have been unprotected during the operation re-protected.
 */
static void
LEFT1(TREE *x, TREE *y)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((RIGHT(x) = LEFT(y)) != NULL) {
		unprotect(*npp++ = RIGHT(x));
		PARENT(RIGHT(x)) = x;
	}
	if ((PARENT(y) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(y)) = y;
		else
			RIGHT(PARENT(y)) = y;
	}
	LEFT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && !in_list(tp, npp))
			protect(tp);
}

static void
RIGHT1(TREE *x, TREE *y)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((LEFT(x) = RIGHT(y)) != NULL) {
		unprotect(*npp++ = LEFT(x));
		PARENT(LEFT(x)) = x;
	}
	if ((PARENT(y) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(y)) = y;
		else
			RIGHT(PARENT(y)) = y;
	}
	RIGHT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && !in_list(tp, npp))
			protect(tp);
}

static void
BULEFT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[4];
	TREE **npp = node;
	TREE *tp;

	if ((RIGHT(x) = LEFT(y)) != NULL) {
		unprotect(*npp++ = RIGHT(x));
		PARENT(RIGHT(x)) = x;
	}
	if ((RIGHT(y) = LEFT(z)) != NULL) {
		unprotect(*npp++ = RIGHT(y));
		PARENT(RIGHT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	LEFT(z) = y;
	PARENT(y) = z;
	LEFT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}

static void
BURIGHT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[4];
	TREE **npp = node;
	TREE *tp;

	if ((LEFT(x) = RIGHT(y)) != NULL) {
		unprotect(*npp++ = LEFT(x));
		PARENT(LEFT(x)) = x;
	}
	if ((LEFT(y) = RIGHT(z)) != NULL) {
		unprotect(*npp++ = LEFT(y));
		PARENT(LEFT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	RIGHT(z) = y;
	PARENT(y) = z;
	RIGHT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}

static void
TDLEFT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((RIGHT(y) = LEFT(z)) != NULL) {
		unprotect(*npp++ = RIGHT(y));
		PARENT(RIGHT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	PARENT(x) = z;
	LEFT(z) = x;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}

#if 0	/* Not used, for now */
static void
TDRIGHT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((LEFT(y) = RIGHT(z)) != NULL) {
		unprotect(*npp++ = LEFT(y));
		PARENT(LEFT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	PARENT(x) = z;
	RIGHT(z) = x;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}
#endif

/*
 *	Delete a tree element
 */
static void
t_delete(TREE *op)
{
	TREE *tp, *sp, *gp;

	/* if this is a non-tree node */
	if (ISNOTREE(op)) {
		tp = LINKBAK(op);
		unprotect(tp);
		if ((sp = LINKFOR(op)) != NULL) {
			unprotect(sp);
			LINKBAK(sp) = tp;
			protect(sp);
		}
		LINKFOR(tp) = sp;
		protect(tp);
		return;
	}

	/* make op the root of the tree */
	if (PARENT(op))
		t_splay(op);

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		unprotect(tp);
		PARENT(tp) = NULL;
		if ((sp = LEFT(op)) != NULL) {
			unprotect(sp);
			PARENT(sp) = tp;
			protect(sp);
		}
		LEFT(tp) = sp;

		if ((sp = RIGHT(op)) != NULL) {
			unprotect(sp);
			PARENT(sp) = tp;
			protect(sp);
		}
		RIGHT(tp) = sp;

		Root = tp;
		protect(tp);
		return;
	}

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		unprotect(tp);
		PARENT(tp) = NULL;
		if (RIGHT(op)) {
			/* make the right-end of the left subtree its root */
			while ((sp = RIGHT(tp)) != NULL) {
				unprotect(sp);
				if ((gp = RIGHT(sp)) != NULL) {
					unprotect(gp);
					TDLEFT2(tp, sp, gp);
					protect(sp);
					protect(tp);
					tp = gp;
				} else {
					LEFT1(tp, sp);
					protect(tp);
					tp = sp;
				}
			}

			/* hook the right subtree of op to the above elt */
			RIGHT(tp) = sp = RIGHT(op);
			unprotect(sp);
			PARENT(sp) = tp;
			protect(sp);
		}
		protect(tp);
	} else if ((tp = RIGHT(op)) != NULL) {	/* no left subtree */
		unprotect(tp);
		PARENT(tp) = NULL;
		protect(tp);
	}

	Root = tp;
}

/*
 *	Bottom up splaying (simple version).
 *	The basic idea is to roughly cut in half the
 *	path from Root to tp and make tp the new root.
 */
static void
t_splay(TREE *tp)
{
	TREE *pp, *gp;

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		unprotect(pp);
		/* grandparent of tp */
		gp = PARENT(pp);
		if (gp)
			unprotect(gp);

		/* x is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
				protect(gp);
			} else {
				if (gp)
					protect(gp);
				RIGHT1(pp, tp);
			}
		} else {
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);
				protect(gp);
			} else {
				if (gp)
					protect(gp);
				LEFT1(pp, tp);
			}
		}
		protect(pp);
		unprotect(tp);	/* just in case */
	}
}

void
free(void *old)
{
	(void) mutex_lock(&__watch_malloc_lock);
	free_unlocked(old);
	(void) mutex_unlock(&__watch_malloc_lock);
}


static void
free_unlocked(void *old)
{
	if (old != NULL)
		realfree(old);
}


/*
 * memalign(align,nbytes)
 *
 * Description:
 *	Returns a block of specified size on a specified alignment boundary.
 *
 * Algorithm:
 *	Malloc enough to ensure that a block can be aligned correctly.
 *	Find the alignment point and return the fragments
 *	before and after the block.
 *
 * Errors:
 *	Returns NULL and sets errno as follows:
 *	[EINVAL]
 *		if nbytes = 0,
 *		or if alignment is misaligned,
 *		or if the heap has been detectably corrupted.
 *	[ENOMEM]
 *		if the requested memory could not be allocated.
 */

#define	misaligned(p)		((unsigned)(p) & 3)
		/* 4-byte "word" alignment is considered ok in LP64 */
#define	nextblk(p, size)	((TREE *)((char *)(p) + (size)))

void *
memalign(size_t align, size_t nbytes)
{
	size_t	reqsize;	/* Num of bytes to get from malloc() */
	TREE	*p;		/* Ptr returned from malloc() */
	TREE	*blk;		/* For addressing fragment blocks */
	size_t	blksize;	/* Current (shrinking) block size */
	TREE	*alignedp;	/* Ptr to properly aligned boundary */
	TREE	*aligned_blk;	/* The block to be returned */
	size_t	frag_size;	/* size of fragments fore and aft */
	size_t	x;

	/*
	 * check for valid size and alignment parameters
	 * MAX_ALIGN check prevents overflow in later calculation.
	 */
	if (nbytes == 0 || misaligned(align) || align == 0 ||
	    align > MAX_ALIGN) {
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * Malloc enough memory to guarantee that the result can be
	 * aligned correctly. The worst case is when malloc returns
	 * a block so close to the next alignment boundary that a
	 * fragment of minimum size cannot be created.  In order to
	 * make sure we can handle this, we need to force the
	 * alignment to be at least as large as the minimum frag size
	 * (MINSIZE + WORDSIZE).
	 */

	/* check for size that could overflow ROUND calculation */
	if (nbytes > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}
	ROUND(nbytes);
	if (nbytes < MINSIZE)
		nbytes = MINSIZE;
	ROUND(align);
	while (align < MINSIZE + WORDSIZE)
		align <<= 1;
	reqsize = nbytes + align + (MINSIZE + WORDSIZE);
	/* check for overflow */
	if (reqsize < nbytes) {
		errno = ENOMEM;
		return (NULL);
	}
	p = (TREE *) malloc(reqsize);
	if (p == (TREE *) NULL) {
		/* malloc sets errno */
		return (NULL);
	}
	(void) mutex_lock(&__watch_malloc_lock);

	/*
	 * get size of the entire block (overhead and all)
	 */
	/* LINTED improper alignment */
	blk = BLOCK(p);			/* back up to get length word */
	unprotect(blk);
	blksize = SIZE(blk);
	CLRBITS01(blksize);

	/*
	 * locate the proper alignment boundary within the block.
	 */
	x = (size_t)p;
	if (x % align != 0)
		x += align - (x % align);
	alignedp = (TREE *)x;
	/* LINTED improper alignment */
	aligned_blk = BLOCK(alignedp);

	/*
	 * Check out the space to the left of the alignment
	 * boundary, and split off a fragment if necessary.
	 */
	frag_size = (size_t)aligned_blk - (size_t)blk;
	if (frag_size != 0) {
		/*
		 * Create a fragment to the left of the aligned block.
		 */
		if (frag_size < MINSIZE + WORDSIZE) {
			/*
			 * Not enough space. So make the split
			 * at the other end of the alignment unit.
			 * We know this yields enough space, because
			 * we forced align >= MINSIZE + WORDSIZE above.
			 */
			frag_size += align;
			/* LINTED improper alignment */
			aligned_blk = nextblk(aligned_blk, align);
		}
		blksize -= frag_size;
		SIZE(aligned_blk) = blksize | BIT0;
		frag_size -= WORDSIZE;
		SIZE(blk) = frag_size | BIT0 | ISBIT1(SIZE(blk));
		free_unlocked(DATA(blk));
		/*
		 * free_unlocked(DATA(blk)) has the side-effect of calling
		 * protect() on the block following blk, that is, aligned_blk.
		 * We recover from this by unprotect()ing it here.
		 */
		unprotect(aligned_blk);
	}

	/*
	 * Is there a (sufficiently large) fragment to the
	 * right of the aligned block?
	 */
	frag_size = blksize - nbytes;
	if (frag_size >= MINSIZE + WORDSIZE) {
		/*
		 * split and free a fragment on the right
		 */
		blksize = SIZE(aligned_blk);
		SIZE(aligned_blk) = nbytes;
		/* LINTED improper alignment */
		blk = NEXT(aligned_blk);
		SETOLD01(SIZE(aligned_blk), blksize);
		frag_size -= WORDSIZE;
		SIZE(blk) = frag_size | BIT0;
		free_unlocked(DATA(blk));
	}
	copy_pattern(LIVEPAT, aligned_blk);
	protect(aligned_blk);
	(void) mutex_unlock(&__watch_malloc_lock);
	return (DATA(aligned_blk));
}
12480Sstevel@tonic-gate 
12490Sstevel@tonic-gate void *
valloc(size_t size)12502658Sraf valloc(size_t size)
12510Sstevel@tonic-gate {
12520Sstevel@tonic-gate 	static unsigned pagesize;
12530Sstevel@tonic-gate 	if (!pagesize)
12540Sstevel@tonic-gate 		pagesize = _sysconf(_SC_PAGESIZE);
12550Sstevel@tonic-gate 	return (memalign(pagesize, size));
12560Sstevel@tonic-gate }
12570Sstevel@tonic-gate 
12580Sstevel@tonic-gate void *
12590Sstevel@tonic-gate calloc(size_t num, size_t size)
12600Sstevel@tonic-gate {
12610Sstevel@tonic-gate 	void *mp;
12620Sstevel@tonic-gate 	size_t total;
12630Sstevel@tonic-gate 
12640Sstevel@tonic-gate 	total = num * size;
12650Sstevel@tonic-gate 
12660Sstevel@tonic-gate 	/* check for overflow */
12670Sstevel@tonic-gate 	if (num != 0 && total / num != size) {
12680Sstevel@tonic-gate 		errno = ENOMEM;
12690Sstevel@tonic-gate 		return (NULL);
12700Sstevel@tonic-gate 	}
12710Sstevel@tonic-gate 	if ((mp = malloc(total)) != NULL)
12720Sstevel@tonic-gate 		(void) memset(mp, 0, total);
12730Sstevel@tonic-gate 	return (mp);
12740Sstevel@tonic-gate }
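
/*
 * Illustrative sketch (compiled out): the overflow test above works
 * because when num * size wraps around size_t, dividing the truncated
 * product by num can no longer yield size.  With the calloc() above
 * preloaded, an overflowing request fails with ENOMEM:
 */
#if 0
#include <stdlib.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	/* (SIZE_MAX / 2) elements of 4 bytes each overflows size_t */
	void *p = calloc((size_t)-1 / 2, 4);

	if (p == NULL && errno == ENOMEM)
		(void) printf("overflow detected\n");
	free(p);
	return (0);
}
#endif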
12750Sstevel@tonic-gate 
12760Sstevel@tonic-gate /* ARGSUSED1 */
12770Sstevel@tonic-gate void
12782658Sraf cfree(void *p, size_t num, size_t size)
12790Sstevel@tonic-gate {
12800Sstevel@tonic-gate 	free(p);
12810Sstevel@tonic-gate }
12820Sstevel@tonic-gate 
12830Sstevel@tonic-gate typedef struct {
12840Sstevel@tonic-gate 	long cmd;
12850Sstevel@tonic-gate 	prwatch_t prwatch;
12860Sstevel@tonic-gate } ctl_t;
12870Sstevel@tonic-gate 
12880Sstevel@tonic-gate static pid_t my_pid = 0;	/* to detect whether we have fork()d */
12890Sstevel@tonic-gate static int dont_watch = 0;
12900Sstevel@tonic-gate static int do_stop = 0;
12910Sstevel@tonic-gate static int ctlfd = -1;
12920Sstevel@tonic-gate struct stat ctlstatb;
12930Sstevel@tonic-gate static int wflags = WA_WRITE;
12940Sstevel@tonic-gate 
12950Sstevel@tonic-gate static void
12960Sstevel@tonic-gate init_watch()
12970Sstevel@tonic-gate {
12980Sstevel@tonic-gate 	char str[80];
12990Sstevel@tonic-gate 	char *s;
13000Sstevel@tonic-gate 
13010Sstevel@tonic-gate 	my_pid = getpid();
13020Sstevel@tonic-gate 
13030Sstevel@tonic-gate 	dont_watch = 1;
13040Sstevel@tonic-gate 
13050Sstevel@tonic-gate 	if ((s = getenv("MALLOC_DEBUG")) == NULL)
13060Sstevel@tonic-gate 		return;
13070Sstevel@tonic-gate 
13080Sstevel@tonic-gate 	s = strncpy(str, s, sizeof (str));
	str[sizeof (str) - 1] = '\0';	/* strncpy() does not NUL-terminate on truncation */
13090Sstevel@tonic-gate 	while (s != NULL) {
13100Sstevel@tonic-gate 		char *e = strchr(s, ',');
13110Sstevel@tonic-gate 		if (e)
13120Sstevel@tonic-gate 			*e++ = '\0';
13130Sstevel@tonic-gate 		if (strcmp(s, "STOP") == 0)
13140Sstevel@tonic-gate 			do_stop = 1;
13150Sstevel@tonic-gate 		else if (strcmp(s, "WATCH") == 0)
13160Sstevel@tonic-gate 			dont_watch = 0;
13170Sstevel@tonic-gate 		else if (strcmp(s, "RW") == 0) {
13180Sstevel@tonic-gate 			dont_watch = 0;
13190Sstevel@tonic-gate 			wflags = WA_READ|WA_WRITE;
13200Sstevel@tonic-gate 		}
13210Sstevel@tonic-gate 		s = e;
13220Sstevel@tonic-gate 	}
13230Sstevel@tonic-gate 
13240Sstevel@tonic-gate 	if (dont_watch)
13250Sstevel@tonic-gate 		return;
13260Sstevel@tonic-gate 
13270Sstevel@tonic-gate 	if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
13280Sstevel@tonic-gate 	    fstat(ctlfd, &ctlstatb) != 0) {
13290Sstevel@tonic-gate 		if (ctlfd >= 0)
13300Sstevel@tonic-gate 			(void) close(ctlfd);
13310Sstevel@tonic-gate 		ctlfd = -1;
13320Sstevel@tonic-gate 		dont_watch = 1;
13330Sstevel@tonic-gate 		return;
13340Sstevel@tonic-gate 	}
13350Sstevel@tonic-gate 	/* close-on-exec */
13360Sstevel@tonic-gate 	(void) fcntl(ctlfd, F_SETFD, FD_CLOEXEC);
13370Sstevel@tonic-gate 
13380Sstevel@tonic-gate 	if (do_stop) {
13390Sstevel@tonic-gate 		int pfd;
13400Sstevel@tonic-gate 		pstatus_t pstatus;
13410Sstevel@tonic-gate 		struct {
13420Sstevel@tonic-gate 			long cmd;
13430Sstevel@tonic-gate 			fltset_t fltset;
13440Sstevel@tonic-gate 		} ctl;
13450Sstevel@tonic-gate 
13460Sstevel@tonic-gate 		/*
13470Sstevel@tonic-gate 		 * Cooperate with any /proc controller that has already set
13480Sstevel@tonic-gate 		 * other stop-on-fault flags; preserve them and add FLTWATCH.
13490Sstevel@tonic-gate 		 */
13500Sstevel@tonic-gate 		premptyset(&ctl.fltset);
13510Sstevel@tonic-gate 		if ((pfd = open("/proc/self/status", O_RDONLY)) >= 0) {
13520Sstevel@tonic-gate 			if (read(pfd, &pstatus, sizeof (pstatus))
13530Sstevel@tonic-gate 			    == sizeof (pstatus))
13540Sstevel@tonic-gate 				ctl.fltset = pstatus.pr_flttrace;
13550Sstevel@tonic-gate 			(void) close(pfd);
13560Sstevel@tonic-gate 		}
13570Sstevel@tonic-gate 		praddset(&ctl.fltset, FLTWATCH);
13580Sstevel@tonic-gate 		ctl.cmd = PCSFAULT;
13590Sstevel@tonic-gate 		(void) write(ctlfd, &ctl, sizeof (ctl));
13600Sstevel@tonic-gate 	}
13610Sstevel@tonic-gate }
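
/*
 * Illustrative sketch (compiled out): watchmalloc is normally enabled
 * by preloading the library with MALLOC_DEBUG set to a comma-separated
 * list such as "WATCH,STOP" or "RW".  init_watch() above splits that
 * list in place with strchr().  The helper below (parse_options, a
 * hypothetical name) shows the same splitting loop in isolation.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void
parse_options(const char *value)
{
	char str[80];
	char *s;

	(void) strncpy(str, value, sizeof (str));
	str[sizeof (str) - 1] = '\0';	/* ensure NUL-termination */
	for (s = str; s != NULL; ) {
		char *e = strchr(s, ',');
		if (e != NULL)
			*e++ = '\0';
		(void) printf("option: %s\n", s);
		s = e;
	}
}

int
main(void)
{
	parse_options("WATCH,STOP,RW");
	return (0);
}
#endif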
13620Sstevel@tonic-gate 
13630Sstevel@tonic-gate static int
13640Sstevel@tonic-gate nowatch()
13650Sstevel@tonic-gate {
13660Sstevel@tonic-gate 	struct stat statb;
13670Sstevel@tonic-gate 
13680Sstevel@tonic-gate 	if (dont_watch)
13690Sstevel@tonic-gate 		return (1);
13700Sstevel@tonic-gate 	if (ctlfd < 0)	/* first time */
13710Sstevel@tonic-gate 		init_watch();
13720Sstevel@tonic-gate 	else if (fstat(ctlfd, &statb) != 0 ||
13730Sstevel@tonic-gate 	    statb.st_dev != ctlstatb.st_dev ||
13740Sstevel@tonic-gate 	    statb.st_ino != ctlstatb.st_ino) {
13750Sstevel@tonic-gate 		/*
13760Sstevel@tonic-gate 		 * Someone closed our file descriptor.
13770Sstevel@tonic-gate 		 * Just open another one.
13780Sstevel@tonic-gate 		 */
13790Sstevel@tonic-gate 		if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
13800Sstevel@tonic-gate 		    fstat(ctlfd, &ctlstatb) != 0) {
13810Sstevel@tonic-gate 			if (ctlfd >= 0)
13820Sstevel@tonic-gate 				(void) close(ctlfd);
13830Sstevel@tonic-gate 			ctlfd = -1;
13840Sstevel@tonic-gate 			dont_watch = 1;
13850Sstevel@tonic-gate 			return (1);
13860Sstevel@tonic-gate 		}
13870Sstevel@tonic-gate 		/* close-on-exec */
13880Sstevel@tonic-gate 		(void) fcntl(ctlfd, F_SETFD, FD_CLOEXEC);
13890Sstevel@tonic-gate 	}
13900Sstevel@tonic-gate 	if (my_pid != getpid()) {
13910Sstevel@tonic-gate 		/*
13920Sstevel@tonic-gate 	 * We have fork()d since the last call to the allocator.
13930Sstevel@tonic-gate 	 * Watchpoints are not inherited across fork().
13940Sstevel@tonic-gate 		 * XXX: how to recover from this ???
13950Sstevel@tonic-gate 		 */
13960Sstevel@tonic-gate 		dont_watch = 1;
13970Sstevel@tonic-gate 		(void) close(ctlfd);
13980Sstevel@tonic-gate 		ctlfd = -1;
13990Sstevel@tonic-gate 	}
14000Sstevel@tonic-gate 	return (dont_watch);
14010Sstevel@tonic-gate }
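
/*
 * Illustrative sketch (compiled out): nowatch() detects an intervening
 * fork() by comparing getpid() with the pid cached at initialization,
 * since watched areas are not inherited by the child.  The same pattern
 * in isolation (forked_since_init is a hypothetical name):
 */
#if 0
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>

static pid_t cached_pid;

static int
forked_since_init(void)
{
	if (cached_pid == 0)
		cached_pid = getpid();	/* first call: remember our pid */
	return (getpid() != cached_pid);
}

int
main(void)
{
	(void) forked_since_init();	/* prime the cache in the parent */
	if (fork() == 0) {
		(void) printf("child forked: %d\n", forked_since_init());
		_exit(0);
	}
	(void) printf("parent forked: %d\n", forked_since_init());
	return (0);
}
#endif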
14020Sstevel@tonic-gate 
14030Sstevel@tonic-gate static void
14040Sstevel@tonic-gate protect(TREE *tp)
14050Sstevel@tonic-gate {
14060Sstevel@tonic-gate 	ctl_t ctl;
14070Sstevel@tonic-gate 	size_t size, sz;
14080Sstevel@tonic-gate 
14090Sstevel@tonic-gate 	if (nowatch())
14100Sstevel@tonic-gate 		return;
14110Sstevel@tonic-gate 	if (tp == NULL || DATA(tp) == Baddr)
14120Sstevel@tonic-gate 		return;
14130Sstevel@tonic-gate 
14140Sstevel@tonic-gate 	sz = size = SIZE(tp);
14150Sstevel@tonic-gate 	CLRBITS01(size);
14160Sstevel@tonic-gate 	if (size == 0)
14170Sstevel@tonic-gate 		return;
14180Sstevel@tonic-gate 	if (ISBIT0(sz))		/* block is busy, protect only the head */
14190Sstevel@tonic-gate 		size = 0;
14200Sstevel@tonic-gate 	ctl.cmd = PCWATCH;
14210Sstevel@tonic-gate 	ctl.prwatch.pr_vaddr = (uintptr_t)tp;
14220Sstevel@tonic-gate 	ctl.prwatch.pr_size = size + WORDSIZE;
14230Sstevel@tonic-gate 	ctl.prwatch.pr_wflags = wflags;
14240Sstevel@tonic-gate 	(void) write(ctlfd, &ctl, sizeof (ctl));
14250Sstevel@tonic-gate }
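
/*
 * Illustrative sketch (compiled out, Solaris /proc specific): protect()
 * and unprotect() above drive the PCWATCH control operation by writing
 * a command word followed by a prwatch_t to /proc/self/ctl.  The helper
 * below (watch_buffer, a hypothetical name) sets or clears a watched
 * area over an arbitrary buffer; pr_wflags == 0 clears it.
 */
#if 0
#include <sys/types.h>
#include <procfs.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>

static int
watch_buffer(int ctlfd, void *addr, size_t len, int wflags)
{
	struct {
		long cmd;
		prwatch_t prwatch;
	} ctl;

	ctl.cmd = PCWATCH;
	ctl.prwatch.pr_vaddr = (uintptr_t)addr;
	ctl.prwatch.pr_size = len;
	ctl.prwatch.pr_wflags = wflags;
	if (write(ctlfd, &ctl, sizeof (ctl)) != (ssize_t)sizeof (ctl))
		return (-1);
	return (0);
}

int
main(void)
{
	static char buf[64];
	int fd = open("/proc/self/ctl", O_WRONLY);

	if (fd < 0)
		return (1);
	(void) watch_buffer(fd, buf, sizeof (buf), WA_WRITE);	/* set */
	(void) watch_buffer(fd, buf, sizeof (buf), 0);		/* clear */
	(void) close(fd);
	return (0);
}
#endif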
14260Sstevel@tonic-gate 
14270Sstevel@tonic-gate static void
14280Sstevel@tonic-gate unprotect(TREE *tp)
14290Sstevel@tonic-gate {
14300Sstevel@tonic-gate 	ctl_t ctl;
14310Sstevel@tonic-gate 
14320Sstevel@tonic-gate 	if (nowatch())
14330Sstevel@tonic-gate 		return;
14340Sstevel@tonic-gate 	if (tp == NULL || DATA(tp) == Baddr)
14350Sstevel@tonic-gate 		return;
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 	ctl.cmd = PCWATCH;
14380Sstevel@tonic-gate 	ctl.prwatch.pr_vaddr = (uintptr_t)tp;
14390Sstevel@tonic-gate 	ctl.prwatch.pr_size = WORDSIZE;		/* size is arbitrary */
14400Sstevel@tonic-gate 	ctl.prwatch.pr_wflags = 0;		/* clear the watched area */
14410Sstevel@tonic-gate 	(void) write(ctlfd, &ctl, sizeof (ctl));
14420Sstevel@tonic-gate }
14433866Sraf 
14443866Sraf static void
14453866Sraf malloc_prepare()
14463866Sraf {
14473866Sraf 	(void) mutex_lock(&__watch_malloc_lock);
14483866Sraf }
14493866Sraf 
14503866Sraf static void
14513866Sraf malloc_release()
14523866Sraf {
14533866Sraf 	(void) mutex_unlock(&__watch_malloc_lock);
14543866Sraf }
14553866Sraf 
14563866Sraf #pragma init(malloc_init)
14573866Sraf static void
14583866Sraf malloc_init(void)
14593866Sraf {
14603866Sraf 	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
14613866Sraf }
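
/*
 * Illustrative sketch (compiled out): pthread_atfork() as registered
 * above acquires the allocator lock before fork() and releases it in
 * both the parent and the child, so the child never starts life with
 * the lock held by a thread that no longer exists.  The same pattern
 * around a generic mutex:
 */
#if 0
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void
prepare(void)
{
	(void) pthread_mutex_lock(&lock);	/* quiesce before fork() */
}

static void
release(void)
{
	(void) pthread_mutex_unlock(&lock);	/* runs in parent and child */
}

int
main(void)
{
	(void) pthread_atfork(prepare, release, release);
	if (fork() == 0)
		_exit(0);	/* child may now take the lock safely */
	return (0);
}
#endif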