xref: /onnv-gate/usr/src/lib/libc/port/threads/alloc.c (revision 6515:10dab2b883e0)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/syscall.h>

extern int __systemcall6(sysret_t *, int, ...);

/*
 * This is a small and simple power of two memory allocator that is
 * used internally by libc.  Allocations are fast and memory is never
 * returned to the system, except for allocations of 64 Kbytes and larger,
 * which are simply mmap()ed and munmap()ed as needed.  Smaller allocations
 * (minimum size is 64 bytes) are obtained from mmap() of 64K chunks
 * broken up into unit allocations and maintained on free lists.
 * The interface requires the caller to keep track of the size of an
 * allocated block and to pass that size back when freeing a block.
 *
 * This allocator is called during initialization, from code called
 * from the dynamic linker, so it must not call anything that might
 * re-invoke the dynamic linker to resolve a symbol.  That is,
 * it must only call functions that are wholly private to libc.
 *
 * Also, this allocator must be unique across all link maps
 * because pointers returned by lmalloc() are stored in the
 * thread structure, which is constant across all link maps.
 *
 * Memory blocks returned by lmalloc() are initialized to zero.
 */
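
/*
 * Illustrative usage (a sketch, not part of the original source):
 * the caller remembers the size it requested and passes the same
 * size back when freeing.
 *
 *	foo_t *fp;				(foo_t is hypothetical)
 *
 *	if ((fp = lmalloc(sizeof (foo_t))) != NULL) {
 *		... the block arrives zero-filled ...
 *		lfree(fp, sizeof (foo_t));
 *	}
 */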

#define	MINSIZE		64	/* (1 << MINSHIFT) */
#define	MINSHIFT	6
#define	CHUNKSIZE	(64 * 1024)

/*
 * bucketnum	allocation size
 * 0		64
 * 1		128
 * 2		256
 * 3		512
 * 4		1024
 * 5		2048
 * 6		4096
 * 7		8192
 * 8		16384
 * 9		32768
 */

/*
 * See "thr_uberdata.h" for the definition of bucket_t.
 * The 10 (NBUCKETS) buckets are allocated in uberdata.
 */

/*
 * Performance hack:
 *
 * On the very first lmalloc(), before any memory has been allocated,
 * mmap() a 24K block of memory and carve out six 2K chunks, each
 * of which is subdivided for the initial allocations from buckets
 * 0, 1, 2, 3, 4 and 5, giving them initial numbers of elements
 * 32, 16, 8, 4, 2 and 1, respectively.  The remaining 12K is cut
 * into one 4K buffer for bucket 6 and one 8K buffer for bucket 7.
 *
 * This results in almost all simple single-threaded processes,
 * such as those employed in the kenbus test suite, having to
 * allocate only this one 24K block during their lifetimes.
 */
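
/*
 * Layout of the initial 24K block (illustrative only, derived from
 * initial_allocation() below; SUBCHUNKSIZE is 2K):
 *
 *	base +  0K .. 12K	buckets 0-5, one 2K subchunk each
 *				(2048/64 = 32, 2048/128 = 16, ...,
 *				2048/2048 = 1 elements respectively)
 *	base + 12K .. 16K	bucket 6, one 4K buffer (a single element)
 *	base + 16K .. 24K	bucket 7, one 8K buffer (a single element)
 *
 * 12K + 4K + 8K = 24K = BASE_SIZE.
 */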

#define	SUBCHUNKSIZE	2048
#define	BASE_SIZE	(24 * 1024)

static void
initial_allocation(bucket_t *bp)	/* &__uberdata.bucket[0] */
{
	sysret_t rval;
	void *ptr;
	size_t size;
	size_t n;
	int bucketnum;
	void *base;

	/*
	 * We do this seemingly obtuse call to __systemcall6(SYS_mmap)
	 * instead of simply calling mmap() directly because, if the
	 * mmap() system call fails, we must make sure that __cerror()
	 * is not called, because that would call ___errno()
	 * which would dereference curthread and, because we are very
	 * early in libc initialization, curthread is NULL and we would
	 * draw a hard-to-debug SIGSEGV core dump, or worse.
	 * We opt to give a thread panic message instead.
	 */
	if (__systemcall6(&rval, SYS_mmap, CHUNKSIZE, BASE_SIZE,
	    PROT_READ | PROT_WRITE | PROT_EXEC,
	    _MAP_NEW | MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1L, (off_t)0) != 0)
		thr_panic("initial allocation failed; swap space exhausted?");
	base = (void *)rval.sys_rval1;

	for (bucketnum = 0; bucketnum < 6; bucketnum++, bp++) {
		size = (size_t)MINSIZE << bucketnum;
		n = SUBCHUNKSIZE / size;
		ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);

		ASSERT(bp->free_list == NULL);
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
	}

	ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	ptr = (void *)((caddr_t)ptr + 2 * SUBCHUNKSIZE);
	bp++;
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	ASSERT(((caddr_t)ptr - (caddr_t)base + 4 * SUBCHUNKSIZE) == BASE_SIZE);
}

static int
getbucketnum(size_t size)
{
	int highbit = 0;

	if (size-- <= MINSIZE)
		return (0);

#ifdef _LP64
	if (size & 0xffffffff00000000ul)
		highbit += 32, size >>= 32;
#endif
	if (size & 0xffff0000)
		highbit += 16, size >>= 16;
	if (size & 0xff00)
		highbit += 8, size >>= 8;
	if (size & 0xf0)
		highbit += 4, size >>= 4;
	if (size & 0xc)
		highbit += 2, size >>= 2;
	if (size & 0x2)
		highbit += 1;

	ASSERT(highbit >= MINSHIFT);
	return (highbit - (MINSHIFT - 1));
}
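
/*
 * Illustrative walk-through (a sketch, not part of the original source):
 *
 *	getbucketnum(64)  == 0		(fits the 64-byte minimum exactly)
 *	getbucketnum(65)  == 1		(rounds up to 128)
 *	getbucketnum(100) == 1		(rounds up to 128)
 *	getbucketnum(128) == 1		(fits 128 exactly)
 *	getbucketnum(129) == 2		(rounds up to 256)
 *
 * The scan finds the highest set bit of (size - 1), so a size that is
 * already a power of two maps to its own bucket rather than the next one.
 */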

void *
lmalloc(size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	uberdata_t *udp;
	bucket_t *bp;
	void *ptr;

	/*
	 * ulwp_t structures must be allocated from an rwx mapping since
	 * they are normal data objects _and_ they contain instructions
	 * that are executed for user-land DTrace tracing with the
	 * fasttrap provider.
	 */
	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* mmap() allocates memory already set to zero */
		ptr = mmap((void *)CHUNKSIZE, size, prot,
		    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
		if (ptr == MAP_FAILED)
			ptr = NULL;
		return (ptr);
	}

	if ((self = __curthread()) == NULL)
		udp = &__uberdata;
	else
		udp = self->ul_uberdata;

	if (udp->bucket_init == 0) {
		ASSERT(udp->nthreads == 0);
		initial_allocation(udp->bucket);
		udp->bucket_init = 1;
	}

	bp = &udp->bucket[bucketnum];
	if (self != NULL)
		lmutex_lock(&bp->bucket_lock);

	if ((ptr = bp->free_list) == NULL) {
		size_t bsize;
		size_t n;

		/*
		 * Double the number of chunks mmap()ed each time,
		 * in case of large numbers of allocations.
		 */
		if (bp->chunks == 0)
			bp->chunks = 1;
		else
			bp->chunks <<= 1;
		for (;;) {
			bsize = CHUNKSIZE * bp->chunks;
			n = bsize / size;
			ptr = mmap((void *)CHUNKSIZE, bsize, prot,
			    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
			if (ptr != MAP_FAILED)
				break;
			/* try a smaller chunk allocation */
			if ((bp->chunks >>= 1) == 0) {
				if (self != NULL)
					lmutex_unlock(&bp->bucket_lock);
				return (NULL);
			}
		}
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
		ptr = bp->free_list;
	}
	bp->free_list = *(void **)ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	/*
	 * We maintain the free list already zeroed except for the pointer
	 * stored at the head of the block (mmap() allocates memory already
	 * set to zero), so all we have to do is zero out the pointer.
	 */
	*(void **)ptr = NULL;
	return (ptr);
}
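
/*
 * Illustrative note (a sketch, not part of the original source):
 * a request larger than the largest bucket (32768 bytes) bypasses the
 * buckets entirely.  For example, lmalloc(40000) computes bucketnum 10,
 * rounds the size up to 64 << 10 = 65536 bytes and mmap()s that amount;
 * the matching lfree(ptr, 40000) munmap()s the same 65536 bytes.
 */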

void
lfree(void *ptr, size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	bucket_t *bp;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* see comment below */
		if (((uintptr_t)ptr & (CHUNKSIZE - 1)) != 0)
			goto bad;
		(void) munmap(ptr, size);
		return;
	}

	/*
	 * If the low order bits are not all zero as expected, then panic.
	 * This can be caused by an application calling, for example,
	 * pthread_attr_destroy() without having first called
	 * pthread_attr_init() (thereby passing uninitialized data
	 * to pthread_attr_destroy(), which then calls lfree() with
	 * the uninitialized data).
	 */
	if (((uintptr_t)ptr & (size - 1)) != 0)
		goto bad;

	/*
	 * Zeroing the memory here saves time later when reallocating it.
	 */
	(void) memset(ptr, 0, size);

	if ((self = __curthread()) == NULL)
		bp = &__uberdata.bucket[bucketnum];
	else {
		bp = &self->ul_uberdata->bucket[bucketnum];
		lmutex_lock(&bp->bucket_lock);
	}
	*(void **)ptr = bp->free_list;
	bp->free_list = ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	return;

bad:
	thr_panic("lfree() called with a misaligned pointer");
}

/*
 * The following functions can be used internally to libc
 * to make memory allocations in the style of malloc()/free()
 * (where the size of the allocation is not remembered by the caller)
 * but which are safe to use within critical sections, that is,
 * sections of code bounded by enter_critical()/exit_critical(),
 * lmutex_lock()/lmutex_unlock() or lrw_rdlock()/lrw_wrlock()/lrw_unlock().
 *
 * These functions must never be used to allocate memory that is
 * passed out of libc, for example by strdup(), because it is a
 * fatal error to free() an object allocated by libc_malloc().
 * Such objects can only be freed by calling libc_free().
 */
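
/*
 * Illustrative usage (a sketch, not part of the original source):
 *
 *	char *copy = libc_strdup(name);		(name is hypothetical)
 *	...					(safe under lmutex_lock())
 *	libc_free(copy);
 *
 * Blocks obtained from libc_malloc()/libc_strdup() must be released
 * with libc_free(); handing them to free() is a fatal error, as the
 * comment above notes.
 */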

#ifdef	_LP64
#define	ALIGNMENT	16
#else
#define	ALIGNMENT	8
#endif

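/*
 * Each libc_malloc() block is preceded by a private_header_t recording
 * the rounded-up size of the whole block (header included), so that
 * libc_free() and libc_realloc() can recover the size the caller did
 * not keep.  For example (illustrative arithmetic only), on a 64-bit
 * link map libc_malloc(100) asks lmalloc() for a block sized by
 * getbucketnum(100 + 16), receives 128 bytes, stores 128 in the header
 * and hands the caller the 112 bytes that follow it.
 */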
typedef union {
	size_t	private_size;
	char	private_align[ALIGNMENT];
} private_header_t;

void *
libc_malloc(size_t size)
{
	private_header_t *ptr;

	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
	if ((ptr = lmalloc(size)) == NULL)
		return (NULL);
	ptr->private_size = size;
	return (ptr + 1);
}

void *
libc_realloc(void *old, size_t size)
{
	private_header_t *ptr;
	void *new;

	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
	if ((ptr = lmalloc(size)) == NULL)
		return (NULL);
	ptr->private_size = size;
	new = ptr + 1;
	if (old != NULL) {
		ptr = (private_header_t *)old - 1;
		if (size >= ptr->private_size)
			size = ptr->private_size;
		(void) memcpy(new, old, size - sizeof (*ptr));
		lfree(ptr, ptr->private_size);
	}
	return (new);
}

void
libc_free(void *p)
{
	private_header_t *ptr;

	if (p) {
		ptr = (private_header_t *)p - 1;
		lfree(ptr, ptr->private_size);
	}
}

char *
libc_strdup(const char *s1)
{
	char *s2 = libc_malloc(strlen(s1) + 1);

	if (s2)
		(void) strcpy(s2, s1);
	return (s2);
}