xref: /onnv-gate/usr/src/lib/libc/port/threads/alloc.c (revision 0:68f95e015346)
1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate #include "lint.h"
30*0Sstevel@tonic-gate #include "thr_uberdata.h"
31*0Sstevel@tonic-gate #include <sys/syscall.h>
32*0Sstevel@tonic-gate 
33*0Sstevel@tonic-gate extern int __systemcall6(sysret_t *, int, ...);
34*0Sstevel@tonic-gate 
35*0Sstevel@tonic-gate /*
36*0Sstevel@tonic-gate  * This is a small and simple power of two memory allocator that is
37*0Sstevel@tonic-gate  * used internally by libc.  Allocations are fast and memory is never
38*0Sstevel@tonic-gate  * returned to the system, except for allocations of 64 Kbytes and larger,
39*0Sstevel@tonic-gate  * which are simply mmap()ed and munmap()ed as needed.  Smaller allocations
40*0Sstevel@tonic-gate  * (minimum size is 64 bytes) are obtained from mmap() of 64K chunks
41*0Sstevel@tonic-gate  * broken up into unit allocations and maintained on free lists.
42*0Sstevel@tonic-gate  * The interface requires the caller to keep track of the size of an
43*0Sstevel@tonic-gate  * allocated block and to pass that size back when freeing a block.
44*0Sstevel@tonic-gate  *
45*0Sstevel@tonic-gate  * This allocator is called during initialization, from code called
46*0Sstevel@tonic-gate  * from the dynamic linker, so it must not call anything that might
47*0Sstevel@tonic-gate  * re-invoke the dynamic linker to resolve a symbol.  That is,
48*0Sstevel@tonic-gate  * it must only call functions that are wholly private to libc.
49*0Sstevel@tonic-gate  *
50*0Sstevel@tonic-gate  * Also, this allocator must be unique across all link maps
51*0Sstevel@tonic-gate  * because pointers returned by lmalloc() are stored in the
52*0Sstevel@tonic-gate  * thread structure, which is constant across all link maps.
53*0Sstevel@tonic-gate  *
54*0Sstevel@tonic-gate  * Memory blocks returned by lmalloc() are initialized to zero.
55*0Sstevel@tonic-gate  */
56*0Sstevel@tonic-gate 
57*0Sstevel@tonic-gate #define	MINSIZE		64	/* (1 << MINSHIFT) */
58*0Sstevel@tonic-gate #define	MINSHIFT	6
59*0Sstevel@tonic-gate #define	CHUNKSIZE	(64 * 1024)
60*0Sstevel@tonic-gate 
61*0Sstevel@tonic-gate /*
62*0Sstevel@tonic-gate  * bucketnum	allocation size
63*0Sstevel@tonic-gate  * 0		64
64*0Sstevel@tonic-gate  * 1		128
65*0Sstevel@tonic-gate  * 2		256
66*0Sstevel@tonic-gate  * 3		512
67*0Sstevel@tonic-gate  * 4		1024
68*0Sstevel@tonic-gate  * 5		2048
69*0Sstevel@tonic-gate  * 6		4096
70*0Sstevel@tonic-gate  * 7		8192
71*0Sstevel@tonic-gate  * 8		16384
72*0Sstevel@tonic-gate  * 9		32768
73*0Sstevel@tonic-gate  */
74*0Sstevel@tonic-gate 
75*0Sstevel@tonic-gate /*
76*0Sstevel@tonic-gate  * See "thr_uberdata.h" for the definition of bucket_t.
77*0Sstevel@tonic-gate  * The 10 (NBUCKETS) buckets are allocated in uberdata.
78*0Sstevel@tonic-gate  */
79*0Sstevel@tonic-gate 
80*0Sstevel@tonic-gate /*
81*0Sstevel@tonic-gate  * Performance hack:
82*0Sstevel@tonic-gate  *
83*0Sstevel@tonic-gate  * On the very first lmalloc(), before any memory has been allocated,
84*0Sstevel@tonic-gate  * mmap() a 24K block of memory and carve out six 2K chunks, each
85*0Sstevel@tonic-gate  * of which is subdivided for the initial allocations from buckets
86*0Sstevel@tonic-gate  * 0, 1, 2, 3, 4 and 5, giving them initial numbers of elements
87*0Sstevel@tonic-gate  * 32, 16, 8, 4, 2 and 1, respectively.  The remaining 12K is cut
88*0Sstevel@tonic-gate  * into one 4K buffer for bucket 6 and one 8K buffer for bucket 7.
89*0Sstevel@tonic-gate  *
90*0Sstevel@tonic-gate  * This results in almost all simple single-threaded processes,
91*0Sstevel@tonic-gate  * such as those employed in the kenbus test suite, having to
92*0Sstevel@tonic-gate  * allocate only this one 24K block during their lifetimes.
93*0Sstevel@tonic-gate  */
94*0Sstevel@tonic-gate 
95*0Sstevel@tonic-gate #define	SUBCHUNKSIZE	2048
96*0Sstevel@tonic-gate #define	BASE_SIZE	(24 * 1024)
97*0Sstevel@tonic-gate 
98*0Sstevel@tonic-gate static void
99*0Sstevel@tonic-gate initial_allocation(bucket_t *bp)	/* &__uberdata.bucket[0] */
100*0Sstevel@tonic-gate {
101*0Sstevel@tonic-gate 	sysret_t rval;
102*0Sstevel@tonic-gate 	void *ptr;
103*0Sstevel@tonic-gate 	size_t size;
104*0Sstevel@tonic-gate 	size_t n;
105*0Sstevel@tonic-gate 	int bucketnum;
106*0Sstevel@tonic-gate 	void *base;
107*0Sstevel@tonic-gate 
108*0Sstevel@tonic-gate 	/*
109*0Sstevel@tonic-gate 	 * We do this seemingly obtuse call to __systemcall6(SYS_mmap)
110*0Sstevel@tonic-gate 	 * instead of simply calling mmap() directly because, if the
111*0Sstevel@tonic-gate 	 * mmap() system call fails, we must make sure that __cerror()
112*0Sstevel@tonic-gate 	 * is not called, because that would call _private___errno()
113*0Sstevel@tonic-gate 	 * which would dereference curthread and, because we are very
114*0Sstevel@tonic-gate 	 * early in libc initialization, curthread is NULL and we would
115*0Sstevel@tonic-gate 	 * draw a hard-to-debug SIGSEGV core dump, or worse.
116*0Sstevel@tonic-gate 	 * We opt to give a thread panic message instead.
117*0Sstevel@tonic-gate 	 */
118*0Sstevel@tonic-gate 	if (__systemcall6(&rval, SYS_mmap, CHUNKSIZE, BASE_SIZE,
119*0Sstevel@tonic-gate 	    PROT_READ | PROT_WRITE | PROT_EXEC,
120*0Sstevel@tonic-gate 	    _MAP_NEW | MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1L, (off_t)0) != 0)
121*0Sstevel@tonic-gate 		thr_panic("initial allocation failed; swap space exhausted?");
122*0Sstevel@tonic-gate 	base = (void *)rval.sys_rval1;
123*0Sstevel@tonic-gate 
124*0Sstevel@tonic-gate 	for (bucketnum = 0; bucketnum < 6; bucketnum++, bp++) {
125*0Sstevel@tonic-gate 		size = (size_t)MINSIZE << bucketnum;
126*0Sstevel@tonic-gate 		n = SUBCHUNKSIZE / size;
127*0Sstevel@tonic-gate 		ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);
128*0Sstevel@tonic-gate 
129*0Sstevel@tonic-gate 		ASSERT(bp->free_list == NULL);
130*0Sstevel@tonic-gate 		bp->free_list = ptr;
131*0Sstevel@tonic-gate 		while (--n != 0) {
132*0Sstevel@tonic-gate 			void *next = (void *)((caddr_t)ptr + size);
133*0Sstevel@tonic-gate 			*(void **)ptr = next;
134*0Sstevel@tonic-gate 			ptr = next;
135*0Sstevel@tonic-gate 		}
136*0Sstevel@tonic-gate 		*(void **)ptr = NULL;
137*0Sstevel@tonic-gate 	}
138*0Sstevel@tonic-gate 
139*0Sstevel@tonic-gate 	ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);
140*0Sstevel@tonic-gate 	ASSERT(bp->free_list == NULL);
141*0Sstevel@tonic-gate 	bp->free_list = ptr;
142*0Sstevel@tonic-gate 
143*0Sstevel@tonic-gate 	ptr = (void *)((caddr_t)ptr + 2 * SUBCHUNKSIZE);
144*0Sstevel@tonic-gate 	bp++;
145*0Sstevel@tonic-gate 	ASSERT(bp->free_list == NULL);
146*0Sstevel@tonic-gate 	bp->free_list = ptr;
147*0Sstevel@tonic-gate 
148*0Sstevel@tonic-gate 	ASSERT(((caddr_t)ptr - (caddr_t)base + 4 * SUBCHUNKSIZE) == BASE_SIZE);
149*0Sstevel@tonic-gate }
150*0Sstevel@tonic-gate 
151*0Sstevel@tonic-gate static int
152*0Sstevel@tonic-gate getbucketnum(size_t size)
153*0Sstevel@tonic-gate {
154*0Sstevel@tonic-gate 	int highbit = 0;
155*0Sstevel@tonic-gate 
156*0Sstevel@tonic-gate 	if (size-- <= MINSIZE)
157*0Sstevel@tonic-gate 		return (0);
158*0Sstevel@tonic-gate 
159*0Sstevel@tonic-gate #ifdef _LP64
160*0Sstevel@tonic-gate 	if (size & 0xffffffff00000000ul)
161*0Sstevel@tonic-gate 		highbit += 32, size >>= 32;
162*0Sstevel@tonic-gate #endif
163*0Sstevel@tonic-gate 	if (size & 0xffff0000)
164*0Sstevel@tonic-gate 		highbit += 16, size >>= 16;
165*0Sstevel@tonic-gate 	if (size & 0xff00)
166*0Sstevel@tonic-gate 		highbit += 8, size >>= 8;
167*0Sstevel@tonic-gate 	if (size & 0xf0)
168*0Sstevel@tonic-gate 		highbit += 4, size >>= 4;
169*0Sstevel@tonic-gate 	if (size & 0xc)
170*0Sstevel@tonic-gate 		highbit += 2, size >>= 2;
171*0Sstevel@tonic-gate 	if (size & 0x2)
172*0Sstevel@tonic-gate 		highbit += 1;
173*0Sstevel@tonic-gate 
174*0Sstevel@tonic-gate 	ASSERT(highbit >= MINSHIFT);
175*0Sstevel@tonic-gate 	return (highbit - (MINSHIFT - 1));
176*0Sstevel@tonic-gate }
177*0Sstevel@tonic-gate 
178*0Sstevel@tonic-gate void *
179*0Sstevel@tonic-gate lmalloc(size_t size)
180*0Sstevel@tonic-gate {
181*0Sstevel@tonic-gate 	int bucketnum = getbucketnum(size);
182*0Sstevel@tonic-gate 	ulwp_t *self;
183*0Sstevel@tonic-gate 	uberdata_t *udp;
184*0Sstevel@tonic-gate 	bucket_t *bp;
185*0Sstevel@tonic-gate 	void *ptr;
186*0Sstevel@tonic-gate 
187*0Sstevel@tonic-gate 	/*
188*0Sstevel@tonic-gate 	 * ulwp_t structures must be allocated from a rwx mapping since it
189*0Sstevel@tonic-gate 	 * is a normal data object _and_ it contains instructions that are
190*0Sstevel@tonic-gate 	 * executed for user-land DTrace tracing with the fasttrap provider.
191*0Sstevel@tonic-gate 	 */
192*0Sstevel@tonic-gate 	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
193*0Sstevel@tonic-gate 
194*0Sstevel@tonic-gate 	/* round size up to the proper power of 2 */
195*0Sstevel@tonic-gate 	size = (size_t)MINSIZE << bucketnum;
196*0Sstevel@tonic-gate 
197*0Sstevel@tonic-gate 	if (bucketnum >= NBUCKETS) {
198*0Sstevel@tonic-gate 		/* mmap() allocates memory already set to zero */
199*0Sstevel@tonic-gate 		ptr = _private_mmap((void *)CHUNKSIZE, size, prot,
200*0Sstevel@tonic-gate 			MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
201*0Sstevel@tonic-gate 		if (ptr == MAP_FAILED)
202*0Sstevel@tonic-gate 			ptr = NULL;
203*0Sstevel@tonic-gate 		return (ptr);
204*0Sstevel@tonic-gate 	}
205*0Sstevel@tonic-gate 
206*0Sstevel@tonic-gate 	if ((self = __curthread()) == NULL)
207*0Sstevel@tonic-gate 		udp = &__uberdata;
208*0Sstevel@tonic-gate 	else
209*0Sstevel@tonic-gate 		udp = self->ul_uberdata;
210*0Sstevel@tonic-gate 
211*0Sstevel@tonic-gate 	if (udp->bucket_init == 0) {
212*0Sstevel@tonic-gate 		ASSERT(udp->nthreads == 0);
213*0Sstevel@tonic-gate 		initial_allocation(udp->bucket);
214*0Sstevel@tonic-gate 		udp->bucket_init = 1;
215*0Sstevel@tonic-gate 	}
216*0Sstevel@tonic-gate 
217*0Sstevel@tonic-gate 	bp = &udp->bucket[bucketnum];
218*0Sstevel@tonic-gate 	if (self != NULL)
219*0Sstevel@tonic-gate 		lmutex_lock(&bp->bucket_lock);
220*0Sstevel@tonic-gate 
221*0Sstevel@tonic-gate 	if ((ptr = bp->free_list) == NULL) {
222*0Sstevel@tonic-gate 		size_t bsize;
223*0Sstevel@tonic-gate 		size_t n;
224*0Sstevel@tonic-gate 
225*0Sstevel@tonic-gate 		/*
226*0Sstevel@tonic-gate 		 * Double the number of chunks mmap()ed each time,
227*0Sstevel@tonic-gate 		 * in case of large numbers of allocations.
228*0Sstevel@tonic-gate 		 */
229*0Sstevel@tonic-gate 		if (bp->chunks == 0)
230*0Sstevel@tonic-gate 			bp->chunks = 1;
231*0Sstevel@tonic-gate 		else
232*0Sstevel@tonic-gate 			bp->chunks <<= 1;
233*0Sstevel@tonic-gate 		for (;;) {
234*0Sstevel@tonic-gate 			bsize = CHUNKSIZE * bp->chunks;
235*0Sstevel@tonic-gate 			n = bsize / size;
236*0Sstevel@tonic-gate 			ptr = _private_mmap((void *)CHUNKSIZE, bsize, prot,
237*0Sstevel@tonic-gate 				MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
238*0Sstevel@tonic-gate 			if (ptr != MAP_FAILED)
239*0Sstevel@tonic-gate 				break;
240*0Sstevel@tonic-gate 			/* try a smaller chunk allocation */
241*0Sstevel@tonic-gate 			if ((bp->chunks >>= 1) == 0) {
242*0Sstevel@tonic-gate 				if (self != NULL)
243*0Sstevel@tonic-gate 					lmutex_unlock(&bp->bucket_lock);
244*0Sstevel@tonic-gate 				return (NULL);
245*0Sstevel@tonic-gate 			}
246*0Sstevel@tonic-gate 		}
247*0Sstevel@tonic-gate 		bp->free_list = ptr;
248*0Sstevel@tonic-gate 		while (--n != 0) {
249*0Sstevel@tonic-gate 			void *next = (void *)((caddr_t)ptr + size);
250*0Sstevel@tonic-gate 			*(void **)ptr = next;
251*0Sstevel@tonic-gate 			ptr = next;
252*0Sstevel@tonic-gate 		}
253*0Sstevel@tonic-gate 		*(void **)ptr = NULL;
254*0Sstevel@tonic-gate 		ptr = bp->free_list;
255*0Sstevel@tonic-gate 	}
256*0Sstevel@tonic-gate 	bp->free_list = *(void **)ptr;
257*0Sstevel@tonic-gate 	if (self != NULL)
258*0Sstevel@tonic-gate 		lmutex_unlock(&bp->bucket_lock);
259*0Sstevel@tonic-gate 	/*
260*0Sstevel@tonic-gate 	 * We maintain the free list already zeroed except for the pointer
261*0Sstevel@tonic-gate 	 * stored at the head of the block (mmap() allocates memory already
262*0Sstevel@tonic-gate 	 * set to zero), so all we have to do is zero out the pointer.
263*0Sstevel@tonic-gate 	 */
264*0Sstevel@tonic-gate 	*(void **)ptr = NULL;
265*0Sstevel@tonic-gate 	return (ptr);
266*0Sstevel@tonic-gate }
267*0Sstevel@tonic-gate 
268*0Sstevel@tonic-gate void
269*0Sstevel@tonic-gate lfree(void *ptr, size_t size)
270*0Sstevel@tonic-gate {
271*0Sstevel@tonic-gate 	int bucketnum = getbucketnum(size);
272*0Sstevel@tonic-gate 	ulwp_t *self;
273*0Sstevel@tonic-gate 	bucket_t *bp;
274*0Sstevel@tonic-gate 
275*0Sstevel@tonic-gate 	/* round size up to the proper power of 2 */
276*0Sstevel@tonic-gate 	size = (size_t)MINSIZE << bucketnum;
277*0Sstevel@tonic-gate 
278*0Sstevel@tonic-gate 	if (bucketnum >= NBUCKETS) {
279*0Sstevel@tonic-gate 		/* see comment below */
280*0Sstevel@tonic-gate 		if (((uintptr_t)ptr & (CHUNKSIZE - 1)) != 0)
281*0Sstevel@tonic-gate 			goto bad;
282*0Sstevel@tonic-gate 		(void) _private_munmap(ptr, size);
283*0Sstevel@tonic-gate 		return;
284*0Sstevel@tonic-gate 	}
285*0Sstevel@tonic-gate 
286*0Sstevel@tonic-gate 	/*
287*0Sstevel@tonic-gate 	 * If the low order bits are not all zero as expected, then panic.
288*0Sstevel@tonic-gate 	 * This can be caused by an application calling, for example,
289*0Sstevel@tonic-gate 	 * pthread_attr_destroy() without having first called
290*0Sstevel@tonic-gate 	 * pthread_attr_init() (thereby passing uninitialized data
291*0Sstevel@tonic-gate 	 * to pthread_attr_destroy() who then calls lfree() with
292*0Sstevel@tonic-gate 	 * the uninitialized data).
293*0Sstevel@tonic-gate 	 */
294*0Sstevel@tonic-gate 	if (((uintptr_t)ptr & (size - 1)) != 0)
295*0Sstevel@tonic-gate 		goto bad;
296*0Sstevel@tonic-gate 
297*0Sstevel@tonic-gate 	/*
298*0Sstevel@tonic-gate 	 * Zeroing the memory here saves time later when reallocating it.
299*0Sstevel@tonic-gate 	 */
300*0Sstevel@tonic-gate 	(void) _private_memset(ptr, 0, size);
301*0Sstevel@tonic-gate 
302*0Sstevel@tonic-gate 	if ((self = __curthread()) == NULL)
303*0Sstevel@tonic-gate 		bp = &__uberdata.bucket[bucketnum];
304*0Sstevel@tonic-gate 	else {
305*0Sstevel@tonic-gate 		bp = &self->ul_uberdata->bucket[bucketnum];
306*0Sstevel@tonic-gate 		lmutex_lock(&bp->bucket_lock);
307*0Sstevel@tonic-gate 	}
308*0Sstevel@tonic-gate 	*(void **)ptr = bp->free_list;
309*0Sstevel@tonic-gate 	bp->free_list = ptr;
310*0Sstevel@tonic-gate 	if (self != NULL)
311*0Sstevel@tonic-gate 		lmutex_unlock(&bp->bucket_lock);
312*0Sstevel@tonic-gate 	return;
313*0Sstevel@tonic-gate 
314*0Sstevel@tonic-gate bad:
315*0Sstevel@tonic-gate 	thr_panic("lfree() called with a misaligned pointer");
316*0Sstevel@tonic-gate }
317*0Sstevel@tonic-gate 
318*0Sstevel@tonic-gate /*
319*0Sstevel@tonic-gate  * The following functions can be used internally to libc
320*0Sstevel@tonic-gate  * to make memory allocations in the style of malloc()/free()
321*0Sstevel@tonic-gate  * (where the size of the allocation is not remembered by the caller)
322*0Sstevel@tonic-gate  * but which are safe to use within critical sections, that is,
323*0Sstevel@tonic-gate  * sections of code bounded by enter_critical()/exit_critical(),
324*0Sstevel@tonic-gate  * lmutex_lock()/lmutex_unlock() or lrw_rdlock()/lrw_wrlock()/lrw_unlock().
325*0Sstevel@tonic-gate  *
326*0Sstevel@tonic-gate  * These functions must never be used to allocate memory that is
327*0Sstevel@tonic-gate  * passed out of libc, for example by strdup(), because it is a
328*0Sstevel@tonic-gate  * fatal error to free() an object allocated by libc_malloc().
329*0Sstevel@tonic-gate  * Such objects can only be freed by calling libc_free().
330*0Sstevel@tonic-gate  */
331*0Sstevel@tonic-gate 
332*0Sstevel@tonic-gate #ifdef	_LP64
333*0Sstevel@tonic-gate #define	ALIGNMENT	16
334*0Sstevel@tonic-gate #else
335*0Sstevel@tonic-gate #define	ALIGNMENT	8
336*0Sstevel@tonic-gate #endif
337*0Sstevel@tonic-gate 
338*0Sstevel@tonic-gate typedef union {
339*0Sstevel@tonic-gate 	size_t	private_size;
340*0Sstevel@tonic-gate 	char	private_align[ALIGNMENT];
341*0Sstevel@tonic-gate } private_header_t;
342*0Sstevel@tonic-gate 
343*0Sstevel@tonic-gate void *
344*0Sstevel@tonic-gate libc_malloc(size_t size)
345*0Sstevel@tonic-gate {
346*0Sstevel@tonic-gate 	private_header_t *ptr;
347*0Sstevel@tonic-gate 
348*0Sstevel@tonic-gate 	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
349*0Sstevel@tonic-gate 	if ((ptr = lmalloc(size)) == NULL)
350*0Sstevel@tonic-gate 		return (NULL);
351*0Sstevel@tonic-gate 	ptr->private_size = size;
352*0Sstevel@tonic-gate 	return (ptr + 1);
353*0Sstevel@tonic-gate }
354*0Sstevel@tonic-gate 
355*0Sstevel@tonic-gate void *
356*0Sstevel@tonic-gate libc_realloc(void *old, size_t size)
357*0Sstevel@tonic-gate {
358*0Sstevel@tonic-gate 	private_header_t *ptr;
359*0Sstevel@tonic-gate 	void *new;
360*0Sstevel@tonic-gate 
361*0Sstevel@tonic-gate 	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
362*0Sstevel@tonic-gate 	if ((ptr = lmalloc(size)) == NULL)
363*0Sstevel@tonic-gate 		return (NULL);
364*0Sstevel@tonic-gate 	ptr->private_size = size;
365*0Sstevel@tonic-gate 	new = ptr + 1;
366*0Sstevel@tonic-gate 	if (old != NULL) {
367*0Sstevel@tonic-gate 		ptr = (private_header_t *)old - 1;
368*0Sstevel@tonic-gate 		if (size >= ptr->private_size)
369*0Sstevel@tonic-gate 			size = ptr->private_size;
370*0Sstevel@tonic-gate 		(void) _memcpy(new, old, size - sizeof (*ptr));
371*0Sstevel@tonic-gate 		lfree(ptr, ptr->private_size);
372*0Sstevel@tonic-gate 	}
373*0Sstevel@tonic-gate 	return (new);
374*0Sstevel@tonic-gate }
375*0Sstevel@tonic-gate 
376*0Sstevel@tonic-gate void
377*0Sstevel@tonic-gate libc_free(void *p)
378*0Sstevel@tonic-gate {
379*0Sstevel@tonic-gate 	private_header_t *ptr;
380*0Sstevel@tonic-gate 
381*0Sstevel@tonic-gate 	if (p) {
382*0Sstevel@tonic-gate 		ptr = (private_header_t *)p - 1;
383*0Sstevel@tonic-gate 		lfree(ptr, ptr->private_size);
384*0Sstevel@tonic-gate 	}
385*0Sstevel@tonic-gate }
386*0Sstevel@tonic-gate 
/*
 * strdup()-style wrapper: the copy is allocated with libc_malloc()
 * and so must be released with libc_free(), never free().
 */
char *
libc_strdup(const char *s1)
{
	size_t nbytes = strlen(s1) + 1;
	char *copy = libc_malloc(nbytes);

	if (copy == NULL)
		return (NULL);
	(void) _memcpy(copy, s1, nbytes);
	return (copy);
}
396