/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <mtmalloc.h>
#include "mtmalloc_impl.h"
#include <unistd.h>
#include <synch.h>
#include <thread.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <sys/param.h>
#include <sys/sysmacros.h>

/*
 * To turn on the asserts, compile with -DDEBUG.
 */

#ifndef	DEBUG
#define	NDEBUG
#endif

#include <assert.h>

/*
 * The MT hot malloc implementation contained herein is designed to be
 * plug-compatible with the libc version of malloc. It is not intended
 * to replace that implementation until we decide that it is ok to break
 * customer apps (Solaris 3.0).
 *
 * For requests up to 2^16, the allocator initializes itself into NCPUS
 * worth of chains of caches. When a memory request is made, the calling thread
 * is vectored into one of NCPUS worth of caches. The LWP id gives us a cheap,
 * contention-reducing index to use; eventually, this should be replaced with
 * the actual CPU sequence number, once an interface to get it is available.
 *
 * Once the thread is vectored into one of the list of caches, the real
 * allocation of the memory begins. The request size determines which
 * bucket the allocation should be satisfied from. The management of free
 * buckets is done via a bitmask. A free bucket is represented by a 1. The
 * first free bit represents the first free bucket. The position of the bit
 * represents the position of the bucket in the arena.
 *
 * When memory from the arena is handed out, the address of the cache
 * control structure is written in the word preceding the returned memory.
 * This cache control address is used during free() to mark the buffer free
 * in the cache control structure.
 *
 * When all available memory in a cache has been depleted, a new chunk of memory
 * is allocated via sbrk(). The new cache is allocated from this chunk of memory
 * and initialized in the function create_cache(). New caches are installed at
 * the front of a singly linked list of memory pools of the same size. This
 * helps to ensure that there will tend to be available memory at the beginning
 * of the list.
 *
 * Long linked lists hurt performance. To decrease this effect, there is a
 * tunable, requestsize, that bumps up the sbrk allocation size and thus
 * increases the number of available blocks within an arena.  We also keep
 * a "hint" for each cache list, which is the last cache in the list allocated
 * from.  This lowers the cost of searching if there are a lot of fully
 * allocated blocks at the front of the list.
 *
 * For requests greater than 2^16 (oversize allocations), there are two pieces
 * of overhead. There is the OVERHEAD used to hold the cache addr
 * (&oversize_list), plus an oversize_t structure to further describe the block.
 *
 * The oversize list is kept as defragmented as possible by coalescing
 * freed oversized allocations with adjacent neighbors.
 *
 * Addresses handed out are stored in a hash table, and are aligned on
 * MTMALLOC_MIN_ALIGN-byte boundaries at both ends. Request sizes are rounded up
 * where necessary in order to achieve this. This eases the implementation of
 * MTDEBUGPATTERN and MTINITPATTERN, particularly where coalescing occurs.
 *
 * A memalign allocation incurs memalign header overhead. There are two
 * types of memalign headers, distinguished by the magic numbers
 * MTMALLOC_MEMALIGN_MAGIC and MTMALLOC_MEMALIGN_MIN_MAGIC. When the distance
 * from the malloc'ed address to the aligned address is exactly the minimum
 * size OVERHEAD, we create a header taking only one OVERHEAD space, tagged
 * with MTMALLOC_MEMALIGN_MIN_MAGIC; subtracting OVERHEAD from the memaligned
 * address then recovers the malloc'ed address. Otherwise, we create a
 * memalign header taking two OVERHEAD spaces: one stores the
 * MTMALLOC_MEMALIGN_MAGIC magic number, and the other points back to the
 * malloc'ed address.
 */
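
/*
 * Illustrative walk-through of the cached path (worked numbers only): with
 * MIN_CACHED_SHIFT == 4 and OVERHEAD == 8, a malloc(100) request is rounded
 * up to the 2^7 == 128-byte bucket (bucket index 7 - 4 == 3 in the per-CPU
 * cache list), and each block in that cache occupies 128 + 8 == 136 bytes:
 * one OVERHEAD word holding the cache_t address, followed by the 128 usable
 * bytes handed back to the caller.
 */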

#if defined(__i386) || defined(__amd64)
#include <arpa/inet.h>	/* for htonl() */
#endif

static void * morecore(size_t);
static int setup_caches(void);
static void create_cache(cache_t *, size_t bufsize, uint_t hunks);
static void * malloc_internal(size_t, percpu_t *);
static void * oversize(size_t);
static oversize_t *find_oversize(size_t);
static void add_oversize(oversize_t *);
static void copy_pattern(uint32_t, void *, size_t);
static void * verify_pattern(uint32_t, void *, size_t);
static void reinit_cpu_list(void);
static void reinit_cache(cache_t *);
static void free_oversize(oversize_t *);
static oversize_t *oversize_header_alloc(uintptr_t, size_t);

/*
 * oversize hash table stuff
 */
#define	NUM_BUCKETS	67	/* must be prime */
#define	HASH_OVERSIZE(caddr)	((uintptr_t)(caddr) % NUM_BUCKETS)
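/*
 * Illustrative: an oversize block whose base address is 140368 (0x22450)
 * hashes to bucket 140368 % 67 == 3.
 */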
oversize_t *ovsz_hashtab[NUM_BUCKETS];

/*
 * Gets a decent "current cpu identifier", to be used to reduce contention.
 * Eventually, this should be replaced by an interface to get the actual
 * CPU sequence number in libthread/liblwp.
 */
extern uint_t _thr_self();
#pragma weak _thr_self
#define	get_curcpu_func() (curcpu_func)_thr_self

#define	ALIGN(x, a)	((((uintptr_t)(x) + ((uintptr_t)(a) - 1)) \
			& ~((uintptr_t)(a) - 1)))
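/*
 * Illustrative: ALIGN(0x1234, 16) == (0x1234 + 0xf) & ~0xf == 0x1240;
 * values that are already aligned are unchanged.
 */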

/* need this to deal with the little-endianness of x86 */
#if defined(__i386) || defined(__amd64)
#define	FLIP_EM(x)	htonl((x))
#else
#define	FLIP_EM(x)	(x)
#endif

#define	INSERT_ONLY			0
#define	COALESCE_LEFT			0x00000001
#define	COALESCE_RIGHT			0x00000002
#define	COALESCE_WITH_BOTH_SIDES	(COALESCE_LEFT | COALESCE_RIGHT)

#define	OVERHEAD	8	/* size needed to write cache addr */
#define	HUNKSIZE	8192	/* just a multiplier */

#define	MAX_CACHED_SHIFT	16	/* 64K is the max cached size */
#define	MAX_CACHED		(1 << MAX_CACHED_SHIFT)
#define	MIN_CACHED_SHIFT	4	/* smaller requests rounded up */
#define	MTMALLOC_MIN_ALIGN	8	/* min guaranteed alignment */

#define	NUM_CACHES	(MAX_CACHED_SHIFT - MIN_CACHED_SHIFT + 1)
#define	CACHELIST_SIZE	ALIGN(NUM_CACHES * sizeof (cache_head_t), \
    CACHE_COHERENCY_UNIT)
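/*
 * With the values above, NUM_CACHES == 16 - 4 + 1 == 13: one bucket per
 * power of two from 2^4 (16 bytes) through 2^16 (64K). CACHELIST_SIZE is
 * simply those 13 cache_head_t's rounded up to a cache-coherency boundary.
 */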

#define	MINSIZE		9	/* for requestsize, tunable */
#define	MAXSIZE		256	/* arbitrary, big enough, for requestsize */

#define	FREEPATTERN	0xdeadbeef /* debug fill pattern for free buf */
#define	INITPATTERN	0xbaddcafe /* debug fill pattern for new buf */

#define	misaligned(p)	((unsigned)(p) & (sizeof (int) - 1))
#define	IS_OVERSIZE(x, y)	(((x) < (y)) && (((x) > MAX_CACHED)? 1 : 0))

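/*
 * IS_OVERSIZE(x, y) is true only when MAX_CACHED < x < y. Illustrative:
 * IS_OVERSIZE(70000, 100000) is 1, while IS_OVERSIZE(1000, 100000) is 0
 * because 1000 does not exceed MAX_CACHED (65536).
 */
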
static long requestsize = MINSIZE; /* 9 pages per cache; tunable; 9 is min */

static uint_t cpu_mask;
static curcpu_func curcpu;

static int32_t debugopt;
static int32_t reinit;

static percpu_t *cpu_list;
static oversize_t oversize_list;
static mutex_t oversize_lock;

static int ncpus;

#define	MTMALLOC_OVERSIZE_MAGIC		((uintptr_t)&oversize_list)
#define	MTMALLOC_MEMALIGN_MAGIC		((uintptr_t)&oversize_list + 1)
#define	MTMALLOC_MEMALIGN_MIN_MAGIC	((uintptr_t)&oversize_list + 2)

/*
 * We require allocations handed out to be aligned on MTMALLOC_MIN_ALIGN-byte
 * boundaries. We round up sizeof (oversize_t) (when necessary) to ensure that
 * this is achieved.
 */
#define	OVSZ_SIZE		(ALIGN(sizeof (oversize_t), MTMALLOC_MIN_ALIGN))
#define	OVSZ_HEADER_SIZE	(OVSZ_SIZE + OVERHEAD)
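
/*
 * Illustrative layout of an oversize block (addresses increase rightward):
 *
 *	+---------------+----------+--------------------------+
 *	| oversize_t    | OVERHEAD | user data (size bytes)   |
 *	| (OVSZ_SIZE)   | word     |                          |
 *	+---------------+----------+--------------------------+
 *	                 ^ holds MTMALLOC_OVERSIZE_MAGIC
 *	                            ^ address returned to the caller
 */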

/*
 * A memalign header takes 2 OVERHEAD spaces: one for the memalign magic
 * number, and the other to point back to the start address of the
 * originally allocated space.
 */
#define	MEMALIGN_HEADER_SIZE	(2 * OVERHEAD)
#define	MEMALIGN_HEADER_ALLOC(x, shift, malloc_addr)\
	if (shift == OVERHEAD)\
		*((uintptr_t *)((caddr_t)x - OVERHEAD)) = \
			MTMALLOC_MEMALIGN_MIN_MAGIC; \
	else {\
		*((uintptr_t *)((caddr_t)x - OVERHEAD)) = \
			MTMALLOC_MEMALIGN_MAGIC; \
		*((uintptr_t *)((caddr_t)x - 2 * OVERHEAD)) = \
			(uintptr_t)malloc_addr; \
	}
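
/*
 * Illustrative view of the two memalign headers, for an aligned address x:
 *
 *   shift == OVERHEAD:	x - OVERHEAD holds MTMALLOC_MEMALIGN_MIN_MAGIC,
 *			and x - OVERHEAD is itself the malloc'ed address.
 *
 *   shift >  OVERHEAD:	x - OVERHEAD holds MTMALLOC_MEMALIGN_MAGIC and
 *			x - 2 * OVERHEAD holds the original malloc'ed
 *			address, so free() and realloc() can find it.
 */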

void *
malloc(size_t bytes)
{
	percpu_t *list_rotor;
	uint_t	list_index;

	/*
	 * This test is due to linking with libthread: there are malloc
	 * calls prior to this library being initialized.
	 *
	 * If setup_caches() fails, we set ENOMEM and return NULL.
	 */
	if (cpu_list == (percpu_t *)NULL) {
		if (setup_caches() == 0) {
			errno = ENOMEM;
			return (NULL);
		}
	}

	if (bytes > MAX_CACHED)
		return (oversize(bytes));

	list_index = (curcpu() & cpu_mask);

	list_rotor = &cpu_list[list_index];

	return (malloc_internal(bytes, list_rotor));
}

void *
realloc(void * ptr, size_t bytes)
{
	void *new, *data_ptr;
	cache_t *cacheptr;
	caddr_t mem;
	size_t shift = 0;

	if (ptr == NULL)
		return (malloc(bytes));

	if (bytes == 0) {
		free(ptr);
		return (NULL);
	}

	data_ptr = ptr;
	mem = (caddr_t)ptr - OVERHEAD;

	new = malloc(bytes);

	if (new == NULL)
		return (NULL);

	/*
	 * If new == ptr, ptr has previously been freed. Passing a freed
	 * pointer to realloc() is not allowed - unless the caller
	 * specifically states otherwise, in which case we must avoid freeing
	 * ptr (i.e. new) before we return new. There is (obviously) no
	 * requirement to memcpy() ptr to new before we return.
	 */
	if (new == ptr) {
		if (!(debugopt & MTDOUBLEFREE))
			abort();
		return (new);
	}

	if (*(uintptr_t *)mem == MTMALLOC_MEMALIGN_MAGIC) {
		mem -= OVERHEAD;
		ptr = (void *)*(uintptr_t *)mem;
		mem = (caddr_t)ptr - OVERHEAD;
		shift = (size_t)((uintptr_t)data_ptr - (uintptr_t)ptr);
	} else if (*(uintptr_t *)mem == MTMALLOC_MEMALIGN_MIN_MAGIC) {
		ptr = (void *) mem;
		mem -= OVERHEAD;
		shift = OVERHEAD;
	}

	if (*(uintptr_t *)mem == MTMALLOC_OVERSIZE_MAGIC) {
		oversize_t *old;

		old = (oversize_t *)(mem - OVSZ_SIZE);
		(void) memcpy(new, data_ptr, MIN(bytes, old->size - shift));
		free(ptr);
		return (new);
	}

	cacheptr = (cache_t *)*(uintptr_t *)mem;

	(void) memcpy(new, data_ptr,
		MIN(cacheptr->mt_size - OVERHEAD - shift, bytes));
	free(ptr);

	return (new);
}

void *
calloc(size_t nelem, size_t bytes)
{
	void * ptr;
	size_t size = nelem * bytes;

	/* guard against overflow in nelem * bytes */
	if (nelem != 0 && size / nelem != bytes) {
		errno = ENOMEM;
		return (NULL);
	}

	ptr = malloc(size);
	if (ptr == NULL)
		return (NULL);
	bzero(ptr, size);

	return (ptr);
}

void
free(void * ptr)
{
	cache_t *cacheptr;
	caddr_t mem;
	int32_t i;
	caddr_t freeblocks;
	uintptr_t offset;
	uchar_t mask;
	int32_t which_bit, num_bytes;

	if (ptr == NULL)
		return;

	mem = (caddr_t)ptr - OVERHEAD;

	if (*(uintptr_t *)mem == MTMALLOC_MEMALIGN_MAGIC) {
		mem -= OVERHEAD;
		ptr = (void *)*(uintptr_t *)mem;
		mem = (caddr_t)ptr - OVERHEAD;
	} else if (*(uintptr_t *)mem == MTMALLOC_MEMALIGN_MIN_MAGIC) {
		ptr = (void *) mem;
		mem -= OVERHEAD;
	}

	if (*(uintptr_t *)mem == MTMALLOC_OVERSIZE_MAGIC) {
		oversize_t *big, **opp;
		int bucket;

		big = (oversize_t *)(mem - OVSZ_SIZE);
		(void) mutex_lock(&oversize_lock);

		bucket = HASH_OVERSIZE(big->addr);
		for (opp = &ovsz_hashtab[bucket]; *opp != NULL;
		    opp = &(*opp)->hash_next)
			if (*opp == big)
				break;

		if (*opp == NULL) {
			if (!(debugopt & MTDOUBLEFREE))
				abort();
			(void) mutex_unlock(&oversize_lock);
			return;
		}

		*opp = big->hash_next;	/* remove big from the hash table */
		big->hash_next = NULL;

		if (debugopt & MTDEBUGPATTERN)
			copy_pattern(FREEPATTERN, ptr, big->size);
		add_oversize(big);
		(void) mutex_unlock(&oversize_lock);
		return;
	}

	cacheptr = (cache_t *)*(uintptr_t *)mem;
	freeblocks = cacheptr->mt_freelist;

	/*
	 * This is the distance measured in bits into the arena.
	 * The value of offset is in bytes but there is a 1-1 correlation
	 * between distance into the arena and distance into the
	 * freelist bitmask.
	 */
	offset = mem - cacheptr->mt_arena;

	/*
	 * i is total number of bits to offset into freelist bitmask.
	 */

	i = offset / cacheptr->mt_size;

	num_bytes = i >> 3;

	/*
	 * which_bit is the bit offset into the byte in the freelist.
	 * If our freelist bitmask looks like 0xf3 and we are freeing
	 * block 5 (i.e. the 6th block), our mask will be 0xf7 after
	 * the free. Bits are assigned left to right, which is why the
	 * mask is 0x80 and not 0x01.
	 */
	which_bit = i - (num_bytes << 3);

	mask = 0x80 >> which_bit;
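
	/*
	 * Illustrative check of the comment above: for block i == 5,
	 * num_bytes == 0 and which_bit == 5, so mask == 0x80 >> 5 == 0x04,
	 * and 0xf3 | 0x04 == 0xf7.
	 */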

	freeblocks += num_bytes;

	if (debugopt & MTDEBUGPATTERN)
		copy_pattern(FREEPATTERN, ptr, cacheptr->mt_size - OVERHEAD);

	(void) mutex_lock(&cacheptr->mt_cache_lock);

	if (*freeblocks & mask) {
		if (!(debugopt & MTDOUBLEFREE))
			abort();
	} else {
		*freeblocks |= mask;
		cacheptr->mt_nfree++;
	}

	(void) mutex_unlock(&cacheptr->mt_cache_lock);
}

void *
memalign(size_t alignment, size_t size)
{
	size_t alloc_size;
	uintptr_t offset;
	void *alloc_buf;
	void *ret_buf;

	if (size == 0 || alignment == 0 ||
		misaligned(alignment) ||
		(alignment & (alignment - 1)) != 0) {
		errno = EINVAL;
		return (NULL);
	}

	/* <= MTMALLOC_MIN_ALIGN, malloc can provide directly */
	if (alignment <= MTMALLOC_MIN_ALIGN)
		return (malloc(size));

	alloc_size = size + alignment - MTMALLOC_MIN_ALIGN;

	if (alloc_size < size) { /* overflow */
		errno = ENOMEM;
		return (NULL);
	}

	alloc_buf = malloc(alloc_size);

	if (alloc_buf == NULL)
		/* malloc sets errno */
		return (NULL);

	/*
	 * If alloc_size > MAX_CACHED, malloc() will have returned a multiple of
	 * MTMALLOC_MIN_ALIGN, having rounded-up alloc_size if necessary. Since
	 * we will use alloc_size to return the excess fragments to the free
	 * list, we also round-up alloc_size if necessary.
	 */
	if ((alloc_size > MAX_CACHED) &&
	    (alloc_size & (MTMALLOC_MIN_ALIGN - 1)))
		alloc_size = ALIGN(alloc_size, MTMALLOC_MIN_ALIGN);

	if ((offset = (uintptr_t)alloc_buf & (alignment - 1)) == 0) {
		/* aligned correctly */

		size_t frag_size = alloc_size -
			(size + MTMALLOC_MIN_ALIGN + OVSZ_HEADER_SIZE);

		/*
		 * If the leftover piece of the memory is > MAX_CACHED,
		 * split off the piece and return it back to the freelist.
		 */
		if (IS_OVERSIZE(frag_size, alloc_size)) {
			oversize_t *orig, *tail;
			uintptr_t taddr;
			size_t data_size;
			taddr = ALIGN((uintptr_t)alloc_buf + size,
					MTMALLOC_MIN_ALIGN);
			data_size = taddr - (uintptr_t)alloc_buf;
			orig = (oversize_t *)((uintptr_t)alloc_buf -
					OVSZ_HEADER_SIZE);
			frag_size = orig->size - data_size -
					OVSZ_HEADER_SIZE;
			orig->size = data_size;
			tail = oversize_header_alloc(taddr, frag_size);
			free_oversize(tail);
		}
		ret_buf = alloc_buf;
	} else {
		uchar_t	oversize_bits = 0;
		size_t	head_sz, data_sz, tail_sz;
		uintptr_t ret_addr, taddr, shift, tshift;
		oversize_t *orig, *tail;
		size_t tsize;

		/* needs to be aligned */
		shift = alignment - offset;

		assert(shift >= MTMALLOC_MIN_ALIGN);
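
		/*
		 * Illustrative: malloc() results are MTMALLOC_MIN_ALIGN (8)
		 * byte aligned, so offset is a multiple of 8 smaller than
		 * alignment; shift is therefore a nonzero multiple of 8 and
		 * the assert holds. E.g. alignment == 64 and offset == 40
		 * give shift == 24.
		 */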

		ret_addr = ((uintptr_t)alloc_buf + shift);
		ret_buf = (void *)ret_addr;

		if (alloc_size <= MAX_CACHED) {
			MEMALIGN_HEADER_ALLOC(ret_addr, shift, alloc_buf);
			return (ret_buf);
		}

		/*
		 * Only check for the fragments when the memory is allocated
		 * from oversize_list.  Split off a fragment and return it
		 * to the oversize freelist when it's > MAX_CACHED.
		 */

		head_sz = shift - MAX(MEMALIGN_HEADER_SIZE, OVSZ_HEADER_SIZE);

		tail_sz = alloc_size -
			(shift + size + MTMALLOC_MIN_ALIGN + OVSZ_HEADER_SIZE);

		oversize_bits |= IS_OVERSIZE(head_sz, alloc_size) |
				IS_OVERSIZE(size, alloc_size) << DATA_SHIFT |
				IS_OVERSIZE(tail_sz, alloc_size) << TAIL_SHIFT;

		switch (oversize_bits) {
			case NONE_OVERSIZE:
			case DATA_OVERSIZE:
				MEMALIGN_HEADER_ALLOC(ret_addr, shift,
					alloc_buf);
				break;
			case HEAD_OVERSIZE:
				/*
				 * If we can extend the data to > MAX_CACHED
				 * while the head remains > MAX_CACHED, we
				 * split the head end as in the case where
				 * both head and data are oversized;
				 * otherwise just create a memalign header.
				 */
				tsize = (shift + size) - (MAX_CACHED + 8 +
					MTMALLOC_MIN_ALIGN + OVSZ_HEADER_SIZE);

				if (!IS_OVERSIZE(tsize, alloc_size)) {
					MEMALIGN_HEADER_ALLOC(ret_addr, shift,
						alloc_buf);
					break;
				} else {
					tsize += OVSZ_HEADER_SIZE;
					taddr = ALIGN((uintptr_t)alloc_buf +
						tsize, MTMALLOC_MIN_ALIGN);
					tshift = ret_addr - taddr;
					MEMALIGN_HEADER_ALLOC(ret_addr, tshift,
						taddr);
					ret_addr = taddr;
					shift = ret_addr - (uintptr_t)alloc_buf;
				}
				/* FALLTHROUGH */
			case HEAD_AND_DATA_OVERSIZE:
				/*
				 * Split off the head fragment and
				 * return it back to oversize freelist.
				 * Create oversize header for the piece
				 * of (data + tail fragment).
				 */
				orig = (oversize_t *)((uintptr_t)alloc_buf -
						OVSZ_HEADER_SIZE);
				(void) oversize_header_alloc(ret_addr -
						OVSZ_HEADER_SIZE,
						(orig->size - shift));
				orig->size = shift - OVSZ_HEADER_SIZE;

				/* free up the head fragment */
				free_oversize(orig);
				break;
			case TAIL_OVERSIZE:
				/*
				 * If we can extend data > MAX_CACHED and have
				 * tail-end still > MAX_CACHED, we split tail
				 * end, otherwise just create memalign header.
				 */
				orig = (oversize_t *)((uintptr_t)alloc_buf -
						OVSZ_HEADER_SIZE);
				tsize = orig->size - (MAX_CACHED + 8 +
					shift + OVSZ_HEADER_SIZE +
					MTMALLOC_MIN_ALIGN);
				if (!IS_OVERSIZE(tsize, alloc_size)) {
					MEMALIGN_HEADER_ALLOC(ret_addr, shift,
						alloc_buf);
					break;
				} else {
					size = MAX_CACHED + 8;
				}
				/* FALLTHROUGH */
			case DATA_AND_TAIL_OVERSIZE:
				/*
				 * Split off the tail fragment and
				 * return it back to oversize freelist.
				 * Create memalign header and adjust
				 * the size for the piece of
				 * (head fragment + data).
				 */
				taddr = ALIGN(ret_addr + size,
						MTMALLOC_MIN_ALIGN);
				data_sz = (size_t)(taddr -
						(uintptr_t)alloc_buf);
				orig = (oversize_t *)((uintptr_t)alloc_buf -
						OVSZ_HEADER_SIZE);
				tsize = orig->size - data_sz;
				orig->size = data_sz;
				MEMALIGN_HEADER_ALLOC(ret_buf, shift,
					alloc_buf);
				tsize -= OVSZ_HEADER_SIZE;
				tail = oversize_header_alloc(taddr, tsize);
				free_oversize(tail);
				break;
			case HEAD_AND_TAIL_OVERSIZE:
				/*
				 * Split off the head fragment.
				 * We try to free up the tail end when we can
				 * extend the data size to (MAX_CACHED + 8)
				 * and the tail end remains oversized.
				 * The bottom line is that all split pieces
				 * must be oversize in size.
				 */
				orig = (oversize_t *)((uintptr_t)alloc_buf -
					OVSZ_HEADER_SIZE);
				tsize = orig->size - (MAX_CACHED + 8 +
					OVSZ_HEADER_SIZE + shift +
					MTMALLOC_MIN_ALIGN);

				if (!IS_OVERSIZE(tsize, alloc_size)) {
					/*
					 * If the chunk is not big enough
					 * to make both data and tail oversize
					 * we just keep them as one piece.
					 */
					(void) oversize_header_alloc(ret_addr -
						OVSZ_HEADER_SIZE,
						orig->size - shift);
					orig->size = shift -
						OVSZ_HEADER_SIZE;
					free_oversize(orig);
					break;
				} else {
					/*
					 * Extend the data size beyond
					 * MAX_CACHED and handle it as if
					 * head, data, and tail were all
					 * oversized.
					 */
					size = MAX_CACHED + 8;
				}
				/* FALLTHROUGH */
			case ALL_OVERSIZE:
				/*
				 * split off the head and tail fragments,
				 * return them back to the oversize freelist.
				 * Alloc oversize header for data seg.
				 */
				orig = (oversize_t *)((uintptr_t)alloc_buf -
					OVSZ_HEADER_SIZE);
				tsize = orig->size;
				orig->size = shift - OVSZ_HEADER_SIZE;
				free_oversize(orig);

				taddr = ALIGN(ret_addr + size,
					MTMALLOC_MIN_ALIGN);
				data_sz = taddr - ret_addr;
				assert(tsize > (shift + data_sz +
					OVSZ_HEADER_SIZE));
				tail_sz = tsize -
					(shift + data_sz + OVSZ_HEADER_SIZE);

				/* create oversize header for data seg */
				(void) oversize_header_alloc(ret_addr -
					OVSZ_HEADER_SIZE, data_sz);

				/* create oversize header for tail fragment */
				tail = oversize_header_alloc(taddr, tail_sz);
				free_oversize(tail);
				break;
			default:
				/* should not reach here */
				assert(0);
		}
	}
	return (ret_buf);
}


void *
valloc(size_t size)
{
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	if (!pagesize)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}

void
mallocctl(int cmd, long value)
{
	switch (cmd) {

	case MTDEBUGPATTERN:
		/*
		 * Reinitialize free blocks in case malloc() is called prior
		 * to mallocctl().
		 */
		if (value && !(debugopt & cmd)) {
			reinit++;
			debugopt |= cmd;
			reinit_cpu_list();
		}
		/*FALLTHRU*/
	case MTDOUBLEFREE:
	case MTINITBUFFER:
		if (value)
			debugopt |= cmd;
		else
			debugopt &= ~cmd;
		break;
	case MTCHUNKSIZE:
		if (value >= MINSIZE && value <= MAXSIZE)
			requestsize = value;
		break;
	default:
		break;
	}
}

/*
 * if this function is changed, update the fallback code in setup_caches to
 * set ncpus to the number of possible return values. (currently 1)
 */
static uint_t
fallback_curcpu(void)
{
	return (0);
}

/*
 * Returns non-zero on success, zero on failure.
 *
 * This carefully doesn't set cpu_list until initialization is finished.
 */
static int
setup_caches(void)
{
	static mutex_t init_lock = DEFAULTMUTEX;

	uintptr_t oldbrk;
	uintptr_t newbrk;

	size_t cache_space_needed;
	size_t padding;

	curcpu_func new_curcpu;
	uint_t new_cpu_mask;
	percpu_t *new_cpu_list;

	uint_t i, j;
	uintptr_t list_addr;

	(void) mutex_lock(&init_lock);
	if (cpu_list != NULL) {
		(void) mutex_unlock(&init_lock);
		return (1);		/* success -- already initialized */
	}

	new_curcpu = get_curcpu_func();
	if (new_curcpu == NULL) {
		new_curcpu = fallback_curcpu;
		ncpus = 1;
	} else {
		if ((ncpus = 2 * sysconf(_SC_NPROCESSORS_CONF)) <= 0)
			ncpus = 4; /* decent default value */
	}
	assert(ncpus > 0);

	/* round ncpus up to a power of 2 */
	while (ncpus & (ncpus - 1))
		ncpus++;

	new_cpu_mask = ncpus - 1;	/* create the cpu mask */
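
	/*
	 * Illustrative: on a 3-CPU machine, ncpus starts at 6 and the loop
	 * above rounds it up to 8, so new_cpu_mask == 0x7 and
	 * curcpu() & cpu_mask cheaply selects one of 8 percpu_t slots.
	 */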

	/*
	 * We now do some magic with the brk.  What we want to get in the
	 * end is a bunch of well-aligned stuff in a big initial allocation.
	 * Along the way, we do sanity checks to make sure no one else has
	 * touched the brk (which shouldn't happen, but it's always good to
	 * check).
	 *
	 * First, make sure sbrk is sane, and store the current brk in oldbrk.
	 */
	oldbrk = (uintptr_t)sbrk(0);
	if ((void *)oldbrk == (void *)-1) {
		(void) mutex_unlock(&init_lock);
		return (0);	/* sbrk is broken -- we're doomed. */
	}

	/*
	 * Now, align the brk to a multiple of CACHE_COHERENCY_UNIT, so that
	 * the percpu structures and cache lists will be properly aligned, and
	 * so that hunks will be page-aligned (assuming HUNKSIZE >= PAGESIZE),
	 * allowing them to be paged out individually.
	 */
	newbrk = ALIGN(oldbrk, CACHE_COHERENCY_UNIT);
	if (newbrk != oldbrk && (uintptr_t)sbrk(newbrk - oldbrk) != oldbrk) {
		(void) mutex_unlock(&init_lock);
		return (0);	/* someone else sbrked */
	}

	/*
	 * For each cpu, there is one percpu_t and a list of caches
	 */
	cache_space_needed = ncpus * (sizeof (percpu_t) + CACHELIST_SIZE);

	new_cpu_list = (percpu_t *)sbrk(cache_space_needed);

	if (new_cpu_list == (percpu_t *)-1 ||
	    (uintptr_t)new_cpu_list != newbrk) {
		(void) mutex_unlock(&init_lock);
		return (0);	/* someone else sbrked */
	}

	/*
	 * Finally, align the brk to HUNKSIZE so that all hunks are
	 * page-aligned, to avoid edge-effects.
	 */

	newbrk = (uintptr_t)new_cpu_list + cache_space_needed;

	padding = ALIGN(newbrk, HUNKSIZE) - newbrk;

	if (padding > 0 && (uintptr_t)sbrk(padding) != newbrk) {
		(void) mutex_unlock(&init_lock);
		return (0);	/* someone else sbrked */
	}

	list_addr = ((uintptr_t)new_cpu_list + (sizeof (percpu_t) * ncpus));

	/* initialize the percpu list */
	for (i = 0; i < ncpus; i++) {
		new_cpu_list[i].mt_caches = (cache_head_t *)list_addr;
		for (j = 0; j < NUM_CACHES; j++) {
			new_cpu_list[i].mt_caches[j].mt_cache = NULL;
			new_cpu_list[i].mt_caches[j].mt_hint = NULL;
		}

		bzero(&new_cpu_list[i].mt_parent_lock, sizeof (mutex_t));

		/* get the correct cache list alignment */
		list_addr += CACHELIST_SIZE;
	}

	/*
	 * Initialize oversize listhead
	 */
	oversize_list.next_bysize = &oversize_list;
	oversize_list.prev_bysize = &oversize_list;
	oversize_list.next_byaddr = &oversize_list;
	oversize_list.prev_byaddr = &oversize_list;
	oversize_list.addr = NULL;
	oversize_list.size = 0;		/* sentinel */

	/*
	 * now install the global variables, leaving cpu_list for last, so that
	 * there aren't any race conditions.
	 */
	curcpu = new_curcpu;
	cpu_mask = new_cpu_mask;
	cpu_list = new_cpu_list;

	(void) mutex_unlock(&init_lock);

	return (1);
}

static void
create_cache(cache_t *cp, size_t size, uint_t chunksize)
{
	long nblocks;

	bzero(&cp->mt_cache_lock, sizeof (mutex_t));
	cp->mt_size = size;
	cp->mt_freelist = ((caddr_t)cp + sizeof (cache_t));
	cp->mt_span = chunksize * HUNKSIZE - sizeof (cache_t);
	cp->mt_hunks = chunksize;
	/*
	 * rough calculation. We will need to adjust later.
	 */
	nblocks = cp->mt_span / cp->mt_size;
	nblocks >>= 3;
	if (nblocks == 0) { /* less than 8 free blocks in this pool */
		int32_t numblocks = 0;
		long i = cp->mt_span;
		size_t sub = cp->mt_size;
		uchar_t mask = 0;

		while (i > sub) {
			numblocks++;
			i -= sub;
		}
		nblocks = numblocks;
		cp->mt_arena = (caddr_t)ALIGN(cp->mt_freelist + 8, 8);
		cp->mt_nfree = numblocks;
		while (numblocks--) {
			mask |= 0x80 >> numblocks;
		}
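		/*
		 * Illustrative: for numblocks == 3 the loop above sets the
		 * top three bits, leaving mask == 0xe0, i.e. three free
		 * blocks at the front of the arena.
		 */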
		*(cp->mt_freelist) = mask;
	} else {
		cp->mt_arena = (caddr_t)ALIGN((caddr_t)cp->mt_freelist +
			nblocks, 32);
		/* recompute nblocks */
		nblocks = (uintptr_t)((caddr_t)cp->mt_freelist +
			cp->mt_span - cp->mt_arena) / cp->mt_size;
		cp->mt_nfree = ((nblocks >> 3) << 3);
		/* Set everything to free */
		(void) memset(cp->mt_freelist, 0xff, nblocks >> 3);
	}

	if (debugopt & MTDEBUGPATTERN)
		copy_pattern(FREEPATTERN, cp->mt_arena, cp->mt_size * nblocks);

	cp->mt_next = NULL;
}

static void
reinit_cpu_list(void)
{
	oversize_t *wp = oversize_list.next_bysize;
	percpu_t *cpuptr;
	cache_t *thiscache;
	cache_head_t *cachehead;

	if (wp == NULL || cpu_list == NULL) {
		reinit = 0;
		return;
	}

	/* Reinitialize free oversize blocks. */
	(void) mutex_lock(&oversize_lock);
	if (debugopt & MTDEBUGPATTERN)
		for (; wp != &oversize_list; wp = wp->next_bysize)
			copy_pattern(FREEPATTERN, wp->addr, wp->size);
	(void) mutex_unlock(&oversize_lock);

	/* Reinitialize free blocks. */
	for (cpuptr = &cpu_list[0]; cpuptr < &cpu_list[ncpus]; cpuptr++) {
		(void) mutex_lock(&cpuptr->mt_parent_lock);
		for (cachehead = &cpuptr->mt_caches[0]; cachehead <
			&cpuptr->mt_caches[NUM_CACHES]; cachehead++) {
			for (thiscache = cachehead->mt_cache; thiscache != NULL;
				thiscache = thiscache->mt_next) {
				(void) mutex_lock(&thiscache->mt_cache_lock);
				if (thiscache->mt_nfree == 0) {
					(void) mutex_unlock(
					    &thiscache->mt_cache_lock);
					continue;
				}
				reinit_cache(thiscache);
				(void) mutex_unlock(&thiscache->mt_cache_lock);
			}
		}
		(void) mutex_unlock(&cpuptr->mt_parent_lock);
	}
	reinit = 0;
}

static void
reinit_cache(cache_t *thiscache)
{
	uint32_t *freeblocks; /* not a uintptr_t on purpose */
	int32_t i, n;
	caddr_t ret;

	freeblocks = (uint32_t *)thiscache->mt_freelist;
	while (freeblocks < (uint32_t *)thiscache->mt_arena) {
		if (*freeblocks & 0xffffffff) {
			for (i = 0; i < 32; i++) {
				if (FLIP_EM(*freeblocks) &
				    (0x80000000 >> i)) {
					n = (uintptr_t)(((freeblocks -
					    (uint32_t *)thiscache->mt_freelist)
					    << 5) + i) * thiscache->mt_size;
					ret = thiscache->mt_arena + n;
					ret += OVERHEAD;
					copy_pattern(FREEPATTERN, ret,
					    thiscache->mt_size);
				}
			}
		}
		freeblocks++;
	}
}

static void *
malloc_internal(size_t size, percpu_t *cpuptr)
{
	cache_head_t *cachehead;
	cache_t *thiscache, *hintcache;
	int32_t i, n, logsz, bucket;
	uint32_t index;
	uint32_t *freeblocks; /* not a uintptr_t on purpose */
	caddr_t ret;

	logsz = MIN_CACHED_SHIFT;

	while (size > (1 << logsz))
		logsz++;

	bucket = logsz - MIN_CACHED_SHIFT;

	(void) mutex_lock(&cpuptr->mt_parent_lock);

	/*
	 * Find a cache of the appropriate size with free buffers.
	 *
	 * We don't need to lock each cache as we check their mt_nfree count,
	 * since:
	 *	1.  We are only looking for caches with mt_nfree > 0.  If a
	 *	   free happens during our search, it will increment mt_nfree,
	 *	   which will not affect the test.
	 *	2.  Allocations can decrement mt_nfree, but they can't happen
	 *	   as long as we hold mt_parent_lock.
	 */
1048*0Sstevel@tonic-gate 
1049*0Sstevel@tonic-gate 	cachehead = &cpuptr->mt_caches[bucket];
1050*0Sstevel@tonic-gate 
1051*0Sstevel@tonic-gate 	/* Search through the list, starting at the mt_hint */
1052*0Sstevel@tonic-gate 	thiscache = cachehead->mt_hint;
1053*0Sstevel@tonic-gate 
1054*0Sstevel@tonic-gate 	while (thiscache != NULL && thiscache->mt_nfree == 0)
1055*0Sstevel@tonic-gate 		thiscache = thiscache->mt_next;
1056*0Sstevel@tonic-gate 
1057*0Sstevel@tonic-gate 	if (thiscache == NULL) {
1058*0Sstevel@tonic-gate 		/* wrap around -- search up to the hint */
1059*0Sstevel@tonic-gate 		thiscache = cachehead->mt_cache;
1060*0Sstevel@tonic-gate 		hintcache = cachehead->mt_hint;
1061*0Sstevel@tonic-gate 
1062*0Sstevel@tonic-gate 		while (thiscache != NULL && thiscache != hintcache &&
1063*0Sstevel@tonic-gate 		    thiscache->mt_nfree == 0)
1064*0Sstevel@tonic-gate 			thiscache = thiscache->mt_next;
1065*0Sstevel@tonic-gate 
1066*0Sstevel@tonic-gate 		if (thiscache == hintcache)
1067*0Sstevel@tonic-gate 			thiscache = NULL;
1068*0Sstevel@tonic-gate 	}
1069*0Sstevel@tonic-gate 
1070*0Sstevel@tonic-gate 
1071*0Sstevel@tonic-gate 	if (thiscache == NULL) { /* there are no free caches */
1072*0Sstevel@tonic-gate 		int32_t thisrequest = requestsize;
1073*0Sstevel@tonic-gate 		int32_t buffer_size = (1 << logsz) + OVERHEAD;
1074*0Sstevel@tonic-gate 
1075*0Sstevel@tonic-gate 		thiscache = (cache_t *)morecore(thisrequest * HUNKSIZE);
1076*0Sstevel@tonic-gate 
1077*0Sstevel@tonic-gate 		if (thiscache == (cache_t *)-1) {
1078*0Sstevel@tonic-gate 		    (void) mutex_unlock(&cpuptr->mt_parent_lock);
1079*0Sstevel@tonic-gate 		    errno = EAGAIN;
1080*0Sstevel@tonic-gate 		    return (NULL);
1081*0Sstevel@tonic-gate 		}
1082*0Sstevel@tonic-gate 		create_cache(thiscache, buffer_size, thisrequest);
1083*0Sstevel@tonic-gate 
1084*0Sstevel@tonic-gate 		/* link in the new block at the beginning of the list */
1085*0Sstevel@tonic-gate 		thiscache->mt_next = cachehead->mt_cache;
1086*0Sstevel@tonic-gate 		cachehead->mt_cache = thiscache;
1087*0Sstevel@tonic-gate 	}
1088*0Sstevel@tonic-gate 
1089*0Sstevel@tonic-gate 	/* update the hint to the cache we found or created */
1090*0Sstevel@tonic-gate 	cachehead->mt_hint = thiscache;
1091*0Sstevel@tonic-gate 
1092*0Sstevel@tonic-gate 	/* thiscache now points to a cache with available space */
1093*0Sstevel@tonic-gate 	(void) mutex_lock(&thiscache->mt_cache_lock);
1094*0Sstevel@tonic-gate 
1095*0Sstevel@tonic-gate 	freeblocks = (uint32_t *)thiscache->mt_freelist;
1096*0Sstevel@tonic-gate 	while (freeblocks < (uint32_t *)thiscache->mt_arena) {
1097*0Sstevel@tonic-gate 		if (*freeblocks & 0xffffffff)
1098*0Sstevel@tonic-gate 			break;
1099*0Sstevel@tonic-gate 		freeblocks++;
1100*0Sstevel@tonic-gate 		if (freeblocks < (uint32_t *)thiscache->mt_arena &&
1101*0Sstevel@tonic-gate 		    *freeblocks & 0xffffffff)
1102*0Sstevel@tonic-gate 			break;
1103*0Sstevel@tonic-gate 		freeblocks++;
1104*0Sstevel@tonic-gate 		if (freeblocks < (uint32_t *)thiscache->mt_arena &&
1105*0Sstevel@tonic-gate 		    *freeblocks & 0xffffffff)
1106*0Sstevel@tonic-gate 			break;
1107*0Sstevel@tonic-gate 		freeblocks++;
1108*0Sstevel@tonic-gate 		if (freeblocks < (uint32_t *)thiscache->mt_arena &&
1109*0Sstevel@tonic-gate 		    *freeblocks & 0xffffffff)
1110*0Sstevel@tonic-gate 			break;
1111*0Sstevel@tonic-gate 		freeblocks++;
1112*0Sstevel@tonic-gate 	}
1113*0Sstevel@tonic-gate 
1114*0Sstevel@tonic-gate 	/*
1115*0Sstevel@tonic-gate 	 * Find the first set bit, scanning down from the MSB.  This bit,
1116*0Sstevel@tonic-gate 	 * plus 32 times the word offset of freeblocks from mt_freelist,
1117*0Sstevel@tonic-gate 	 * gives the buffer index; n below scales it to an arena offset.
1118*0Sstevel@tonic-gate 	 */
1119*0Sstevel@tonic-gate 	for (i = 0; i < 32; ) {
1120*0Sstevel@tonic-gate 		if (FLIP_EM(*freeblocks) & (0x80000000 >> i++))
1121*0Sstevel@tonic-gate 			break;
1122*0Sstevel@tonic-gate 		if (FLIP_EM(*freeblocks) & (0x80000000 >> i++))
1123*0Sstevel@tonic-gate 			break;
1124*0Sstevel@tonic-gate 		if (FLIP_EM(*freeblocks) & (0x80000000 >> i++))
1125*0Sstevel@tonic-gate 			break;
1126*0Sstevel@tonic-gate 		if (FLIP_EM(*freeblocks) & (0x80000000 >> i++))
1127*0Sstevel@tonic-gate 			break;
1128*0Sstevel@tonic-gate 	}
1129*0Sstevel@tonic-gate 	index = 0x80000000 >> --i;
1130*0Sstevel@tonic-gate 
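	/*
	 * Clear the chosen bit: that buffer is now allocated.  FLIP_EM
	 * normalizes byte order so bit positions match the scan above.
	 */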
1132*0Sstevel@tonic-gate 	*freeblocks &= FLIP_EM(~index);
1133*0Sstevel@tonic-gate 
1134*0Sstevel@tonic-gate 	thiscache->mt_nfree--;
1135*0Sstevel@tonic-gate 
1136*0Sstevel@tonic-gate 	(void) mutex_unlock(&thiscache->mt_cache_lock);
1137*0Sstevel@tonic-gate 	(void) mutex_unlock(&cpuptr->mt_parent_lock);
1138*0Sstevel@tonic-gate 
1139*0Sstevel@tonic-gate 	n = (uintptr_t)(((freeblocks - (uint32_t *)thiscache->mt_freelist) << 5)
1140*0Sstevel@tonic-gate 		+ i) * thiscache->mt_size;
1141*0Sstevel@tonic-gate 	/*
1142*0Sstevel@tonic-gate 	 * We now have the arena offset in n and have cleared the
1143*0Sstevel@tonic-gate 	 * buffer's bit in the free mask.  All that is left is to locate
1144*0Sstevel@tonic-gate 	 * the block in the arena, record thiscache in the word just
1145*0Sstevel@tonic-gate 	 * ahead of the address we hand out, and return the memory to
1146*0Sstevel@tonic-gate 	 * the user.
1147*0Sstevel@tonic-gate 	 */
1148*0Sstevel@tonic-gate 	ret = thiscache->mt_arena + n;
1149*0Sstevel@tonic-gate 
1150*0Sstevel@tonic-gate 	/* Store the cache addr for this buf. Makes free go fast. */
1151*0Sstevel@tonic-gate 	*(uintptr_t *)ret = (uintptr_t)thiscache;
1152*0Sstevel@tonic-gate 
1153*0Sstevel@tonic-gate 	/*
1154*0Sstevel@tonic-gate 	 * This assert makes sure we don't hand out memory that is not
1155*0Sstevel@tonic-gate 	 * owned by this cache.
1156*0Sstevel@tonic-gate 	 */
1157*0Sstevel@tonic-gate 	assert(ret + thiscache->mt_size <= thiscache->mt_freelist +
1158*0Sstevel@tonic-gate 		thiscache->mt_span);
1159*0Sstevel@tonic-gate 
1160*0Sstevel@tonic-gate 	ret += OVERHEAD;
1161*0Sstevel@tonic-gate 
1162*0Sstevel@tonic-gate 	assert(((uintptr_t)ret & 7) == 0); /* are we 8 byte aligned */
1163*0Sstevel@tonic-gate 
1164*0Sstevel@tonic-gate 	if (reinit == 0 && (debugopt & MTDEBUGPATTERN))
1165*0Sstevel@tonic-gate 		if (verify_pattern(FREEPATTERN, ret, size))
1166*0Sstevel@tonic-gate 			abort();	/* reference after free */
1167*0Sstevel@tonic-gate 
1168*0Sstevel@tonic-gate 	if (debugopt & MTINITBUFFER)
1169*0Sstevel@tonic-gate 		copy_pattern(INITPATTERN, ret, size);
1170*0Sstevel@tonic-gate 	return ((void *)ret);
1171*0Sstevel@tonic-gate }
1172*0Sstevel@tonic-gate 
1173*0Sstevel@tonic-gate static void *
1174*0Sstevel@tonic-gate morecore(size_t bytes)
1175*0Sstevel@tonic-gate {
1176*0Sstevel@tonic-gate 	void *ret;
1177*0Sstevel@tonic-gate 
1178*0Sstevel@tonic-gate 	if (bytes > LONG_MAX) {
1179*0Sstevel@tonic-gate 		intptr_t wad;
1180*0Sstevel@tonic-gate 		/*
1181*0Sstevel@tonic-gate 		 * The request is too big to issue at once.  sbrk() takes a
1182*0Sstevel@tonic-gate 		 * signed increment, so do this in LONG_MAX sized chunks.
1183*0Sstevel@tonic-gate 		 */
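		/* A (size_t)-1 request can never be satisfied; fail fast. */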
1184*0Sstevel@tonic-gate 		if (bytes == ULONG_MAX)
1185*0Sstevel@tonic-gate 			return ((void *)-1);
1186*0Sstevel@tonic-gate 
1187*0Sstevel@tonic-gate 		ret = sbrk(0);
1188*0Sstevel@tonic-gate 		wad = LONG_MAX;
1189*0Sstevel@tonic-gate 		while (wad > 0) {
1190*0Sstevel@tonic-gate 			if (sbrk(wad) == (void *)-1) {
1191*0Sstevel@tonic-gate 				if (ret != sbrk(0))
1192*0Sstevel@tonic-gate 					(void) sbrk(-LONG_MAX);
1193*0Sstevel@tonic-gate 				return ((void *)-1);
1194*0Sstevel@tonic-gate 			}
1195*0Sstevel@tonic-gate 			bytes -= LONG_MAX;
1196*0Sstevel@tonic-gate 			wad = bytes;
1197*0Sstevel@tonic-gate 		}
1198*0Sstevel@tonic-gate 	} else
1199*0Sstevel@tonic-gate 		ret = sbrk(bytes);
1200*0Sstevel@tonic-gate 
1201*0Sstevel@tonic-gate 	return (ret);
1202*0Sstevel@tonic-gate }
1203*0Sstevel@tonic-gate 
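/*
 * Worked example for morecore() (illustrative numbers, ILP32 assumed):
 * with LONG_MAX == 0x7fffffff, a hypothetical request of 0x90000000
 * bytes is issued as sbrk(0x7fffffff) followed by sbrk(0x10000001).
 * If the second call fails, the break has already moved, so
 * sbrk(-LONG_MAX) backs out the first extension before failure is
 * returned.
 */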
1204*0Sstevel@tonic-gate 
1205*0Sstevel@tonic-gate static void *
1206*0Sstevel@tonic-gate oversize(size_t size)
1207*0Sstevel@tonic-gate {
1208*0Sstevel@tonic-gate 	caddr_t ret;
1209*0Sstevel@tonic-gate 	oversize_t *big;
1210*0Sstevel@tonic-gate 	int bucket;
1211*0Sstevel@tonic-gate 
1212*0Sstevel@tonic-gate 	/*
1213*0Sstevel@tonic-gate 	 * A single global lock is acceptable here: an oversize
1214*0Sstevel@tonic-gate 	 * allocation means a call to morecore(), so we are going to
1215*0Sstevel@tonic-gate 	 * block in the kernel anyway.
1216*0Sstevel@tonic-gate 	 */
1217*0Sstevel@tonic-gate 	(void) mutex_lock(&oversize_lock);
1218*0Sstevel@tonic-gate 
1219*0Sstevel@tonic-gate 	/*
1220*0Sstevel@tonic-gate 	 * Since we ensure every address we hand back is
1221*0Sstevel@tonic-gate 	 * MTMALLOC_MIN_ALIGN-byte aligned, ALIGNing size ensures that the
1222*0Sstevel@tonic-gate 	 * memory handed out is MTMALLOC_MIN_ALIGN-byte aligned at both ends.
1223*0Sstevel@tonic-gate 	 * This eases the implementation of MTDEBUGPATTERN and MTINITBUFFER,
1224*0Sstevel@tonic-gate 	 * particularly where coalescing occurs.
1225*0Sstevel@tonic-gate 	 */
1226*0Sstevel@tonic-gate 	size = ALIGN(size, MTMALLOC_MIN_ALIGN);
1227*0Sstevel@tonic-gate 
1228*0Sstevel@tonic-gate 	if ((big = find_oversize(size)) != NULL) {
1229*0Sstevel@tonic-gate 		if (reinit == 0 && (debugopt & MTDEBUGPATTERN))
1230*0Sstevel@tonic-gate 			if (verify_pattern(FREEPATTERN, big->addr, size))
1231*0Sstevel@tonic-gate 				abort();	/* reference after free */
1232*0Sstevel@tonic-gate 	} else {
1233*0Sstevel@tonic-gate 		/* Get more 8-byte aligned memory from heap */
1234*0Sstevel@tonic-gate 		ret = morecore(size + OVSZ_HEADER_SIZE);
1235*0Sstevel@tonic-gate 		if (ret == (caddr_t)-1) {
1236*0Sstevel@tonic-gate 			(void) mutex_unlock(&oversize_lock);
1237*0Sstevel@tonic-gate 			errno = ENOMEM;
1238*0Sstevel@tonic-gate 			return (NULL);
1239*0Sstevel@tonic-gate 		}
1240*0Sstevel@tonic-gate 		big = oversize_header_alloc((uintptr_t)ret, size);
1241*0Sstevel@tonic-gate 	}
1242*0Sstevel@tonic-gate 	ret = big->addr;
1243*0Sstevel@tonic-gate 
1244*0Sstevel@tonic-gate 	/* Add big to the hash table at the head of the relevant bucket. */
1245*0Sstevel@tonic-gate 	bucket = HASH_OVERSIZE(ret);
1246*0Sstevel@tonic-gate 	big->hash_next = ovsz_hashtab[bucket];
1247*0Sstevel@tonic-gate 	ovsz_hashtab[bucket] = big;
1248*0Sstevel@tonic-gate 
1249*0Sstevel@tonic-gate 	if (debugopt & MTINITBUFFER)
1250*0Sstevel@tonic-gate 		copy_pattern(INITPATTERN, ret, size);
1251*0Sstevel@tonic-gate 
1252*0Sstevel@tonic-gate 	(void) mutex_unlock(&oversize_lock);
1253*0Sstevel@tonic-gate 	assert(((uintptr_t)ret & 7) == 0); /* are we 8 byte aligned */
1254*0Sstevel@tonic-gate 	return ((void *)ret);
1255*0Sstevel@tonic-gate }
1256*0Sstevel@tonic-gate 
1257*0Sstevel@tonic-gate static void
1258*0Sstevel@tonic-gate insert_oversize(oversize_t *op, oversize_t *nx)
1259*0Sstevel@tonic-gate {
1260*0Sstevel@tonic-gate 	oversize_t *sp;
1261*0Sstevel@tonic-gate 
1262*0Sstevel@tonic-gate 	/* locate correct insertion point in size-ordered list */
1263*0Sstevel@tonic-gate 	for (sp = oversize_list.next_bysize;
1264*0Sstevel@tonic-gate 	    sp != &oversize_list && (op->size > sp->size);
1265*0Sstevel@tonic-gate 	    sp = sp->next_bysize)
1266*0Sstevel@tonic-gate 		;
1267*0Sstevel@tonic-gate 
1268*0Sstevel@tonic-gate 	/* link into size-ordered list */
1269*0Sstevel@tonic-gate 	op->next_bysize = sp;
1270*0Sstevel@tonic-gate 	op->prev_bysize = sp->prev_bysize;
1271*0Sstevel@tonic-gate 	op->prev_bysize->next_bysize = op;
1272*0Sstevel@tonic-gate 	op->next_bysize->prev_bysize = op;
1273*0Sstevel@tonic-gate 
1274*0Sstevel@tonic-gate 	/*
1275*0Sstevel@tonic-gate 	 * link item into address-ordered list
1276*0Sstevel@tonic-gate 	 * (caller provides insertion point as an optimization)
1277*0Sstevel@tonic-gate 	 */
1278*0Sstevel@tonic-gate 	op->next_byaddr = nx;
1279*0Sstevel@tonic-gate 	op->prev_byaddr = nx->prev_byaddr;
1280*0Sstevel@tonic-gate 	op->prev_byaddr->next_byaddr = op;
1281*0Sstevel@tonic-gate 	op->next_byaddr->prev_byaddr = op;
1282*0Sstevel@tonic-gate 
1283*0Sstevel@tonic-gate }
1284*0Sstevel@tonic-gate 
1285*0Sstevel@tonic-gate static void
1286*0Sstevel@tonic-gate unlink_oversize(oversize_t *lp)
1287*0Sstevel@tonic-gate {
1288*0Sstevel@tonic-gate 	/* unlink from address list */
1289*0Sstevel@tonic-gate 	lp->prev_byaddr->next_byaddr = lp->next_byaddr;
1290*0Sstevel@tonic-gate 	lp->next_byaddr->prev_byaddr = lp->prev_byaddr;
1291*0Sstevel@tonic-gate 
1292*0Sstevel@tonic-gate 	/* unlink from size list */
1293*0Sstevel@tonic-gate 	lp->prev_bysize->next_bysize = lp->next_bysize;
1294*0Sstevel@tonic-gate 	lp->next_bysize->prev_bysize = lp->prev_bysize;
1295*0Sstevel@tonic-gate }
1296*0Sstevel@tonic-gate 
1297*0Sstevel@tonic-gate static void
1298*0Sstevel@tonic-gate position_oversize_by_size(oversize_t *op)
1299*0Sstevel@tonic-gate {
1300*0Sstevel@tonic-gate 	oversize_t *sp;
1301*0Sstevel@tonic-gate 
1302*0Sstevel@tonic-gate 	if (op->size > op->next_bysize->size ||
1303*0Sstevel@tonic-gate 	    op->size < op->prev_bysize->size) {
1304*0Sstevel@tonic-gate 
1305*0Sstevel@tonic-gate 		/* unlink from size list */
1306*0Sstevel@tonic-gate 		op->prev_bysize->next_bysize = op->next_bysize;
1307*0Sstevel@tonic-gate 		op->next_bysize->prev_bysize = op->prev_bysize;
1308*0Sstevel@tonic-gate 
1309*0Sstevel@tonic-gate 		/* locate correct insertion point in size-ordered list */
1310*0Sstevel@tonic-gate 		for (sp = oversize_list.next_bysize;
1311*0Sstevel@tonic-gate 		    sp != &oversize_list && (op->size > sp->size);
1312*0Sstevel@tonic-gate 		    sp = sp->next_bysize)
1313*0Sstevel@tonic-gate 			;
1314*0Sstevel@tonic-gate 
1315*0Sstevel@tonic-gate 		/* link into size-ordered list */
1316*0Sstevel@tonic-gate 		op->next_bysize = sp;
1317*0Sstevel@tonic-gate 		op->prev_bysize = sp->prev_bysize;
1318*0Sstevel@tonic-gate 		op->prev_bysize->next_bysize = op;
1319*0Sstevel@tonic-gate 		op->next_bysize->prev_bysize = op;
1320*0Sstevel@tonic-gate 	}
1321*0Sstevel@tonic-gate }
1322*0Sstevel@tonic-gate 
1323*0Sstevel@tonic-gate static void
1324*0Sstevel@tonic-gate add_oversize(oversize_t *lp)
1325*0Sstevel@tonic-gate {
1326*0Sstevel@tonic-gate 	int merge_flags = INSERT_ONLY;
1327*0Sstevel@tonic-gate 	oversize_t *nx;  	/* ptr to item right of insertion point */
1328*0Sstevel@tonic-gate 	oversize_t *pv;  	/* ptr to item left of insertion point */
1329*0Sstevel@tonic-gate 	uint_t size_lp, size_pv, size_nx;
1330*0Sstevel@tonic-gate 	uintptr_t endp_lp, endp_pv, endp_nx;
1331*0Sstevel@tonic-gate 
1332*0Sstevel@tonic-gate 	/*
1333*0Sstevel@tonic-gate 	 * Locate insertion point in address-ordered list
1334*0Sstevel@tonic-gate 	 */
1335*0Sstevel@tonic-gate 
1336*0Sstevel@tonic-gate 	for (nx = oversize_list.next_byaddr;
1337*0Sstevel@tonic-gate 	    nx != &oversize_list && (lp->addr > nx->addr);
1338*0Sstevel@tonic-gate 	    nx = nx->next_byaddr)
1339*0Sstevel@tonic-gate 		;
1340*0Sstevel@tonic-gate 
1341*0Sstevel@tonic-gate 	/*
1342*0Sstevel@tonic-gate 	 * Determine how to add chunk to oversize freelist
1343*0Sstevel@tonic-gate 	 */
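	/*
	 * The freed chunk lp may abut the free chunk before it (pv),
	 * the one after it (nx), both, or neither; pv or nx with a size
	 * of 0 is the list head rather than a real neighbor.  Adjacency
	 * on each side sets a flag, and the switch below handles the
	 * four resulting cases.
	 */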
1344*0Sstevel@tonic-gate 
1345*0Sstevel@tonic-gate 	size_lp = OVSZ_HEADER_SIZE + lp->size;
1346*0Sstevel@tonic-gate 	endp_lp = ALIGN((uintptr_t)lp + size_lp, MTMALLOC_MIN_ALIGN);
1347*0Sstevel@tonic-gate 	size_lp = endp_lp - (uintptr_t)lp;
1348*0Sstevel@tonic-gate 
1349*0Sstevel@tonic-gate 	pv = nx->prev_byaddr;
1350*0Sstevel@tonic-gate 
1351*0Sstevel@tonic-gate 	if (pv->size) {
1352*0Sstevel@tonic-gate 
1353*0Sstevel@tonic-gate 		size_pv = OVSZ_HEADER_SIZE + pv->size;
1354*0Sstevel@tonic-gate 		endp_pv = ALIGN((uintptr_t)pv + size_pv,
1355*0Sstevel@tonic-gate 		    MTMALLOC_MIN_ALIGN);
1356*0Sstevel@tonic-gate 		size_pv = endp_pv - (uintptr_t)pv;
1357*0Sstevel@tonic-gate 
1358*0Sstevel@tonic-gate 		/* Check for adjacency with left chunk */
1359*0Sstevel@tonic-gate 		if ((uintptr_t)lp == endp_pv)
1360*0Sstevel@tonic-gate 			merge_flags |= COALESCE_LEFT;
1361*0Sstevel@tonic-gate 	}
1362*0Sstevel@tonic-gate 
1363*0Sstevel@tonic-gate 	if (nx->size) {
1364*0Sstevel@tonic-gate 
1365*0Sstevel@tonic-gate 		/* Check for adjacency with right chunk */
1366*0Sstevel@tonic-gate 		if ((uintptr_t)nx == endp_lp) {
1367*0Sstevel@tonic-gate 			size_nx = OVSZ_HEADER_SIZE + nx->size;
1368*0Sstevel@tonic-gate 			endp_nx = ALIGN((uintptr_t)nx + size_nx,
1369*0Sstevel@tonic-gate 			    MTMALLOC_MIN_ALIGN);
1370*0Sstevel@tonic-gate 			size_nx = endp_nx - (uintptr_t)nx;
1371*0Sstevel@tonic-gate 			merge_flags |= COALESCE_RIGHT;
1372*0Sstevel@tonic-gate 		}
1373*0Sstevel@tonic-gate 	}
1374*0Sstevel@tonic-gate 
1375*0Sstevel@tonic-gate 	/*
1376*0Sstevel@tonic-gate 	 * If MTDEBUGPATTERN is set, lp->addr will have been overwritten
1377*0Sstevel@tonic-gate 	 * with FREEPATTERN for lp->size bytes.  If we can merge, the
1378*0Sstevel@tonic-gate 	 * oversize header(s) that also become reallocatable memory
1379*0Sstevel@tonic-gate 	 * (i.e. lp and/or nx) must be overwritten with FREEPATTERN too,
1380*0Sstevel@tonic-gate 	 * or we will SIGABRT when this memory is next handed out.
1381*0Sstevel@tonic-gate 	 */
1382*0Sstevel@tonic-gate 	switch (merge_flags) {
1383*0Sstevel@tonic-gate 
1384*0Sstevel@tonic-gate 	case INSERT_ONLY:		/* Coalescing not possible */
1385*0Sstevel@tonic-gate 		insert_oversize(lp, nx);
1386*0Sstevel@tonic-gate 		break;
1387*0Sstevel@tonic-gate 	case COALESCE_LEFT:
1388*0Sstevel@tonic-gate 		pv->size += size_lp;
1389*0Sstevel@tonic-gate 		position_oversize_by_size(pv);
1390*0Sstevel@tonic-gate 		if (debugopt & MTDEBUGPATTERN)
1391*0Sstevel@tonic-gate 			copy_pattern(FREEPATTERN, lp, OVSZ_HEADER_SIZE);
1392*0Sstevel@tonic-gate 		break;
1393*0Sstevel@tonic-gate 	case COALESCE_RIGHT:
1394*0Sstevel@tonic-gate 		unlink_oversize(nx);
1395*0Sstevel@tonic-gate 		lp->size += size_nx;
1396*0Sstevel@tonic-gate 		insert_oversize(lp, pv->next_byaddr);
1397*0Sstevel@tonic-gate 		if (debugopt & MTDEBUGPATTERN)
1398*0Sstevel@tonic-gate 			copy_pattern(FREEPATTERN, nx, OVSZ_HEADER_SIZE);
1399*0Sstevel@tonic-gate 		break;
1400*0Sstevel@tonic-gate 	case COALESCE_WITH_BOTH_SIDES:	/* Merge (with right) to the left */
1401*0Sstevel@tonic-gate 		pv->size += size_lp + size_nx;
1402*0Sstevel@tonic-gate 		unlink_oversize(nx);
1403*0Sstevel@tonic-gate 		position_oversize_by_size(pv);
1404*0Sstevel@tonic-gate 		if (debugopt & MTDEBUGPATTERN) {
1405*0Sstevel@tonic-gate 			copy_pattern(FREEPATTERN, lp, OVSZ_HEADER_SIZE);
1406*0Sstevel@tonic-gate 			copy_pattern(FREEPATTERN, nx, OVSZ_HEADER_SIZE);
1407*0Sstevel@tonic-gate 		}
1408*0Sstevel@tonic-gate 		break;
1409*0Sstevel@tonic-gate 	}
1410*0Sstevel@tonic-gate }
1411*0Sstevel@tonic-gate 
1412*0Sstevel@tonic-gate /*
1413*0Sstevel@tonic-gate  * Find a block on our freelist that is at least size bytes.  If the
1414*0Sstevel@tonic-gate  * block found is big enough, break it up and return the associated
1415*0Sstevel@tonic-gate  * oversize_t struct to the calling client; any leftover piece of the
1416*0Sstevel@tonic-gate  * block goes back on the freelist.
1417*0Sstevel@tonic-gate  */
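/*
 * Illustrative example (sizes assumed, not from the original): for a
 * 128K request satisfied from a 1 MB free block, the leftover after
 * carving off the request and a new header is well over MAX_CACHED,
 * so the block is split: the front piece is handed out and the tail,
 * given a fresh header by oversize_header_alloc(), goes back on the
 * freelist.  A smaller leftover is not worth tracking separately, so
 * the whole block is handed out instead.
 */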
1418*0Sstevel@tonic-gate static oversize_t *
1419*0Sstevel@tonic-gate find_oversize(size_t size)
1420*0Sstevel@tonic-gate {
1421*0Sstevel@tonic-gate 	oversize_t *wp = oversize_list.next_bysize;
1422*0Sstevel@tonic-gate 	while (wp != &oversize_list && size > wp->size)
1423*0Sstevel@tonic-gate 		wp = wp->next_bysize;
1424*0Sstevel@tonic-gate 
1425*0Sstevel@tonic-gate 	if (wp == &oversize_list) /* empty list or nothing big enough */
1426*0Sstevel@tonic-gate 		return (NULL);
1427*0Sstevel@tonic-gate 	/* breaking up a chunk of memory */
1428*0Sstevel@tonic-gate 	if ((long)((wp->size - (size + OVSZ_HEADER_SIZE + MTMALLOC_MIN_ALIGN)))
1429*0Sstevel@tonic-gate 	    > MAX_CACHED) {
1430*0Sstevel@tonic-gate 		caddr_t off;
1431*0Sstevel@tonic-gate 		oversize_t *np;
1432*0Sstevel@tonic-gate 		size_t osize;
1433*0Sstevel@tonic-gate 		off = (caddr_t)ALIGN(wp->addr + size,
1434*0Sstevel@tonic-gate 		    MTMALLOC_MIN_ALIGN);
1435*0Sstevel@tonic-gate 		osize = wp->size;
1436*0Sstevel@tonic-gate 		wp->size = (size_t)(off - wp->addr);
1437*0Sstevel@tonic-gate 		np = oversize_header_alloc((uintptr_t)off,
1438*0Sstevel@tonic-gate 		    osize - (wp->size + OVSZ_HEADER_SIZE));
1439*0Sstevel@tonic-gate 		if ((long)np->size < 0)
1440*0Sstevel@tonic-gate 			abort();
1441*0Sstevel@tonic-gate 		unlink_oversize(wp);
1442*0Sstevel@tonic-gate 		add_oversize(np);
1443*0Sstevel@tonic-gate 	} else {
1444*0Sstevel@tonic-gate 		unlink_oversize(wp);
1445*0Sstevel@tonic-gate 	}
1446*0Sstevel@tonic-gate 	return (wp);
1447*0Sstevel@tonic-gate }
1448*0Sstevel@tonic-gate 
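/*
 * Fill a buffer with a 32-bit pattern: the first loop stores four
 * words per iteration, the second finishes any remaining whole words.
 */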
1449*0Sstevel@tonic-gate static void
1450*0Sstevel@tonic-gate copy_pattern(uint32_t pattern, void *buf_arg, size_t size)
1451*0Sstevel@tonic-gate {
1452*0Sstevel@tonic-gate 	uint32_t *bufend = (uint32_t *)((char *)buf_arg + size);
1453*0Sstevel@tonic-gate 	uint32_t *buf = buf_arg;
1454*0Sstevel@tonic-gate 
1455*0Sstevel@tonic-gate 	while (buf < bufend - 3) {
1456*0Sstevel@tonic-gate 		buf[3] = buf[2] = buf[1] = buf[0] = pattern;
1457*0Sstevel@tonic-gate 		buf += 4;
1458*0Sstevel@tonic-gate 	}
1459*0Sstevel@tonic-gate 	while (buf < bufend)
1460*0Sstevel@tonic-gate 		*buf++ = pattern;
1461*0Sstevel@tonic-gate }
1462*0Sstevel@tonic-gate 
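/*
 * Return the address of the first word that differs from pattern, or
 * NULL if the entire buffer matches.
 */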
1463*0Sstevel@tonic-gate static void *
1464*0Sstevel@tonic-gate verify_pattern(uint32_t pattern, void *buf_arg, size_t size)
1465*0Sstevel@tonic-gate {
1466*0Sstevel@tonic-gate 	uint32_t *bufend = (uint32_t *)((char *)buf_arg + size);
1467*0Sstevel@tonic-gate 	uint32_t *buf;
1468*0Sstevel@tonic-gate 
1469*0Sstevel@tonic-gate 	for (buf = buf_arg; buf < bufend; buf++)
1470*0Sstevel@tonic-gate 		if (*buf != pattern)
1471*0Sstevel@tonic-gate 			return (buf);
1472*0Sstevel@tonic-gate 	return (NULL);
1473*0Sstevel@tonic-gate }
1474*0Sstevel@tonic-gate 
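/* Return an oversize chunk to the coalescing oversize freelist. */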
1475*0Sstevel@tonic-gate static void
1476*0Sstevel@tonic-gate free_oversize(oversize_t *ovp)
1477*0Sstevel@tonic-gate {
1478*0Sstevel@tonic-gate 	assert(((uintptr_t)ovp->addr & 7) == 0); /* are we 8 byte aligned */
1479*0Sstevel@tonic-gate 	assert(ovp->size > MAX_CACHED);
1480*0Sstevel@tonic-gate 
1481*0Sstevel@tonic-gate 	ovp->next_bysize = ovp->prev_bysize = NULL;
1482*0Sstevel@tonic-gate 	ovp->next_byaddr = ovp->prev_byaddr = NULL;
1483*0Sstevel@tonic-gate 	(void) mutex_lock(&oversize_lock);
1484*0Sstevel@tonic-gate 	add_oversize(ovp);
1485*0Sstevel@tonic-gate 	(void) mutex_unlock(&oversize_lock);
1486*0Sstevel@tonic-gate }
1487*0Sstevel@tonic-gate 
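/*
 * Lay an oversize header down at the start of a raw chunk.  The
 * resulting layout is:
 *
 *	+--------------------+-------------------------+-----------~~~
 *	| oversize_t         | MTMALLOC_OVERSIZE_MAGIC | user data
 *	| (OVSZ_SIZE bytes)  | (OVERHEAD bytes)        | (size bytes)
 *	+--------------------+-------------------------+-----------~~~
 *
 * addr points at the user data, which must end up 8-byte aligned.
 */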
1488*0Sstevel@tonic-gate static oversize_t *
1489*0Sstevel@tonic-gate oversize_header_alloc(uintptr_t mem, size_t size)
1490*0Sstevel@tonic-gate {
1491*0Sstevel@tonic-gate 	oversize_t *ovsz_hdr;
1492*0Sstevel@tonic-gate 
1493*0Sstevel@tonic-gate 	assert(size > MAX_CACHED);
1494*0Sstevel@tonic-gate 
1495*0Sstevel@tonic-gate 	ovsz_hdr = (oversize_t *)mem;
1496*0Sstevel@tonic-gate 	ovsz_hdr->prev_bysize = NULL;
1497*0Sstevel@tonic-gate 	ovsz_hdr->next_bysize = NULL;
1498*0Sstevel@tonic-gate 	ovsz_hdr->prev_byaddr = NULL;
1499*0Sstevel@tonic-gate 	ovsz_hdr->next_byaddr = NULL;
1500*0Sstevel@tonic-gate 	ovsz_hdr->hash_next = NULL;
1501*0Sstevel@tonic-gate 	ovsz_hdr->size = size;
1502*0Sstevel@tonic-gate 	mem += OVSZ_SIZE;
1503*0Sstevel@tonic-gate 	*(uintptr_t *)mem = MTMALLOC_OVERSIZE_MAGIC;
1504*0Sstevel@tonic-gate 	mem += OVERHEAD;
1505*0Sstevel@tonic-gate 	assert(((uintptr_t)mem & 7) == 0); /* are we 8 byte aligned */
1506*0Sstevel@tonic-gate 	ovsz_hdr->addr = (caddr_t)mem;
1507*0Sstevel@tonic-gate 	return (ovsz_hdr);
1508*0Sstevel@tonic-gate }