1 /*
2  * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
3  *
4  * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com> and by
8  * Venkatesh Srinivas <me@endeavour.zapto.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
38  */
39 /*
40  * This module implements a slab allocator drop-in replacement for the
41  * libc malloc().
42  *
43  * A slab allocator reserves a ZONE for each chunk size, then lays the
44  * chunks out in an array within the zone.  Allocation and deallocation
45  * is nearly instantaneous, and overhead losses are limited to a fixed
46  * worst-case amount.
47  *
48  * The slab allocator does not have to pre-initialize the list of
49  * free chunks for each zone, and the underlying VM will not be
50  * touched at all beyond the zone header until an actual allocation
51  * needs it.
52  *
53  * Slab management and locking is done on a per-zone basis.
54  *
55  *	Alloc Size	Chunking        Number of zones
56  *	0-127		8		16
57  *	128-255		16		8
58  *	256-511		32		8
59  *	512-1023	64		8
60  *	1024-2047	128		8
61  *	2048-4095	256		8
62  *	4096-8191	512		8
63  *	8192-16383	1024		8
64  *	16384-32767	2048		8
65  *
66  *	Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67  *	is used to locate them on free().  One and two-page allocations use
68  *	the zone mechanism to avoid excessive mmap()/munmap() calls.
69  *
70  *			   API FEATURES AND SIDE EFFECTS
71  *
72  *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73  *	Above that power-of-2 sized allocations are page-aligned.  Non
74  *	power-of-2 sized allocations are aligned the same as the chunk
75  *	size for their zone.
76  *    + malloc(0) returns a special non-NULL value
77  *    + ability to allocate arbitrarily large chunks of memory
78  *    + realloc will reuse the passed pointer if possible, within the
79  *	limitations of the zone chunking.
80  *
81  * Multithreaded enhancements for small allocations introduced August 2010.
82  * These are in the spirit of 'libumem'. See:
83  *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84  *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85  * 	USENIX Technical Conference. USENIX Association.
86  *
87  * TUNING
88  *
89  * The value of the environment variable MALLOC_OPTIONS is a character string
90  * containing various flags to tune nmalloc.
91  *
92  * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
93  *			This will generate utrace events for all malloc,
94  *			realloc, and free calls. There are tools (mtrplay) to
95  *			replay an allocation pattern or to graph heap structure
96  *			(mtrgraph) which can interpret these logs.
97  * 'Z'   / ['z']	Zero out / do not zero all allocations.
98  *			Each new byte of memory allocated by malloc, realloc, or
99  *			reallocf will be initialized to 0. This is intended for
100  *			debugging and will affect performance negatively.
101  * 'H'	/  ['h']	Pass / do not pass a hint (madvise) to the kernel
102  *			about pages no longer used by the allocation functions.
103  */
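
/*
 * ILLUSTRATIVE USAGE (a sketch only, not compiled into libc)
 *
 * A quick example of the tuning knobs and API behaviors described above;
 * the program name and option string are arbitrary:
 *
 *	$ env MALLOC_OPTIONS=ZU ktrace ./prog	# zero allocations, log utrace
 *
 *	void *p = malloc(0);	// returns the special non-NULL pointer
 *	void *q = malloc(64);	// power-of-2 size: 64-byte aligned
 *	q = realloc(q, 60);	// 60 rounds to the same 64-byte chunk; q reused
 *	free(q);
 *	free(p);		// the zero-length pointer is recognized
 */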
104 
105 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
106 
107 #include "libc_private.h"
108 
109 #include <sys/param.h>
110 #include <sys/types.h>
111 #include <sys/mman.h>
112 #include <sys/queue.h>
113 #include <sys/uio.h>
114 #include <sys/ktrace.h>
115 #include <stdio.h>
116 #include <stdint.h>
117 #include <stdlib.h>
118 #include <stdarg.h>
119 #include <stddef.h>
120 #include <unistd.h>
121 #include <string.h>
122 #include <fcntl.h>
123 #include <errno.h>
124 #include <pthread.h>
125 
126 #include "spinlock.h"
127 #include "un-namespace.h"
128 
129 static char rcsid[] = "$Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 sv5679 Exp $";
130 
131 /*
132  * Linked list of large allocations
133  */
134 typedef struct bigalloc {
135 	struct bigalloc *next;	/* hash link */
136 	void	*base;		/* base pointer */
137 	u_long	bytes;		/* bytes allocated */
138 } *bigalloc_t;
139 
140 /*
141  * Note that page-multiple allocations beyond two pages, or allocations
142  * >= ZALLOC_ZONE_LIMIT, fall through to the mmap-based bigalloc code.
143  */
144 #define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
145 #define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
146 #define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
147 #define ZALLOC_ZONE_SIZE	(64 * 1024)
148 #define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
149 #define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */
150 
151 #if ZALLOC_ZONE_LIMIT == 16384
152 #define NZONES			72
153 #elif ZALLOC_ZONE_LIMIT == 32768
154 #define NZONES			80
155 #else
156 #error "I couldn't figure out NZONES"
157 #endif
158 
159 /*
160  * Chunk structure for free elements
161  */
162 typedef struct slchunk {
163 	struct slchunk *c_Next;
164 } *slchunk_t;
165 
166 /*
167  * The IN-BAND zone header is placed at the beginning of each zone.
168  */
169 struct slglobaldata;
170 
171 typedef struct slzone {
172 	int32_t		z_Magic;	/* magic number for sanity check */
173 	int		z_NFree;	/* total free chunks / ualloc space */
174 	struct slzone *z_Next;		/* ZoneAry[] link if z_NFree non-zero */
175 	int		z_NMax;		/* maximum free chunks */
176 	char		*z_BasePtr;	/* pointer to start of chunk array */
177 	int		z_UIndex;	/* current initial allocation index */
178 	int		z_UEndIndex;	/* last (first) allocation index */
179 	int		z_ChunkSize;	/* chunk size for validation */
180 	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
181 	int		z_ZoneIndex;
182 	int		z_Flags;
183 	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
184 #if defined(INVARIANTS)
185 	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
186 #endif
187 } *slzone_t;
188 
189 typedef struct slglobaldata {
190 	spinlock_t	Spinlock;
191 	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
192 	int		JunkIndex;
193 } *slglobaldata_t;
194 
195 #define SLZF_UNOTZEROD		0x0001
196 
197 #define MAG_NORECURSE 		0x01
198 #define FASTSLABREALLOC		0x02
199 
200 /*
201  * Misc constants.  Note that page-multiple allocations beyond two pages,
202  * or those exceeding the zone limit, go to the mmap-based bigalloc code.
203  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
204  */
205 #define MIN_CHUNK_SIZE		8		/* in bytes */
206 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
207 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
208 
209 /*
210  * The WEIRD_ADDR is used as known text to copy into free objects to
211  * try to create deterministic failure cases if the data is accessed after
212  * free.
213  *
214  * WARNING: A limited number of spinlocks are available, BIGXSIZE should
215  *	    not be larger than 64.
216  */
217 #define WEIRD_ADDR      0xdeadc0de
218 #define MAX_COPY        sizeof(weirdary)
219 #define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)
220 
221 #define BIGHSHIFT	10			/* bigalloc hash table */
222 #define BIGHSIZE	(1 << BIGHSHIFT)
223 #define BIGHMASK	(BIGHSIZE - 1)
224 #define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
225 #define BIGXMASK	(BIGXSIZE - 1)
226 
227 #define SAFLAG_ZERO	0x0001
228 #define SAFLAG_PASSIVE	0x0002
229 
230 /*
231  * Thread control
232  */
233 
234 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
235 
236 #define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
237 				_mpanic("assertion: %s in %s",	\
238 				#exp, __func__);		\
239 			    } while (0)
240 
241 /*
242  * Magazines
243  */
244 
245 #define M_MAX_ROUNDS	64
246 #define M_ZONE_ROUNDS	64
247 #define M_LOW_ROUNDS	32
248 #define M_INIT_ROUNDS	8
249 #define M_BURST_FACTOR  8
250 #define M_BURST_NSCALE	2
251 
252 #define M_BURST		0x0001
253 #define M_BURST_EARLY	0x0002
254 
255 struct magazine {
256 	SLIST_ENTRY(magazine) nextmagazine;
257 
258 	int		flags;
259 	int 		capacity;	/* Max rounds in this magazine */
260 	int 		rounds;		/* Current number of free rounds */
261 	int		burst_factor;	/* Number of blocks to prefill with */
262 	int 		low_factor;	/* Free till low_factor from full mag */
263 	void		*objects[M_MAX_ROUNDS];
264 };
265 
266 SLIST_HEAD(magazinelist, magazine);
267 
268 static spinlock_t zone_mag_lock;
269 static struct magazine zone_magazine = {
270 	.flags = M_BURST | M_BURST_EARLY,
271 	.capacity = M_ZONE_ROUNDS,
272 	.rounds = 0,
273 	.burst_factor = M_BURST_FACTOR,
274 	.low_factor = M_LOW_ROUNDS
275 };
276 
277 #define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
278 #define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
279 #define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
280 #define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)
281 
282 /* Each thread has a pair of magazines per size-class (NZONES).
283  * The loaded magazine satisfies immediate allocations; the previous
284  * magazine is either full or empty and can be swapped in on demand. */
285 typedef struct magazine_pair {
286 	struct magazine	*loaded;
287 	struct magazine	*prev;
288 } magazine_pair;
289 
290 /* A depot is a collection of magazines for a single zone. */
291 typedef struct magazine_depot {
292 	struct magazinelist full;
293 	struct magazinelist empty;
294 	pthread_spinlock_t lock;
295 } magazine_depot;
296 
297 typedef struct thr_mags {
298 	magazine_pair	mags[NZONES];
299 	int		init;
300 } thr_mags;
301 
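/*
 * Sketch (comment only) of the per-thread fast path implemented by
 * mtmagazine_alloc()/mtmagazine_free() further below, for a zone index zi:
 *
 *	allocation:
 *		loaded not empty	-> pop an object, no locking
 *		prev full		-> swap loaded/prev, retry
 *		depot has a full mag	-> trade magazines (depot lock), retry
 *		otherwise		-> fall back to the global slab zones
 *
 *	free runs the same states in reverse: push into loaded, swap with an
 *	empty prev, or trade magazines with the depot, allocating a fresh
 *	empty magazine when the depot has none.
 */
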
302 /* With this attribute set, accessing this variable does not require a
303  * function call even when the code is compiled -fPIC. */
304 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")))
305 
306 static int mtmagazine_free_live = 0;
307 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
308 static pthread_key_t thread_mags_key;
309 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
310 static magazine_depot depots[NZONES];
311 
312 /*
313  * Fixed globals (not per-cpu)
314  */
315 static const int ZoneSize = ZALLOC_ZONE_SIZE;
316 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
317 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
318 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
319 
320 static int opt_madvise = 0;
321 static int opt_utrace = 0;
322 static int malloc_started = 0;
323 static int g_malloc_flags = 0;
324 static spinlock_t malloc_init_lock;
325 static struct slglobaldata	SLGlobalData;
326 static bigalloc_t bigalloc_array[BIGHSIZE];
327 static spinlock_t bigspin_array[BIGXSIZE];
328 static int malloc_panic;
329 static int malloc_dummy_pointer;
330 
331 static const int32_t weirdary[16] = {
332 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
333 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
334 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
335 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
336 };
337 
338 static void *_slaballoc(size_t size, int flags);
339 static void *_slabrealloc(void *ptr, size_t size);
340 static void _slabfree(void *ptr, int, bigalloc_t *);
341 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
342 static void _vmem_free(void *ptr, size_t bytes);
343 static void *magazine_alloc(struct magazine *, int *);
344 static int magazine_free(struct magazine *, void *);
345 static void *mtmagazine_alloc(int zi);
346 static int mtmagazine_free(int zi, void *);
347 static void mtmagazine_init(void);
348 static void mtmagazine_destructor(void *);
349 static slzone_t zone_alloc(int flags);
350 static void zone_free(void *z);
351 static void _mpanic(const char *ctl, ...);
352 static void malloc_init(void);
353 #if defined(INVARIANTS)
354 static void chunk_mark_allocated(slzone_t z, void *chunk);
355 static void chunk_mark_free(slzone_t z, void *chunk);
356 #endif
357 
358 struct nmalloc_utrace {
359 	void *p;
360 	size_t s;
361 	void *r;
362 };
363 
364 #define UTRACE(a, b, c)						\
365 	if (opt_utrace) {					\
366 		struct nmalloc_utrace ut = {			\
367 			.p = (a),				\
368 			.s = (b),				\
369 			.r = (c)				\
370 		};						\
371 		utrace(&ut, sizeof(ut));			\
372 	}
373 
374 #ifdef INVARIANTS
375 /*
376  * If enabled any memory allocated without M_ZERO is initialized to -1.
377  */
378 static int  use_malloc_pattern;
379 #endif
380 
381 static void
382 malloc_init(void)
383 {
384 	const char *p = NULL;
385 
386 	if (__isthreaded) {
387 		_SPINLOCK(&malloc_init_lock);
388 		if (malloc_started) {
389 			_SPINUNLOCK(&malloc_init_lock);
390 			return;
391 		}
392 	}
393 
394 	if (issetugid() == 0)
395 		p = getenv("MALLOC_OPTIONS");
396 
397 	for (; p != NULL && *p != '\0'; p++) {
398 		switch(*p) {
399 		case 'u':	opt_utrace = 0; break;
400 		case 'U':	opt_utrace = 1; break;
401 		case 'h':	opt_madvise = 0; break;
402 		case 'H':	opt_madvise = 1; break;
403 		case 'z':	g_malloc_flags = 0; break;
404 		case 'Z': 	g_malloc_flags = SAFLAG_ZERO; break;
405 		default:
406 			break;
407 		}
408 	}
409 
410 	malloc_started = 1;
411 
412 	if (__isthreaded)
413 		_SPINUNLOCK(&malloc_init_lock);
414 
415 	UTRACE((void *) -1, 0, NULL);
416 }
417 
418 /*
419  * Thread locks.
420  */
421 static __inline void
422 slgd_lock(slglobaldata_t slgd)
423 {
424 	if (__isthreaded)
425 		_SPINLOCK(&slgd->Spinlock);
426 }
427 
428 static __inline void
429 slgd_unlock(slglobaldata_t slgd)
430 {
431 	if (__isthreaded)
432 		_SPINUNLOCK(&slgd->Spinlock);
433 }
434 
435 static __inline void
436 depot_lock(magazine_depot *dp)
437 {
438 	if (__isthreaded)
439 		pthread_spin_lock(&dp->lock);
440 }
441 
442 static __inline void
443 depot_unlock(magazine_depot *dp)
444 {
445 	if (__isthreaded)
446 		pthread_spin_unlock(&dp->lock);
447 }
448 
449 static __inline void
450 zone_magazine_lock(void)
451 {
452 	if (__isthreaded)
453 		_SPINLOCK(&zone_mag_lock);
454 }
455 
456 static __inline void
457 zone_magazine_unlock(void)
458 {
459 	if (__isthreaded)
460 		_SPINUNLOCK(&zone_mag_lock);
461 }
462 
463 static __inline void
464 swap_mags(magazine_pair *mp)
465 {
466 	struct magazine *tmp;
467 	tmp = mp->loaded;
468 	mp->loaded = mp->prev;
469 	mp->prev = tmp;
470 }
471 
472 /*
473  * bigalloc hashing and locking support.
474  *
475  * Return an unmasked hash code for the passed pointer.
476  */
477 static __inline int
478 _bigalloc_hash(void *ptr)
479 {
480 	int hv;
481 
482 	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
483 	      ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
484 
485 	return(hv);
486 }
487 
488 /*
489  * Lock the hash chain and return a pointer to its base for the specified
490  * address.
491  */
492 static __inline bigalloc_t *
493 bigalloc_lock(void *ptr)
494 {
495 	int hv = _bigalloc_hash(ptr);
496 	bigalloc_t *bigp;
497 
498 	bigp = &bigalloc_array[hv & BIGHMASK];
499 	if (__isthreaded)
500 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
501 	return(bigp);
502 }
503 
504 /*
505  * Lock the hash chain and return a pointer to its base for the specified
506  * address.
507  *
508  * BUT, if the hash chain is empty, just return NULL and do not bother
509  * to lock anything.
510  */
511 static __inline bigalloc_t *
512 bigalloc_check_and_lock(void *ptr)
513 {
514 	int hv = _bigalloc_hash(ptr);
515 	bigalloc_t *bigp;
516 
517 	bigp = &bigalloc_array[hv & BIGHMASK];
518 	if (*bigp == NULL)
519 		return(NULL);
520 	if (__isthreaded) {
521 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
522 	}
523 	return(bigp);
524 }
525 
526 static __inline void
527 bigalloc_unlock(void *ptr)
528 {
529 	int hv;
530 
531 	if (__isthreaded) {
532 		hv = _bigalloc_hash(ptr);
533 		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
534 	}
535 }
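
/*
 * Worked example of the hashing above, assuming 4K pages (PAGE_SHIFT == 12):
 *
 *	hv    = (ptr >> 12) ^ (ptr >> 22);
 *	chain = &bigalloc_array[hv & BIGHMASK];		one of 1024 hash chains
 *	lock  = &bigspin_array[hv & BIGXMASK];		one of 64 spinlocks
 *
 * Each chain is a singly-linked list of struct bigalloc; the lock table is
 * 16x smaller than the hash table, so 16 hash chains share each spinlock.
 */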
536 
537 /*
538  * Calculate the zone index for the allocation request size and set the
539  * allocation request size to that particular zone's chunk size.
540  */
541 static __inline int
542 zoneindex(size_t *bytes, size_t *chunking)
543 {
544 	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
545 	if (n < 128) {
546 		*bytes = n = (n + 7) & ~7;
547 		*chunking = 8;
548 		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
549 	}
550 	if (n < 256) {
551 		*bytes = n = (n + 15) & ~15;
552 		*chunking = 16;
553 		return(n / 16 + 7);
554 	}
555 	if (n < 8192) {
556 		if (n < 512) {
557 			*bytes = n = (n + 31) & ~31;
558 			*chunking = 32;
559 			return(n / 32 + 15);
560 		}
561 		if (n < 1024) {
562 			*bytes = n = (n + 63) & ~63;
563 			*chunking = 64;
564 			return(n / 64 + 23);
565 		}
566 		if (n < 2048) {
567 			*bytes = n = (n + 127) & ~127;
568 			*chunking = 128;
569 			return(n / 128 + 31);
570 		}
571 		if (n < 4096) {
572 			*bytes = n = (n + 255) & ~255;
573 			*chunking = 256;
574 			return(n / 256 + 39);
575 		}
576 		*bytes = n = (n + 511) & ~511;
577 		*chunking = 512;
578 		return(n / 512 + 47);
579 	}
580 #if ZALLOC_ZONE_LIMIT > 8192
581 	if (n < 16384) {
582 		*bytes = n = (n + 1023) & ~1023;
583 		*chunking = 1024;
584 		return(n / 1024 + 55);
585 	}
586 #endif
587 #if ZALLOC_ZONE_LIMIT > 16384
588 	if (n < 32768) {
589 		*bytes = n = (n + 2047) & ~2047;
590 		*chunking = 2048;
591 		return(n / 2048 + 63);
592 	}
593 #endif
594 	_mpanic("Unexpected byte count %zu", n);
595 	return(0);
596 }
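
/*
 * Illustrative zoneindex() mappings (values worked out from the code above):
 *
 *	zoneindex(&size, &chunking) with size = 100
 *		-> size = 104,   chunking = 8,    returns 12
 *	zoneindex(&size, &chunking) with size = 513
 *		-> size = 576,   chunking = 64,   returns 32
 *	zoneindex(&size, &chunking) with size = 16000
 *		-> size = 16384, chunking = 1024, returns 71
 *
 * The largest possible index is NZONES - 1 (71) with a 16K zone limit.
 */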
597 
598 /*
599  * malloc() - call internal slab allocator
600  */
601 void *
602 malloc(size_t size)
603 {
604 	void *ptr;
605 
606 	ptr = _slaballoc(size, 0);
607 	if (ptr == NULL)
608 		errno = ENOMEM;
609 	else
610 		UTRACE(0, size, ptr);
611 	return(ptr);
612 }
613 
614 /*
615  * calloc() - call internal slab allocator
616  */
617 void *
618 calloc(size_t number, size_t size)
619 {
620 	void *ptr;
621 
	/* Guard against number * size overflowing size_t */
	if (size != 0 && number > SIZE_MAX / size) {
		errno = ENOMEM;
		return(NULL);
	}
	ptr = _slaballoc(number * size, SAFLAG_ZERO);
623 	if (ptr == NULL)
624 		errno = ENOMEM;
625 	else
626 		UTRACE(0, number * size, ptr);
627 	return(ptr);
628 }
629 
630 /*
631  * realloc() (SLAB ALLOCATOR)
632  *
633  * We do not attempt to optimize this routine beyond reusing the same
634  * pointer if the new size fits within the chunking of the old pointer's
635  * zone.
636  */
637 void *
638 realloc(void *ptr, size_t size)
639 {
640 	void *ret;
641 	ret = _slabrealloc(ptr, size);
642 	if (ret == NULL)
643 		errno = ENOMEM;
644 	else
645 		UTRACE(ptr, size, ret);
646 	return(ret);
647 }
648 
649 /*
650  * posix_memalign()
651  *
652  * Allocate (size) bytes with an alignment of (alignment), where (alignment)
653  * is a power of 2 >= sizeof(void *).
654  *
655  * The slab allocator will allocate on power-of-2 boundaries up to
656  * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
657  * zone matching the requirements, and _vmem_alloc() otherwise.
658  */
659 int
660 posix_memalign(void **memptr, size_t alignment, size_t size)
661 {
662 	bigalloc_t *bigp;
663 	bigalloc_t big;
664 	size_t chunking;
665 	int zi;
666 
667 	/*
668 	 * OpenGroup spec issue 6 checks
669 	 */
670 	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
671 		*memptr = NULL;
672 		return(EINVAL);
673 	}
674 	if (alignment < sizeof(void *)) {
675 		*memptr = NULL;
676 		return(EINVAL);
677 	}
678 
679 	/*
680 	 * Our zone mechanism guarantees same-sized alignment for any
681 	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
682 	 * we can just call _slaballoc() and be done.  We round size up
683 	 * to the nearest alignment boundary to improve our odds of
684 	 * it becoming a power-of-2 if it wasn't before.
685 	 */
686 	if (size <= alignment)
687 		size = alignment;
688 	else
689 		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
690 	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
691 		*memptr = _slaballoc(size, 0);
692 		return(*memptr ? 0 : ENOMEM);
693 	}
694 
695 	/*
696 	 * Otherwise locate a zone with a chunking that matches
697 	 * the requested alignment, within reason.   Consider two cases:
698 	 *
699 	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
700 	 *     we find will be the best fit because the chunking will be
701 	 *     greater or equal to the alignment.
702 	 *
703 	 * (2) A 513-byte allocation on a 256-byte alignment.  In this case
704 	 *     the first zoneindex we find will be for 576 byte allocations
705 	 *     with a chunking of 64, which is not sufficient.  To fix this
706 	 *     we simply find the nearest power-of-2 >= size and use the
707 	 *     same side-effect of _slaballoc() which guarantees
708 	 *     same-alignment on a power-of-2 allocation.
709 	 */
710 	if (size < PAGE_SIZE) {
711 		zi = zoneindex(&size, &chunking);
712 		if (chunking >= alignment) {
713 			*memptr = _slaballoc(size, 0);
714 			return(*memptr ? 0 : ENOMEM);
715 		}
716 		if (size >= 1024)
717 			alignment = 1024;
718 		if (size >= 16384)
719 			alignment = 16384;
720 		while (alignment < size)
721 			alignment <<= 1;
722 		*memptr = _slaballoc(alignment, 0);
723 		return(*memptr ? 0 : ENOMEM);
724 	}
725 
726 	/*
727 	 * If the slab allocator cannot handle it use vmem_alloc().
728 	 *
729 	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
730 	 */
731 	if (alignment < PAGE_SIZE)
732 		alignment = PAGE_SIZE;
733 	if (size < alignment)
734 		size = alignment;
735 	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
736 	*memptr = _vmem_alloc(size, alignment, 0);
737 	if (*memptr == NULL)
738 		return(ENOMEM);
739 
740 	big = _slaballoc(sizeof(struct bigalloc), 0);
741 	if (big == NULL) {
742 		_vmem_free(*memptr, size);
743 		*memptr = NULL;
744 		return(ENOMEM);
745 	}
746 	bigp = bigalloc_lock(*memptr);
747 	big->base = *memptr;
748 	big->bytes = size;
749 	big->next = *bigp;
750 	*bigp = big;
751 	bigalloc_unlock(*memptr);
752 
753 	return(0);
754 }
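
/*
 * Caller-side sketch (not part of libc) exercising the three paths above:
 * a power-of-2 size served directly by the slab, a chunk-aligned case, and
 * a large page-aligned case that goes through _vmem_alloc() and the
 * bigalloc table.  Error checking is omitted for brevity.
 *
 *	void *a, *b, *c;
 *	posix_memalign(&a, 64, 64);		// power-of-2 size -> _slaballoc()
 *	posix_memalign(&b, 16, 1100);		// zone chunking (128) >= 16 -> slab
 *	posix_memalign(&c, 4096, 65536);	// >= PAGE_SIZE -> _vmem_alloc()
 *	free(a); free(b); free(c);
 */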
755 
756 /*
757  * free() (SLAB ALLOCATOR) - do the obvious
758  */
759 void
760 free(void *ptr)
761 {
762 	UTRACE(ptr, 0, 0);
763 	_slabfree(ptr, 0, NULL);
764 }
765 
766 /*
767  * _slaballoc()	(SLAB ALLOCATOR)
768  *
769  *	Allocate memory via the slab allocator.  If the request is too large,
770  *	or if it is page-aligned beyond a certain size, we fall back to the
771  *	mmap()-based bigalloc code.
772  */
773 static void *
774 _slaballoc(size_t size, int flags)
775 {
776 	slzone_t z;
777 	slchunk_t chunk;
778 	slglobaldata_t slgd;
779 	size_t chunking;
780 	int zi;
781 #ifdef INVARIANTS
782 	int i;
783 #endif
784 	int off;
785 	void *obj;
786 
787 	if (!malloc_started)
788 		malloc_init();
789 
790 	/*
791 	 * Handle the degenerate size == 0 case.  Yes, this does happen.
792 	 * Return a special pointer.  This is to maintain compatibility with
793 	 * the original malloc implementation.  Certain devices, such as the
794 	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
795 	 * also realloc() later on.  Joy.
796 	 */
797 	if (size == 0)
798 		return(ZERO_LENGTH_PTR);
799 
800 	/* Capture global flags */
801 	flags |= g_malloc_flags;
802 
803 	/*
804 	 * Handle large allocations directly.  There should not be very many
805 	 * of these so performance is not a big issue.
806 	 *
807 	 * The backend allocator is pretty nasty on an SMP system.  Use the
808 	 * slab allocator for one and two page-sized chunks even though we
809 	 * lose some efficiency.
810 	 */
811 	if (size >= ZoneLimit ||
812 	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
813 		bigalloc_t big;
814 		bigalloc_t *bigp;
815 
816 		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
817 		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
818 		if (chunk == NULL)
819 			return(NULL);
820 
821 		big = _slaballoc(sizeof(struct bigalloc), 0);
822 		if (big == NULL) {
823 			_vmem_free(chunk, size);
824 			return(NULL);
825 		}
826 		bigp = bigalloc_lock(chunk);
827 		big->base = chunk;
828 		big->bytes = size;
829 		big->next = *bigp;
830 		*bigp = big;
831 		bigalloc_unlock(chunk);
832 
833 		return(chunk);
834 	}
835 
836 	/* Compute allocation zone; zoneindex will panic on excessive sizes */
837 	zi = zoneindex(&size, &chunking);
838 	MASSERT(zi < NZONES);
839 
840 	obj = mtmagazine_alloc(zi);
841 	if (obj != NULL) {
842 		if (flags & SAFLAG_ZERO)
843 			bzero(obj, size);
844 		return (obj);
845 	}
846 
847 	slgd = &SLGlobalData;
848 	slgd_lock(slgd);
849 
850 	/*
851 	 * Attempt to allocate out of an existing zone.  If all zones are
852 	 * exhausted pull one off the free list or allocate a new one.
853 	 */
854 	if ((z = slgd->ZoneAry[zi]) == NULL) {
855 
856 		z = zone_alloc(flags);
857 		if (z == NULL)
858 			goto fail;
859 
860 		/*
861 		 * How big is the base structure?
862 		 */
863 #if defined(INVARIANTS)
864 		/*
865 		 * Make room for z_Bitmap.  An exact calculation is
866 		 * somewhat more complicated, so we simply reserve space
867 		 * for slightly more chunks than the zone can hold.
868 		 */
869 		off = offsetof(struct slzone,
870 				z_Bitmap[(ZoneSize / size + 31) / 32]);
871 		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
872 #else
873 		off = sizeof(struct slzone);
874 #endif
875 
876 		/*
877 		 * Align the storage in the zone based on the chunking.
878 		 *
879 		 * Guarantee power-of-2 alignment for power-of-2-sized
880 		 * chunks.  Otherwise align based on the chunking size
881 		 * (typically 8 or 16 bytes for small allocations).
882 		 *
883 		 * NOTE: Allocations >= ZoneLimit are governed by the
884 		 * bigalloc code and typically only guarantee page-alignment.
885 		 *
886 		 * Set initial conditions for UIndex near the zone header
887 		 * to reduce unnecessary page faults, vs semi-randomization
888 		 * to improve L1 cache saturation.
889 		 */
890 		if ((size | (size - 1)) + 1 == (size << 1))
891 			off = (off + size - 1) & ~(size - 1);
892 		else
893 			off = (off + chunking - 1) & ~(chunking - 1);
894 		z->z_Magic = ZALLOC_SLAB_MAGIC;
895 		z->z_ZoneIndex = zi;
896 		z->z_NMax = (ZoneSize - off) / size;
897 		z->z_NFree = z->z_NMax;
898 		z->z_BasePtr = (char *)z + off;
899 		z->z_UIndex = z->z_UEndIndex = 0;
900 		z->z_ChunkSize = size;
901 		z->z_FirstFreePg = ZonePageCount;
902 		z->z_Next = slgd->ZoneAry[zi];
903 		slgd->ZoneAry[zi] = z;
904 		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
905 			flags &= ~SAFLAG_ZERO;	/* already zero'd */
906 			flags |= SAFLAG_PASSIVE;
907 		}
908 
909 		/*
910 		 * Slide the base index for initial allocations out of the
911 		 * next zone we create so we do not over-weight the lower
912 		 * part of the cpu memory caches.
913 		 */
914 		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
915 					& (ZALLOC_MAX_ZONE_SIZE - 1);
916 	}
917 
918 	/*
919 	 * Ok, we have a zone from which at least one chunk is available.
920 	 *
921 	 * Remove us from the ZoneAry[] when we become empty
922 	 */
923 	MASSERT(z->z_NFree > 0);
924 
925 	if (--z->z_NFree == 0) {
926 		slgd->ZoneAry[zi] = z->z_Next;
927 		z->z_Next = NULL;
928 	}
929 
930 	/*
931 	 * Locate a chunk in a free page.  This attempts to localize
932 	 * reallocations into earlier pages without us having to sort
933 	 * the chunk list.  A chunk may still overlap a page boundary.
934 	 */
935 	while (z->z_FirstFreePg < ZonePageCount) {
936 		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
937 #ifdef DIAGNOSTIC
938 			/*
939 			 * Diagnostic: c_Next is not total garbage.
940 			 */
941 			MASSERT(chunk->c_Next == NULL ||
942 			    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
943 			    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
944 #endif
945 #ifdef INVARIANTS
946 			chunk_mark_allocated(z, chunk);
947 #endif
948 			MASSERT((uintptr_t)chunk & ZoneMask);
949 			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
950 			goto done;
951 		}
952 		++z->z_FirstFreePg;
953 	}
954 
955 	/*
956 	 * No chunks are available but NFree said we had some memory,
957 	 * so it must be available in the never-before-used-memory
958 	 * area governed by UIndex.  The consequences are very
959 	 * serious if our zone got corrupted, so we use an explicit
960 	 * panic rather than a KASSERT.
961 	 */
962 	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
963 
964 	if (++z->z_UIndex == z->z_NMax)
965 		z->z_UIndex = 0;
966 	if (z->z_UIndex == z->z_UEndIndex) {
967 		if (z->z_NFree != 0)
968 			_mpanic("slaballoc: corrupted zone");
969 	}
970 
971 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
972 		flags &= ~SAFLAG_ZERO;
973 		flags |= SAFLAG_PASSIVE;
974 	}
975 #if defined(INVARIANTS)
976 	chunk_mark_allocated(z, chunk);
977 #endif
978 
979 done:
980 	slgd_unlock(slgd);
981 	if (flags & SAFLAG_ZERO) {
982 		bzero(chunk, size);
983 #ifdef INVARIANTS
984 	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
985 		if (use_malloc_pattern) {
986 			for (i = 0; i < size; i += sizeof(int)) {
987 				*(int *)((char *)chunk + i) = -1;
988 			}
989 		}
990 		/* avoid accidental double-free check */
991 		chunk->c_Next = (void *)-1;
992 #endif
993 	}
994 	return(chunk);
995 fail:
996 	slgd_unlock(slgd);
997 	return(NULL);
998 }
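
/*
 * Summary sketch (comment only) of the _slaballoc() decision path above:
 *
 *	size == 0				-> ZERO_LENGTH_PTR
 *	size >= ZoneLimit, or a page multiple
 *	larger than two pages			-> _vmem_alloc() + bigalloc hash
 *	everything else				-> per-thread magazine, then the
 *						   zone's per-page free lists,
 *						   then the zone's never-used
 *						   region (z_UIndex)
 */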
999 
1000 /*
1001  * Reallocate memory within the chunk
1002  */
1003 static void *
1004 _slabrealloc(void *ptr, size_t size)
1005 {
1006 	bigalloc_t *bigp;
1007 	void *nptr;
1008 	slzone_t z;
1009 	size_t chunking;
1010 
1011 	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
1012 		return(_slaballoc(size, 0));
1013 
1014 	if (size == 0) {
1015 	    free(ptr);
1016 	    return(ZERO_LENGTH_PTR);
1017 	}
1018 
1019 	/*
1020 	 * Handle oversized allocations.
1021 	 */
1022 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1023 		bigalloc_t big;
1024 		size_t bigbytes;
1025 
1026 		while ((big = *bigp) != NULL) {
1027 			if (big->base == ptr) {
1028 				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1029 				bigbytes = big->bytes;
1030 				if (bigbytes == size) {
1031 					bigalloc_unlock(ptr);
1032 					return(ptr);
1033 				}
1034 				*bigp = big->next;
1035 				bigalloc_unlock(ptr);
1036 				if ((nptr = _slaballoc(size, 0)) == NULL) {
1037 					/* Relink block */
1038 					bigp = bigalloc_lock(ptr);
1039 					big->next = *bigp;
1040 					*bigp = big;
1041 					bigalloc_unlock(ptr);
1042 					return(NULL);
1043 				}
1044 				if (size > bigbytes)
1045 					size = bigbytes;
1046 				bcopy(ptr, nptr, size);
1047 				_slabfree(ptr, FASTSLABREALLOC, &big);
1048 				return(nptr);
1049 			}
1050 			bigp = &big->next;
1051 		}
1052 		bigalloc_unlock(ptr);
1053 	}
1054 
1055 	/*
1056 	 * Get the original allocation's zone.  If the new request winds
1057 	 * up using the same chunk size we do not have to do anything.
1058 	 *
1059 	 * NOTE: We don't have to lock the globaldata here, the fields we
1060 	 * access here will not change at least as long as we have control
1061 	 * over the allocation.
1062 	 */
1063 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1064 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1065 
1066 	/*
1067 	 * Use zoneindex() to chunk-align the new size, as long as the
1068 	 * new size is not too large.
1069 	 */
1070 	if (size < ZoneLimit) {
1071 		zoneindex(&size, &chunking);
1072 		if (z->z_ChunkSize == size)
1073 			return(ptr);
1074 	}
1075 
1076 	/*
1077 	 * Allocate memory for the new request size and copy as appropriate.
1078 	 */
1079 	if ((nptr = _slaballoc(size, 0)) != NULL) {
1080 		if (size > z->z_ChunkSize)
1081 			size = z->z_ChunkSize;
1082 		bcopy(ptr, nptr, size);
1083 		_slabfree(ptr, 0, NULL);
1084 	}
1085 
1086 	return(nptr);
1087 }
1088 
1089 /*
1090  * free (SLAB ALLOCATOR)
1091  *
1092  * Free a memory block previously allocated by malloc.  Note that we do not
1093  * attempt to update ks_loosememuse as MP races could prevent us from
1094  * checking memory limits in malloc.
1095  *
1096  * flags:
1097  *	MAG_NORECURSE		Skip magazine layer
1098  *	FASTSLABREALLOC		Fast call from realloc
1099  * MPSAFE
1100  */
1101 static void
1102 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1103 {
1104 	slzone_t z;
1105 	slchunk_t chunk;
1106 	bigalloc_t big;
1107 	bigalloc_t *bigp;
1108 	slglobaldata_t slgd;
1109 	size_t size;
1110 	int zi;
1111 	int pgno;
1112 
1113 	/* Fast realloc path for big allocations */
1114 	if (flags & FASTSLABREALLOC) {
1115 		big = *rbigp;
1116 		goto fastslabrealloc;
1117 	}
1118 
1119 	/*
1120 	 * Handle NULL frees and special 0-byte allocations
1121 	 */
1122 	if (ptr == NULL)
1123 		return;
1124 	if (ptr == ZERO_LENGTH_PTR)
1125 		return;
1126 
1127 	/* Ensure that a destructor is in-place for thread-exit */
1128 	if (mtmagazine_free_live == 0) {
1129 		mtmagazine_free_live = 1;
1130 		pthread_once(&thread_mags_once, &mtmagazine_init);
1131 	}
1132 
1133 	/*
1134 	 * Handle oversized allocations.
1135 	 */
1136 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1137 		while ((big = *bigp) != NULL) {
1138 			if (big->base == ptr) {
1139 				if ((flags & FASTSLABREALLOC) == 0) {
1140 					*bigp = big->next;
1141 					bigalloc_unlock(ptr);
1142 				}
1143 fastslabrealloc:
1144 				size = big->bytes;
1145 				_slabfree(big, 0, NULL);
1146 #ifdef INVARIANTS
1147 				MASSERT(sizeof(weirdary) <= size);
1148 				bcopy(weirdary, ptr, sizeof(weirdary));
1149 #endif
1150 				_vmem_free(ptr, size);
1151 				return;
1152 			}
1153 			bigp = &big->next;
1154 		}
1155 		bigalloc_unlock(ptr);
1156 	}
1157 
1158 	/*
1159 	 * Zone case.  Figure out the zone based on the fact that it is
1160 	 * ZoneSize aligned.
1161 	 */
1162 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1163 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1164 
1165 	size = z->z_ChunkSize;
1166 	zi = z->z_ZoneIndex;
1167 
1168 	if (g_malloc_flags & SAFLAG_ZERO)
1169 		bzero(ptr, size);
1170 
1171 	if (((flags & MAG_NORECURSE) == 0) &&
1172 	    (mtmagazine_free(zi, ptr) == 0))
1173 		return;
1174 
1175 	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1176 	chunk = ptr;
1177 	slgd = &SLGlobalData;
1178 	slgd_lock(slgd);
1179 
1180 #ifdef INVARIANTS
1181 	/*
1182 	 * Attempt to detect a double-free.  To reduce overhead we only check
1183 	 * if there appears to be a link pointer at the base of the data.
1184 	 */
1185 	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1186 		slchunk_t scan;
1187 
1188 		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1189 			if (scan == chunk)
1190 				_mpanic("Double free at %p", chunk);
1191 		}
1192 	}
1193 	chunk_mark_free(z, chunk);
1194 #endif
1195 
1196 	/*
1197 	 * Put weird data into the memory to detect modifications after
1198 	 * freeing, illegal pointer use after freeing (we should fault on
1199 	 * the odd address), and so forth.
1200 	 */
1201 #ifdef INVARIANTS
1202 	if (z->z_ChunkSize < sizeof(weirdary))
1203 		bcopy(weirdary, chunk, z->z_ChunkSize);
1204 	else
1205 		bcopy(weirdary, chunk, sizeof(weirdary));
1206 #endif
1207 
1208 	/*
1209 	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1210 	 * z_FirstFreePg.
1211 	 */
1212 	chunk->c_Next = z->z_PageAry[pgno];
1213 	z->z_PageAry[pgno] = chunk;
1214 	if (z->z_FirstFreePg > pgno)
1215 		z->z_FirstFreePg = pgno;
1216 
1217 	/*
1218 	 * Bump the number of free chunks.  If it becomes non-zero the zone
1219 	 * must be added back onto the appropriate list.
1220 	 */
1221 	if (z->z_NFree++ == 0) {
1222 		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1223 		slgd->ZoneAry[z->z_ZoneIndex] = z;
1224 	}
1225 
1226 	/*
1227 	 * If the zone becomes totally free then release it.
1228 	 */
1229 	if (z->z_NFree == z->z_NMax) {
1230 		slzone_t *pz;
1231 
1232 		pz = &slgd->ZoneAry[z->z_ZoneIndex];
1233 		while (z != *pz)
1234 			pz = &(*pz)->z_Next;
1235 		*pz = z->z_Next;
1236 		z->z_Magic = -1;
1237 		z->z_Next = NULL;
1238 		zone_free(z);
1239 		return;
1240 	}
1241 	slgd_unlock(slgd);
1242 }
1243 
1244 #if defined(INVARIANTS)
1245 /*
1246  * Helper routines for sanity checks
1247  */
1248 static
1249 void
1250 chunk_mark_allocated(slzone_t z, void *chunk)
1251 {
1252 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1253 	__uint32_t *bitptr;
1254 
1255 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1256 	bitptr = &z->z_Bitmap[bitdex >> 5];
1257 	bitdex &= 31;
1258 	MASSERT((*bitptr & (1 << bitdex)) == 0);
1259 	*bitptr |= 1 << bitdex;
1260 }
1261 
1262 static
1263 void
1264 chunk_mark_free(slzone_t z, void *chunk)
1265 {
1266 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1267 	__uint32_t *bitptr;
1268 
1269 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1270 	bitptr = &z->z_Bitmap[bitdex >> 5];
1271 	bitdex &= 31;
1272 	MASSERT((*bitptr & (1 << bitdex)) != 0);
1273 	*bitptr &= ~(1 << bitdex);
1274 }
1275 
1276 #endif
1277 
1278 static __inline void *
1279 magazine_alloc(struct magazine *mp, int *burst)
1280 {
1281 	void *obj =  NULL;
1282 
1283 	do {
1284 		if (mp != NULL && MAGAZINE_NOTEMPTY(mp)) {
1285 			obj = mp->objects[--mp->rounds];
1286 			break;
1287 		}
1288 
1289 		/* Return burst factor to caller */
1290 		if ((mp->flags & M_BURST) && (burst != NULL)) {
1291 			*burst = mp->burst_factor;
1292 		}
1293 
1294 		/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1295 		if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1296 		    (burst != NULL)) {
1297 			mp->burst_factor -= M_BURST_NSCALE;
1298 			if (mp->burst_factor <= 1) {
1299 				mp->burst_factor = 1;
1300 				mp->flags &= ~(M_BURST);
1301 				mp->flags &= ~(M_BURST_EARLY);
1302 			}
1303 		}
1304 
1305 	} while (0);
1306 
1307 	return obj;
1308 }
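
/*
 * Illustrative decay of the M_BURST_EARLY logic above for the zone magazine
 * (worked out from the constants, not measured): burst_factor starts at
 * M_BURST_FACTOR (8) and loses M_BURST_NSCALE (2) on every empty-magazine
 * miss with a burst pointer supplied:
 *
 *	miss #1: *burst = 8, burst_factor -> 6
 *	miss #2: *burst = 6, burst_factor -> 4
 *	miss #3: *burst = 4, burst_factor -> 2
 *	miss #4: *burst = 2, burst_factor -> 1, M_BURST/M_BURST_EARLY cleared
 *	later misses: *burst is left at the caller's initial value (1)
 *
 * so zone_alloc() mmap()s progressively smaller batches of zones as the
 * process warms up.
 */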
1309 
1310 static __inline int
1311 magazine_free(struct magazine *mp, void *p)
1312 {
1313 	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1314 		mp->objects[mp->rounds++] = p;
1315 		return 0;
1316 	}
1317 
1318 	return -1;
1319 }
1320 
1321 static void *
1322 mtmagazine_alloc(int zi)
1323 {
1324 	thr_mags *tp;
1325 	struct magazine *mp, *emptymag;
1326 	magazine_depot *d;
1327 	void *obj = NULL;
1328 
1329 	tp = &thread_mags;
1330 
1331 	for (;;) {
1332 		/* If the loaded magazine has rounds, allocate and return */
1333 		if (((mp = tp->mags[zi].loaded) != NULL) &&
1334 		    MAGAZINE_NOTEMPTY(mp)) {
1335 			obj = magazine_alloc(mp, NULL);
1336 			break;
1337 		}
1338 
1339 		/* If the prev magazine is full, swap with loaded and retry */
1340 		if (((mp = tp->mags[zi].prev) != NULL) &&
1341 		    MAGAZINE_FULL(mp)) {
1342 			swap_mags(&tp->mags[zi]);
1343 			continue;
1344 		}
1345 
1346 		/* Lock the depot and check if it has any full magazines; if so
1347 		 * we return the prev to the depot's empty list, move loaded to
1348 		 * prev, load a full magazine, and retry */
1349 		d = &depots[zi];
1350 		depot_lock(d);
1351 
1352 		if (!SLIST_EMPTY(&d->full)) {
1353 			emptymag = tp->mags[zi].prev;
1354 			tp->mags[zi].prev = tp->mags[zi].loaded;
1355 			tp->mags[zi].loaded = SLIST_FIRST(&d->full);
1356 			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1357 
1358 			/* Return emptymag to the depot */
1359 			if (emptymag != NULL)
1360 				SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1361 
1362 			depot_unlock(d);
1363 			continue;
1364 		} else {
1365 			depot_unlock(d);
1366 		}
1367 		break;
1368 	}
1369 
1370 	return (obj);
1371 }
1372 
1373 static int
1374 mtmagazine_free(int zi, void *ptr)
1375 {
1376 	thr_mags *tp;
1377 	struct magazine *mp, *loadedmag, *newmag;
1378 	magazine_depot *d;
1379 	int rc = -1;
1380 
1381 	tp = &thread_mags;
1382 
1383 	if (tp->init == 0) {
1384 		pthread_setspecific(thread_mags_key, tp);
1385 		tp->init = 1;
1386 	}
1387 
1388 	for (;;) {
1389 		/* If the loaded magazine has space, free directly to it */
1390 		if (((mp = tp->mags[zi].loaded) != NULL) &&
1391 		    MAGAZINE_NOTFULL(mp)) {
1392 			rc = magazine_free(mp, ptr);
1393 			break;
1394 		}
1395 
1396 		/* If the prev magazine is empty, swap with loaded and retry */
1397 		if (((mp = tp->mags[zi].prev) != NULL) &&
1398 		    MAGAZINE_EMPTY(mp)) {
1399 			swap_mags(&tp->mags[zi]);
1400 			continue;
1401 		}
1402 
1403 		/* Lock the depot; if there are any empty magazines, move the
1404 		 * prev to the depot's full list, move loaded to prev, load an
1405 		 * empty magazine from the depot, and retry. */
1406 
1407 		d = &depots[zi];
1408 		depot_lock(d);
1409 
1410 		if (!SLIST_EMPTY(&d->empty)) {
1411 			loadedmag = tp->mags[zi].prev;
1412 			tp->mags[zi].prev = tp->mags[zi].loaded;
1413 			tp->mags[zi].loaded = SLIST_FIRST(&d->empty);
1414 			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1415 
1416 			/* Return loadedmag to the depot */
1417 			if (loadedmag != NULL)
1418 				SLIST_INSERT_HEAD(&d->full, loadedmag,
1419 						  nextmagazine);
1420 			depot_unlock(d);
1421 			continue;
1422 		}
1423 
1424 		/* Allocate an empty magazine, add it to the depot, retry */
1425 		newmag = _slaballoc(sizeof(struct magazine), SAFLAG_ZERO);
1426 		if (newmag != NULL) {
1427 			newmag->capacity = M_MAX_ROUNDS;
1428 			newmag->rounds = 0;
1429 
1430 			SLIST_INSERT_HEAD(&d->empty, newmag, nextmagazine);
1431 			depot_unlock(d);
1432 			continue;
1433 		} else {
1434 			depot_unlock(d);
1435 			rc = -1;
1436 		}
1437 		break;
1438 	}
1439 
1440 	return rc;
1441 }
1442 
1443 static void
1444 mtmagazine_init(void)
1445 {
1446 	int i = pthread_key_create(&thread_mags_key, &mtmagazine_destructor);
1447 	if (i != 0)
1448 		abort();
1449 }
1450 
1451 static void
1452 mtmagazine_drain(struct magazine *mp)
1453 {
1454 	void *obj;
1455 
1456 	while (MAGAZINE_NOTEMPTY(mp)) {
1457 		obj = magazine_alloc(mp, NULL);
1458 		_slabfree(obj, MAG_NORECURSE, NULL);
1459 	}
1460 }
1461 
1462 /*
1463  * mtmagazine_destructor()
1464  *
1465  * When a thread exits, we reclaim all its resources; all its magazines are
1466  * drained and the structures are freed.
1467  */
1468 static void
1469 mtmagazine_destructor(void *thrp)
1470 {
1471 	thr_mags *tp = thrp;
1472 	struct magazine *mp;
1473 	int i;
1474 
1475 	for (i = 0; i < NZONES; i++) {
1476 		mp = tp->mags[i].loaded;
1477 		if (mp != NULL && MAGAZINE_NOTEMPTY(mp))
1478 			mtmagazine_drain(mp);
1479 		_slabfree(mp, MAG_NORECURSE, NULL);
1480 
1481 		mp = tp->mags[i].prev;
1482 		if (mp != NULL && MAGAZINE_NOTEMPTY(mp))
1483 			mtmagazine_drain(mp);
1484 		_slabfree(mp, MAG_NORECURSE, NULL);
1485 	}
1486 }
1487 
1488 /*
1489  * zone_alloc()
1490  *
1491  * Attempt to allocate a zone from the zone magazine; the zone magazine has
1492  * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1493  */
1494 static slzone_t
1495 zone_alloc(int flags)
1496 {
1497 	slglobaldata_t slgd = &SLGlobalData;
1498 	int burst = 1;
1499 	int i, j;
1500 	slzone_t z;
1501 
1502 	zone_magazine_lock();
1503 	slgd_unlock(slgd);
1504 
1505 	z = magazine_alloc(&zone_magazine, &burst);
1506 	if (z == NULL) {
1507 		if (burst == 1)
1508 			zone_magazine_unlock();
1509 
1510 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1511 		if (z == NULL) {
			/* already unlocked above when burst == 1 */
			if (burst != 1)
				zone_magazine_unlock();
1513 			slgd_lock(slgd);
1514 			return (NULL);
1515 		}
1516 
1517 		for (i = 1; i < burst; i++) {
1518 			j = magazine_free(&zone_magazine,
1519 					  (char *) z + (ZoneSize * i));
1520 			MASSERT(j == 0);
1521 		}
1522 
1523 		if (burst != 1)
1524 			zone_magazine_unlock();
1525 	} else {
1526 		z->z_Flags |= SLZF_UNOTZEROD;
1527 		zone_magazine_unlock();
1528 	}
1529 
1530 	slgd_lock(slgd);
1531 	return z;
1532 }
1533 
1534 /*
1535  * zone_free()
1536  *
1537  * Releases the slgd lock prior to unmap, if unmapping is necessary
1538  */
1539 static void
1540 zone_free(void *z)
1541 {
1542 	slglobaldata_t slgd = &SLGlobalData;
1543 	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1544 	int i, j;
1545 
1546 	zone_magazine_lock();
1547 	slgd_unlock(slgd);
1548 
1549 	bzero(z, sizeof(struct slzone));
1550 
1551 	if (opt_madvise)
1552 		madvise(z, ZoneSize, MADV_FREE);
1553 
1554 	i = magazine_free(&zone_magazine, z);
1555 
1556 	/* If we failed to free, the zone magazine is full: pull out the excess
1557 	 * zones (down to low_factor), release the zone magazine lock, and then
1558 	 * return them and this zone to the system via _vmem_free(). */
1559 	if (i == -1) {
1560 		j = zone_magazine.rounds - zone_magazine.low_factor;
1561 		for (i = 0; i < j; i++) {
1562 			excess[i] = magazine_alloc(&zone_magazine, NULL);
1563 			MASSERT(excess[i] !=  NULL);
1564 		}
1565 
1566 		zone_magazine_unlock();
1567 
1568 		for (i = 0; i < j; i++)
1569 			_vmem_free(excess[i], ZoneSize);
1570 
1571 		_vmem_free(z, ZoneSize);
1572 	} else {
1573 		zone_magazine_unlock();
1574 	}
1575 }
1576 
1577 /*
1578  * _vmem_alloc()
1579  *
1580  *	Directly map memory in PAGE_SIZE'd chunks with the specified
1581  *	alignment.
1582  *
1583  *	Alignment must be a multiple of PAGE_SIZE.
1584  *
1585  *	Size must be >= alignment.
1586  */
1587 static void *
1588 _vmem_alloc(size_t size, size_t align, int flags)
1589 {
1590 	char *addr;
1591 	char *save;
1592 	size_t excess;
1593 
1594 	/*
1595 	 * Map anonymous private memory.
1596 	 */
1597 	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1598 		    MAP_PRIVATE|MAP_ANON, -1, 0);
1599 	if (addr == MAP_FAILED)
1600 		return(NULL);
1601 
1602 	/*
1603 	 * Check alignment.  The misaligned offset is also the excess
1604 	 * amount.  If misaligned unmap the excess so we have a chance of
1605 	 * mapping at the next alignment point and recursively try again.
1606 	 *
1607 	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
1608 	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
1609 	 *   xxxxxxxxx				final excess calculation
1610 	 *   ^ returned address
1611 	 */
1612 	excess = (uintptr_t)addr & (align - 1);
1613 
1614 	if (excess) {
1615 		excess = align - excess;
1616 		save = addr;
1617 
1618 		munmap(save + excess, size - excess);
1619 		addr = _vmem_alloc(size, align, flags);
1620 		munmap(save, excess);
1621 	}
1622 	return((void *)addr);
1623 }
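
/*
 * Worked example of the alignment fixup above using hypothetical addresses:
 * align = 0x10000 (64KB) and mmap() happens to return addr = 0x2f4b3000:
 *
 *	excess = 0x2f4b3000 & 0xffff = 0x3000	misaligned by 0x3000
 *	excess = 0x10000 - 0x3000    = 0xd000	distance to the next boundary
 *
 *	munmap(addr + 0xd000, size - 0xd000);	drop everything past 0x2f4c0000
 *	addr = _vmem_alloc(size, align, flags);	retry; has a good chance of
 *						being placed at 0x2f4c0000
 *	munmap(save, 0xd000);			drop the misaligned head we kept
 *
 * Keeping the head mapped during the retry reserves the low addresses so the
 * recursive mmap() tends to land on the aligned boundary.
 */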
1624 
1625 /*
1626  * _vmem_free()
1627  *
1628  *	Free a chunk of memory allocated with _vmem_alloc()
1629  */
1630 static void
1631 _vmem_free(void *ptr, size_t size)
1632 {
1633 	munmap(ptr, size);
1634 }
1635 
1636 /*
1637  * Panic on fatal conditions
1638  */
1639 static void
1640 _mpanic(const char *ctl, ...)
1641 {
1642 	va_list va;
1643 
1644 	if (malloc_panic == 0) {
1645 		malloc_panic = 1;
1646 		va_start(va, ctl);
1647 		vfprintf(stderr, ctl, va);
1648 		fprintf(stderr, "\n");
1649 		fflush(stderr);
1650 		va_end(va);
1651 	}
1652 	abort();
1653 }
1654