xref: /dflybsd-src/lib/libc/stdlib/nmalloc.c (revision e58e48b4e24346fdda3691cc17bcd42522001399)
1 /*
2  * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
3  *
4  * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com> and by
8  * Venkatesh Srinivas <me@endeavour.zapto.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
38  */
39 /*
40  * This module implements a slab allocator drop-in replacement for the
41  * libc malloc().
42  *
43  * A slab allocator reserves a ZONE for each chunk size, then lays the
44  * chunks out in an array within the zone.  Allocation and deallocation
45  * is nearly instantaneous, and overhead losses are limited to a fixed
46  * worst-case amount.
47  *
48  * The slab allocator does not have to pre-initialize the list of
49  * free chunks for each zone, and the underlying VM will not be
50  * touched at all beyond the zone header until an actual allocation
51  * needs it.
52  *
53  * Slab management and locking is done on a per-zone basis.
54  *
55  *	Alloc Size	Chunking        Number of zones
56  *	0-127		8		16
57  *	128-255		16		8
58  *	256-511		32		8
59  *	512-1023	64		8
60  *	1024-2047	128		8
61  *	2048-4095	256		8
62  *	4096-8191	512		8
63  *	8192-16383	1024		8
64  *	16384-32767	2048		8
65  *
66  *	Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67  *	is used to locate for free.  One and Two-page allocations use the
68  *	zone mechanic to avoid excessive mmap()/munmap() calls.
69  *
70  *			   API FEATURES AND SIDE EFFECTS
71  *
72  *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73  *	Above that power-of-2 sized allocations are page-aligned.  Non
74  *	power-of-2 sized allocations are aligned the same as the chunk
75  *	size for their zone.
76  *    + malloc(0) returns a special non-NULL value
77  *    + ability to allocate arbitrarily large chunks of memory
78  *    + realloc will reuse the passed pointer if possible, within the
79  *	limitations of the zone chunking.
80  *
81  * Multithreaded enhancements for small allocations introduced August 2010.
82  * These are in the spirit of 'libumem'. See:
83  *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84  *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85  * 	USENIX Technical Conference. USENIX Association.
86  *
87  * TUNING
88  *
89  * The value of the environment variable MALLOC_OPTIONS is a character string
90  * containing various flags to tune nmalloc.
91  *
92  * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
93  *			This will generate utrace events for all malloc,
94  *			realloc, and free calls. There are tools (mtrplay) to
95  *			replay and allocation pattern or to graph heap structure
96  *			(mtrgraph) which can interpret these logs.
97  * 'Z'   / ['z']	Zero out / do not zero all allocations.
98  *			Each new byte of memory allocated by malloc, realloc, or
99  *			reallocf will be initialized to 0. This is intended for
100  *			debugging and will affect performance negatively.
101  * 'H'	/  ['h']	Pass a hint to the kernel about pages unused by the
102  *			allocation functions.
103  */
104 
105 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
106 
107 #include "libc_private.h"
108 
109 #include <sys/param.h>
110 #include <sys/types.h>
111 #include <sys/mman.h>
112 #include <sys/queue.h>
113 #include <sys/uio.h>
114 #include <sys/ktrace.h>
115 #include <stdio.h>
116 #include <stdint.h>
117 #include <stdlib.h>
118 #include <stdarg.h>
119 #include <stddef.h>
120 #include <unistd.h>
121 #include <string.h>
122 #include <fcntl.h>
123 #include <errno.h>
124 #include <pthread.h>
125 
126 #include "spinlock.h"
127 #include "un-namespace.h"
128 
129 static char rcsid[] = "$Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 sv5679 Exp $";
130 
131 /*
132  * Linked list of large allocations
133  */
134 typedef struct bigalloc {
135 	struct bigalloc *next;	/* hash link */
136 	void	*base;		/* base pointer */
137 	u_long	bytes;		/* bytes allocated */
138 } *bigalloc_t;
139 
140 /*
141  * Note that any allocations which are exact multiples of PAGE_SIZE, or
142  * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
143  */
144 #define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
145 #define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
146 #define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
147 #define ZALLOC_ZONE_SIZE	(64 * 1024)
148 #define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
149 #define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */
150 
151 #if ZALLOC_ZONE_LIMIT == 16384
152 #define NZONES			72
153 #elif ZALLOC_ZONE_LIMIT == 32768
154 #define NZONES			80
155 #else
156 #error "I couldn't figure out NZONES"
157 #endif
158 
159 /*
160  * Chunk structure for free elements
161  */
162 typedef struct slchunk {
163 	struct slchunk *c_Next;
164 } *slchunk_t;
165 
166 /*
167  * The IN-BAND zone header is placed at the beginning of each zone.
168  */
169 struct slglobaldata;
170 
171 typedef struct slzone {
172 	int32_t		z_Magic;	/* magic number for sanity check */
173 	int		z_NFree;	/* total free chunks / ualloc space */
174 	struct slzone *z_Next;		/* ZoneAry[] link if z_NFree non-zero */
175 	int		z_NMax;		/* maximum free chunks */
176 	char		*z_BasePtr;	/* pointer to start of chunk array */
177 	int		z_UIndex;	/* current initial allocation index */
178 	int		z_UEndIndex;	/* last (first) allocation index */
179 	int		z_ChunkSize;	/* chunk size for validation */
180 	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
181 	int		z_ZoneIndex;
182 	int		z_Flags;
183 	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
184 #if defined(INVARIANTS)
185 	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
186 #endif
187 } *slzone_t;
188 
189 typedef struct slglobaldata {
190 	spinlock_t	Spinlock;
191 	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
192 	int		JunkIndex;
193 } *slglobaldata_t;
194 
195 #define SLZF_UNOTZEROD		0x0001
196 
197 #define FASTSLABREALLOC		0x02
198 
199 /*
200  * Misc constants.  Note that allocations that are exact multiples of
201  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
202  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
203  */
204 #define MIN_CHUNK_SIZE		8		/* in bytes */
205 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
206 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
207 
208 /*
209  * The WEIRD_ADDR is used as known text to copy into free objects to
210  * try to create deterministic failure cases if the data is accessed after
211  * free.
212  *
213  * WARNING: A limited number of spinlocks are available, BIGXSIZE should
214  *	    not be larger then 64.
215  */
216 #define WEIRD_ADDR      0xdeadc0de
217 #define MAX_COPY        sizeof(weirdary)
218 #define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)
219 
220 #define BIGHSHIFT	10			/* bigalloc hash table */
221 #define BIGHSIZE	(1 << BIGHSHIFT)
222 #define BIGHMASK	(BIGHSIZE - 1)
223 #define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
224 #define BIGXMASK	(BIGXSIZE - 1)
225 
226 #define SAFLAG_ZERO	0x0001
227 #define SAFLAG_PASSIVE	0x0002
228 
229 /*
230  * Thread control
231  */
232 
233 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
234 
235 #define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
236 				_mpanic("assertion: %s in %s",	\
237 				#exp, __func__);		\
238 			    } while (0)
239 
240 /*
241  * Magazines
242  */
243 
244 #define M_MAX_ROUNDS	64
245 #define M_ZONE_ROUNDS	64
246 #define M_LOW_ROUNDS	32
247 #define M_INIT_ROUNDS	8
248 #define M_BURST_FACTOR  8
249 #define M_BURST_NSCALE	2
250 
251 #define M_BURST		0x0001
252 #define M_BURST_EARLY	0x0002
253 
254 struct magazine {
255 	SLIST_ENTRY(magazine) nextmagazine;
256 
257 	int		flags;
258 	int 		capacity;	/* Max rounds in this magazine */
259 	int 		rounds;		/* Current number of free rounds */
260 	int		burst_factor;	/* Number of blocks to prefill with */
261 	int 		low_factor;	/* Free till low_factor from full mag */
262 	void		*objects[M_MAX_ROUNDS];
263 };
264 
265 SLIST_HEAD(magazinelist, magazine);
266 
267 static spinlock_t zone_mag_lock;
268 static struct magazine zone_magazine = {
269 	.flags = M_BURST | M_BURST_EARLY,
270 	.capacity = M_ZONE_ROUNDS,
271 	.rounds = 0,
272 	.burst_factor = M_BURST_FACTOR,
273 	.low_factor = M_LOW_ROUNDS
274 };
275 
276 #define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
277 #define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
278 #define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
279 #define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)
280 
281 /* Each thread will have a pair of magazines per size-class (NZONES)
282  * The loaded magazine will support immediate allocations, the previous
283  * magazine will either be full or empty and can be swapped at need */
284 typedef struct magazine_pair {
285 	struct magazine	*loaded;
286 	struct magazine	*prev;
287 } magazine_pair;
288 
289 /* A depot is a collection of magazines for a single zone. */
290 typedef struct magazine_depot {
291 	struct magazinelist full;
292 	struct magazinelist empty;
293 	pthread_spinlock_t lock;
294 } magazine_depot;
295 
296 typedef struct thr_mags {
297 	magazine_pair	mags[NZONES];
298 	struct magazine	*newmag;
299 	int		init;
300 } thr_mags;
301 
302 /* With this attribute set, do not require a function call for accessing
303  * this variable when the code is compiled -fPIC */
304 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")));
305 
306 static int mtmagazine_free_live;
307 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
308 static pthread_key_t thread_mags_key;
309 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
310 static magazine_depot depots[NZONES];
311 
312 /*
313  * Fixed globals (not per-cpu)
314  */
315 static const int ZoneSize = ZALLOC_ZONE_SIZE;
316 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
317 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
318 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
319 
320 static int opt_madvise = 0;
321 static int opt_utrace = 0;
322 static int malloc_started = 0;
323 static int g_malloc_flags = 0;
324 static spinlock_t malloc_init_lock;
325 static struct slglobaldata	SLGlobalData;
326 static bigalloc_t bigalloc_array[BIGHSIZE];
327 static spinlock_t bigspin_array[BIGXSIZE];
328 static int malloc_panic;
329 static int malloc_dummy_pointer;
330 
331 static const int32_t weirdary[16] = {
332 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
333 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
334 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
335 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
336 };
337 
338 static void *_slaballoc(size_t size, int flags);
339 static void *_slabrealloc(void *ptr, size_t size);
340 static void _slabfree(void *ptr, int, bigalloc_t *);
341 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
342 static void _vmem_free(void *ptr, size_t bytes);
343 static void *magazine_alloc(struct magazine *, int *);
344 static int magazine_free(struct magazine *, void *);
345 static void *mtmagazine_alloc(int zi);
346 static int mtmagazine_free(int zi, void *);
347 static void mtmagazine_init(void);
348 static void mtmagazine_destructor(void *);
349 static slzone_t zone_alloc(int flags);
350 static void zone_free(void *z);
351 static void _mpanic(const char *ctl, ...);
352 static void malloc_init(void);
353 #if defined(INVARIANTS)
354 static void chunk_mark_allocated(slzone_t z, void *chunk);
355 static void chunk_mark_free(slzone_t z, void *chunk);
356 #endif
357 
358 struct nmalloc_utrace {
359 	void *p;
360 	size_t s;
361 	void *r;
362 };
363 
364 #define UTRACE(a, b, c)						\
365 	if (opt_utrace) {					\
366 		struct nmalloc_utrace ut = {			\
367 			.p = (a),				\
368 			.s = (b),				\
369 			.r = (c)				\
370 		};						\
371 		utrace(&ut, sizeof(ut));			\
372 	}
373 
374 #ifdef INVARIANTS
375 /*
376  * If enabled any memory allocated without M_ZERO is initialized to -1.
377  */
378 static int  use_malloc_pattern;
379 #endif
380 
381 static void
382 malloc_init(void)
383 {
384 	const char *p = NULL;
385 
386 	if (__isthreaded) {
387 		_SPINLOCK(&malloc_init_lock);
388 		if (malloc_started) {
389 			_SPINUNLOCK(&malloc_init_lock);
390 			return;
391 		}
392 	}
393 
394 	if (issetugid() == 0)
395 		p = getenv("MALLOC_OPTIONS");
396 
397 	for (; p != NULL && *p != '\0'; p++) {
398 		switch(*p) {
399 		case 'u':	opt_utrace = 0; break;
400 		case 'U':	opt_utrace = 1; break;
401 		case 'h':	opt_madvise = 0; break;
402 		case 'H':	opt_madvise = 1; break;
403 		case 'z':	g_malloc_flags = 0; break;
404 		case 'Z': 	g_malloc_flags = SAFLAG_ZERO; break;
405 		default:
406 			break;
407 		}
408 	}
409 
410 	malloc_started = 1;
411 
412 	if (__isthreaded)
413 		_SPINUNLOCK(&malloc_init_lock);
414 
415 	UTRACE((void *) -1, 0, NULL);
416 }
417 
418 /*
419  * We have to install a handler for nmalloc thread teardowns when
420  * the thread is created.  We cannot delay this because destructors in
421  * sophisticated userland programs can call malloc() for the first time
422  * during their thread exit.
423  *
424  * This routine is called directly from pthreads.
425  */
426 void
427 _nmalloc_thr_init(void)
428 {
429 	thr_mags *tp;
430 
431 	/*
432 	 * Disallow mtmagazine operations until the mtmagazine is
433 	 * initialized.
434 	 */
435 	tp = &thread_mags;
436 	tp->init = -1;
437 
438 	pthread_setspecific(thread_mags_key, tp);
439 	if (mtmagazine_free_live == 0) {
440 		mtmagazine_free_live = 1;
441 		pthread_once(&thread_mags_once, mtmagazine_init);
442 	}
443 	tp->init = 1;
444 }
445 
446 /*
447  * Thread locks.
448  */
449 static __inline void
450 slgd_lock(slglobaldata_t slgd)
451 {
452 	if (__isthreaded)
453 		_SPINLOCK(&slgd->Spinlock);
454 }
455 
456 static __inline void
457 slgd_unlock(slglobaldata_t slgd)
458 {
459 	if (__isthreaded)
460 		_SPINUNLOCK(&slgd->Spinlock);
461 }
462 
463 static __inline void
464 depot_lock(magazine_depot *dp)
465 {
466 	if (__isthreaded)
467 		pthread_spin_lock(&dp->lock);
468 }
469 
470 static __inline void
471 depot_unlock(magazine_depot *dp)
472 {
473 	if (__isthreaded)
474 		pthread_spin_unlock(&dp->lock);
475 }
476 
477 static __inline void
478 zone_magazine_lock(void)
479 {
480 	if (__isthreaded)
481 		_SPINLOCK(&zone_mag_lock);
482 }
483 
484 static __inline void
485 zone_magazine_unlock(void)
486 {
487 	if (__isthreaded)
488 		_SPINUNLOCK(&zone_mag_lock);
489 }
490 
491 static __inline void
492 swap_mags(magazine_pair *mp)
493 {
494 	struct magazine *tmp;
495 	tmp = mp->loaded;
496 	mp->loaded = mp->prev;
497 	mp->prev = tmp;
498 }
499 
500 /*
501  * bigalloc hashing and locking support.
502  *
503  * Return an unmasked hash code for the passed pointer.
504  */
505 static __inline int
506 _bigalloc_hash(void *ptr)
507 {
508 	int hv;
509 
510 	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
511 	      ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
512 
513 	return(hv);
514 }
515 
516 /*
517  * Lock the hash chain and return a pointer to its base for the specified
518  * address.
519  */
520 static __inline bigalloc_t *
521 bigalloc_lock(void *ptr)
522 {
523 	int hv = _bigalloc_hash(ptr);
524 	bigalloc_t *bigp;
525 
526 	bigp = &bigalloc_array[hv & BIGHMASK];
527 	if (__isthreaded)
528 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
529 	return(bigp);
530 }
531 
532 /*
533  * Lock the hash chain and return a pointer to its base for the specified
534  * address.
535  *
536  * BUT, if the hash chain is empty, just return NULL and do not bother
537  * to lock anything.
538  */
539 static __inline bigalloc_t *
540 bigalloc_check_and_lock(void *ptr)
541 {
542 	int hv = _bigalloc_hash(ptr);
543 	bigalloc_t *bigp;
544 
545 	bigp = &bigalloc_array[hv & BIGHMASK];
546 	if (*bigp == NULL)
547 		return(NULL);
548 	if (__isthreaded) {
549 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
550 	}
551 	return(bigp);
552 }
553 
554 static __inline void
555 bigalloc_unlock(void *ptr)
556 {
557 	int hv;
558 
559 	if (__isthreaded) {
560 		hv = _bigalloc_hash(ptr);
561 		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
562 	}
563 }
564 
565 /*
566  * Calculate the zone index for the allocation request size and set the
567  * allocation request size to that particular zone's chunk size.
568  */
569 static __inline int
570 zoneindex(size_t *bytes, size_t *chunking)
571 {
572 	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
573 	if (n < 128) {
574 		*bytes = n = (n + 7) & ~7;
575 		*chunking = 8;
576 		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
577 	}
578 	if (n < 256) {
579 		*bytes = n = (n + 15) & ~15;
580 		*chunking = 16;
581 		return(n / 16 + 7);
582 	}
583 	if (n < 8192) {
584 		if (n < 512) {
585 			*bytes = n = (n + 31) & ~31;
586 			*chunking = 32;
587 			return(n / 32 + 15);
588 		}
589 		if (n < 1024) {
590 			*bytes = n = (n + 63) & ~63;
591 			*chunking = 64;
592 			return(n / 64 + 23);
593 		}
594 		if (n < 2048) {
595 			*bytes = n = (n + 127) & ~127;
596 			*chunking = 128;
597 			return(n / 128 + 31);
598 		}
599 		if (n < 4096) {
600 			*bytes = n = (n + 255) & ~255;
601 			*chunking = 256;
602 			return(n / 256 + 39);
603 		}
604 		*bytes = n = (n + 511) & ~511;
605 		*chunking = 512;
606 		return(n / 512 + 47);
607 	}
608 #if ZALLOC_ZONE_LIMIT > 8192
609 	if (n < 16384) {
610 		*bytes = n = (n + 1023) & ~1023;
611 		*chunking = 1024;
612 		return(n / 1024 + 55);
613 	}
614 #endif
615 #if ZALLOC_ZONE_LIMIT > 16384
616 	if (n < 32768) {
617 		*bytes = n = (n + 2047) & ~2047;
618 		*chunking = 2048;
619 		return(n / 2048 + 63);
620 	}
621 #endif
622 	_mpanic("Unexpected byte count %d", n);
623 	return(0);
624 }
625 
626 /*
627  * malloc() - call internal slab allocator
628  */
629 void *
630 malloc(size_t size)
631 {
632 	void *ptr;
633 
634 	ptr = _slaballoc(size, 0);
635 	if (ptr == NULL)
636 		errno = ENOMEM;
637 	else
638 		UTRACE(0, size, ptr);
639 	return(ptr);
640 }
641 
642 /*
643  * calloc() - call internal slab allocator
644  */
645 void *
646 calloc(size_t number, size_t size)
647 {
648 	void *ptr;
649 
650 	ptr = _slaballoc(number * size, SAFLAG_ZERO);
651 	if (ptr == NULL)
652 		errno = ENOMEM;
653 	else
654 		UTRACE(0, number * size, ptr);
655 	return(ptr);
656 }
657 
658 /*
659  * realloc() (SLAB ALLOCATOR)
660  *
661  * We do not attempt to optimize this routine beyond reusing the same
662  * pointer if the new size fits within the chunking of the old pointer's
663  * zone.
664  */
665 void *
666 realloc(void *ptr, size_t size)
667 {
668 	void *ret;
669 	ret = _slabrealloc(ptr, size);
670 	if (ret == NULL)
671 		errno = ENOMEM;
672 	else
673 		UTRACE(ptr, size, ret);
674 	return(ret);
675 }
676 
677 /*
678  * posix_memalign()
679  *
680  * Allocate (size) bytes with a alignment of (alignment), where (alignment)
681  * is a power of 2 >= sizeof(void *).
682  *
683  * The slab allocator will allocate on power-of-2 boundaries up to
684  * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
685  * zone matching the requirements, and _vmem_alloc() otherwise.
686  */
687 int
688 posix_memalign(void **memptr, size_t alignment, size_t size)
689 {
690 	bigalloc_t *bigp;
691 	bigalloc_t big;
692 	size_t chunking;
693 	int zi;
694 
695 	/*
696 	 * OpenGroup spec issue 6 checks
697 	 */
698 	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
699 		*memptr = NULL;
700 		return(EINVAL);
701 	}
702 	if (alignment < sizeof(void *)) {
703 		*memptr = NULL;
704 		return(EINVAL);
705 	}
706 
707 	/*
708 	 * Our zone mechanism guarantees same-sized alignment for any
709 	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
710 	 * we can just call _slaballoc() and be done.  We round size up
711 	 * to the nearest alignment boundary to improve our odds of
712 	 * it becoming a power-of-2 if it wasn't before.
713 	 */
714 	if (size <= alignment)
715 		size = alignment;
716 	else
717 		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
718 	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
719 		*memptr = _slaballoc(size, 0);
720 		return(*memptr ? 0 : ENOMEM);
721 	}
722 
723 	/*
724 	 * Otherwise locate a zone with a chunking that matches
725 	 * the requested alignment, within reason.   Consider two cases:
726 	 *
727 	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
728 	 *     we find will be the best fit because the chunking will be
729 	 *     greater or equal to the alignment.
730 	 *
731 	 * (2) A 513 allocation on a 256-byte alignment.  In this case
732 	 *     the first zoneindex we find will be for 576 byte allocations
733 	 *     with a chunking of 64, which is not sufficient.  To fix this
734 	 *     we simply find the nearest power-of-2 >= size and use the
735 	 *     same side-effect of _slaballoc() which guarantees
736 	 *     same-alignment on a power-of-2 allocation.
737 	 */
738 	if (size < PAGE_SIZE) {
739 		zi = zoneindex(&size, &chunking);
740 		if (chunking >= alignment) {
741 			*memptr = _slaballoc(size, 0);
742 			return(*memptr ? 0 : ENOMEM);
743 		}
744 		if (size >= 1024)
745 			alignment = 1024;
746 		if (size >= 16384)
747 			alignment = 16384;
748 		while (alignment < size)
749 			alignment <<= 1;
750 		*memptr = _slaballoc(alignment, 0);
751 		return(*memptr ? 0 : ENOMEM);
752 	}
753 
754 	/*
755 	 * If the slab allocator cannot handle it use vmem_alloc().
756 	 *
757 	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
758 	 */
759 	if (alignment < PAGE_SIZE)
760 		alignment = PAGE_SIZE;
761 	if (size < alignment)
762 		size = alignment;
763 	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
764 	*memptr = _vmem_alloc(size, alignment, 0);
765 	if (*memptr == NULL)
766 		return(ENOMEM);
767 
768 	big = _slaballoc(sizeof(struct bigalloc), 0);
769 	if (big == NULL) {
770 		_vmem_free(*memptr, size);
771 		*memptr = NULL;
772 		return(ENOMEM);
773 	}
774 	bigp = bigalloc_lock(*memptr);
775 	big->base = *memptr;
776 	big->bytes = size;
777 	big->next = *bigp;
778 	*bigp = big;
779 	bigalloc_unlock(*memptr);
780 
781 	return(0);
782 }
783 
784 /*
785  * free() (SLAB ALLOCATOR) - do the obvious
786  */
787 void
788 free(void *ptr)
789 {
790 	UTRACE(ptr, 0, 0);
791 	_slabfree(ptr, 0, NULL);
792 }
793 
794 /*
795  * _slaballoc()	(SLAB ALLOCATOR)
796  *
797  *	Allocate memory via the slab allocator.  If the request is too large,
798  *	or if it page-aligned beyond a certain size, we fall back to the
799  *	KMEM subsystem
800  */
801 static void *
802 _slaballoc(size_t size, int flags)
803 {
804 	slzone_t z;
805 	slchunk_t chunk;
806 	slglobaldata_t slgd;
807 	size_t chunking;
808 	int zi;
809 #ifdef INVARIANTS
810 	int i;
811 #endif
812 	int off;
813 	void *obj;
814 
815 	if (!malloc_started)
816 		malloc_init();
817 
818 	/*
819 	 * Handle the degenerate size == 0 case.  Yes, this does happen.
820 	 * Return a special pointer.  This is to maintain compatibility with
821 	 * the original malloc implementation.  Certain devices, such as the
822 	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
823 	 * also realloc() later on.  Joy.
824 	 */
825 	if (size == 0)
826 		return(ZERO_LENGTH_PTR);
827 
828 	/* Capture global flags */
829 	flags |= g_malloc_flags;
830 
831 	/*
832 	 * Handle large allocations directly.  There should not be very many
833 	 * of these so performance is not a big issue.
834 	 *
835 	 * The backend allocator is pretty nasty on a SMP system.   Use the
836 	 * slab allocator for one and two page-sized chunks even though we
837 	 * lose some efficiency.
838 	 */
839 	if (size >= ZoneLimit ||
840 	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
841 		bigalloc_t big;
842 		bigalloc_t *bigp;
843 
844 		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
845 		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
846 		if (chunk == NULL)
847 			return(NULL);
848 
849 		big = _slaballoc(sizeof(struct bigalloc), 0);
850 		if (big == NULL) {
851 			_vmem_free(chunk, size);
852 			return(NULL);
853 		}
854 		bigp = bigalloc_lock(chunk);
855 		big->base = chunk;
856 		big->bytes = size;
857 		big->next = *bigp;
858 		*bigp = big;
859 		bigalloc_unlock(chunk);
860 
861 		return(chunk);
862 	}
863 
864 	/* Compute allocation zone; zoneindex will panic on excessive sizes */
865 	zi = zoneindex(&size, &chunking);
866 	MASSERT(zi < NZONES);
867 
868 	obj = mtmagazine_alloc(zi);
869 	if (obj != NULL) {
870 		if (flags & SAFLAG_ZERO)
871 			bzero(obj, size);
872 		return (obj);
873 	}
874 
875 	slgd = &SLGlobalData;
876 	slgd_lock(slgd);
877 
878 	/*
879 	 * Attempt to allocate out of an existing zone.  If all zones are
880 	 * exhausted pull one off the free list or allocate a new one.
881 	 */
882 	if ((z = slgd->ZoneAry[zi]) == NULL) {
883 		z = zone_alloc(flags);
884 		if (z == NULL)
885 			goto fail;
886 
887 		/*
888 		 * How big is the base structure?
889 		 */
890 #if defined(INVARIANTS)
891 		/*
892 		 * Make room for z_Bitmap.  An exact calculation is
893 		 * somewhat more complicated so don't make an exact
894 		 * calculation.
895 		 */
896 		off = offsetof(struct slzone,
897 				z_Bitmap[(ZoneSize / size + 31) / 32]);
898 		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
899 #else
900 		off = sizeof(struct slzone);
901 #endif
902 
903 		/*
904 		 * Align the storage in the zone based on the chunking.
905 		 *
906 		 * Guarantee power-of-2 alignment for power-of-2-sized
907 		 * chunks.  Otherwise align based on the chunking size
908 		 * (typically 8 or 16 bytes for small allocations).
909 		 *
910 		 * NOTE: Allocations >= ZoneLimit are governed by the
911 		 * bigalloc code and typically only guarantee page-alignment.
912 		 *
913 		 * Set initial conditions for UIndex near the zone header
914 		 * to reduce unecessary page faults, vs semi-randomization
915 		 * to improve L1 cache saturation.
916 		 */
917 		if ((size | (size - 1)) + 1 == (size << 1))
918 			off = (off + size - 1) & ~(size - 1);
919 		else
920 			off = (off + chunking - 1) & ~(chunking - 1);
921 		z->z_Magic = ZALLOC_SLAB_MAGIC;
922 		z->z_ZoneIndex = zi;
923 		z->z_NMax = (ZoneSize - off) / size;
924 		z->z_NFree = z->z_NMax;
925 		z->z_BasePtr = (char *)z + off;
926 		z->z_UIndex = z->z_UEndIndex = 0;
927 		z->z_ChunkSize = size;
928 		z->z_FirstFreePg = ZonePageCount;
929 		z->z_Next = slgd->ZoneAry[zi];
930 		slgd->ZoneAry[zi] = z;
931 		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
932 			flags &= ~SAFLAG_ZERO;	/* already zero'd */
933 			flags |= SAFLAG_PASSIVE;
934 		}
935 
936 		/*
937 		 * Slide the base index for initial allocations out of the
938 		 * next zone we create so we do not over-weight the lower
939 		 * part of the cpu memory caches.
940 		 */
941 		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
942 					& (ZALLOC_MAX_ZONE_SIZE - 1);
943 	}
944 
945 	/*
946 	 * Ok, we have a zone from which at least one chunk is available.
947 	 *
948 	 * Remove us from the ZoneAry[] when we become empty
949 	 */
950 	MASSERT(z->z_NFree > 0);
951 
952 	if (--z->z_NFree == 0) {
953 		slgd->ZoneAry[zi] = z->z_Next;
954 		z->z_Next = NULL;
955 	}
956 
957 	/*
958 	 * Locate a chunk in a free page.  This attempts to localize
959 	 * reallocations into earlier pages without us having to sort
960 	 * the chunk list.  A chunk may still overlap a page boundary.
961 	 */
962 	while (z->z_FirstFreePg < ZonePageCount) {
963 		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
964 #ifdef DIAGNOSTIC
965 			/*
966 			 * Diagnostic: c_Next is not total garbage.
967 			 */
968 			MASSERT(chunk->c_Next == NULL ||
969 			    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
970 			    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
971 #endif
972 #ifdef INVARIANTS
973 			chunk_mark_allocated(z, chunk);
974 #endif
975 			MASSERT((uintptr_t)chunk & ZoneMask);
976 			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
977 			goto done;
978 		}
979 		++z->z_FirstFreePg;
980 	}
981 
982 	/*
983 	 * No chunks are available but NFree said we had some memory,
984 	 * so it must be available in the never-before-used-memory
985 	 * area governed by UIndex.  The consequences are very
986 	 * serious if our zone got corrupted so we use an explicit
987 	 * panic rather then a KASSERT.
988 	 */
989 	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
990 
991 	if (++z->z_UIndex == z->z_NMax)
992 		z->z_UIndex = 0;
993 	if (z->z_UIndex == z->z_UEndIndex) {
994 		if (z->z_NFree != 0)
995 			_mpanic("slaballoc: corrupted zone");
996 	}
997 
998 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
999 		flags &= ~SAFLAG_ZERO;
1000 		flags |= SAFLAG_PASSIVE;
1001 	}
1002 #if defined(INVARIANTS)
1003 	chunk_mark_allocated(z, chunk);
1004 #endif
1005 
1006 done:
1007 	slgd_unlock(slgd);
1008 	if (flags & SAFLAG_ZERO) {
1009 		bzero(chunk, size);
1010 #ifdef INVARIANTS
1011 	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1012 		if (use_malloc_pattern) {
1013 			for (i = 0; i < size; i += sizeof(int)) {
1014 				*(int *)((char *)chunk + i) = -1;
1015 			}
1016 		}
1017 		/* avoid accidental double-free check */
1018 		chunk->c_Next = (void *)-1;
1019 #endif
1020 	}
1021 	return(chunk);
1022 fail:
1023 	slgd_unlock(slgd);
1024 	return(NULL);
1025 }
1026 
1027 /*
1028  * Reallocate memory within the chunk
1029  */
1030 static void *
1031 _slabrealloc(void *ptr, size_t size)
1032 {
1033 	bigalloc_t *bigp;
1034 	void *nptr;
1035 	slzone_t z;
1036 	size_t chunking;
1037 
1038 	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
1039 		return(_slaballoc(size, 0));
1040 
1041 	if (size == 0) {
1042 	    free(ptr);
1043 	    return(ZERO_LENGTH_PTR);
1044 	}
1045 
1046 	/*
1047 	 * Handle oversized allocations.
1048 	 */
1049 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1050 		bigalloc_t big;
1051 		size_t bigbytes;
1052 
1053 		while ((big = *bigp) != NULL) {
1054 			if (big->base == ptr) {
1055 				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1056 				bigbytes = big->bytes;
1057 				if (bigbytes == size) {
1058 					bigalloc_unlock(ptr);
1059 					return(ptr);
1060 				}
1061 				*bigp = big->next;
1062 				bigalloc_unlock(ptr);
1063 				if ((nptr = _slaballoc(size, 0)) == NULL) {
1064 					/* Relink block */
1065 					bigp = bigalloc_lock(ptr);
1066 					big->next = *bigp;
1067 					*bigp = big;
1068 					bigalloc_unlock(ptr);
1069 					return(NULL);
1070 				}
1071 				if (size > bigbytes)
1072 					size = bigbytes;
1073 				bcopy(ptr, nptr, size);
1074 				_slabfree(ptr, FASTSLABREALLOC, &big);
1075 				return(nptr);
1076 			}
1077 			bigp = &big->next;
1078 		}
1079 		bigalloc_unlock(ptr);
1080 	}
1081 
1082 	/*
1083 	 * Get the original allocation's zone.  If the new request winds
1084 	 * up using the same chunk size we do not have to do anything.
1085 	 *
1086 	 * NOTE: We don't have to lock the globaldata here, the fields we
1087 	 * access here will not change at least as long as we have control
1088 	 * over the allocation.
1089 	 */
1090 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1091 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1092 
1093 	/*
1094 	 * Use zoneindex() to chunk-align the new size, as long as the
1095 	 * new size is not too large.
1096 	 */
1097 	if (size < ZoneLimit) {
1098 		zoneindex(&size, &chunking);
1099 		if (z->z_ChunkSize == size)
1100 			return(ptr);
1101 	}
1102 
1103 	/*
1104 	 * Allocate memory for the new request size and copy as appropriate.
1105 	 */
1106 	if ((nptr = _slaballoc(size, 0)) != NULL) {
1107 		if (size > z->z_ChunkSize)
1108 			size = z->z_ChunkSize;
1109 		bcopy(ptr, nptr, size);
1110 		_slabfree(ptr, 0, NULL);
1111 	}
1112 
1113 	return(nptr);
1114 }
1115 
1116 /*
1117  * free (SLAB ALLOCATOR)
1118  *
1119  * Free a memory block previously allocated by malloc.  Note that we do not
1120  * attempt to uplodate ks_loosememuse as MP races could prevent us from
1121  * checking memory limits in malloc.
1122  *
1123  * flags:
1124  *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
1125  *				unlinked.
1126  *
1127  * MPSAFE
1128  */
1129 static void
1130 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1131 {
1132 	slzone_t z;
1133 	slchunk_t chunk;
1134 	bigalloc_t big;
1135 	bigalloc_t *bigp;
1136 	slglobaldata_t slgd;
1137 	size_t size;
1138 	int zi;
1139 	int pgno;
1140 
1141 	/* Fast realloc path for big allocations */
1142 	if (flags & FASTSLABREALLOC) {
1143 		big = *rbigp;
1144 		goto fastslabrealloc;
1145 	}
1146 
1147 	/*
1148 	 * Handle NULL frees and special 0-byte allocations
1149 	 */
1150 	if (ptr == NULL)
1151 		return;
1152 	if (ptr == ZERO_LENGTH_PTR)
1153 		return;
1154 
1155 	/*
1156 	 * Handle oversized allocations.
1157 	 */
1158 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1159 		while ((big = *bigp) != NULL) {
1160 			if (big->base == ptr) {
1161 				*bigp = big->next;
1162 				bigalloc_unlock(ptr);
1163 fastslabrealloc:
1164 				size = big->bytes;
1165 				_slabfree(big, 0, NULL);
1166 #ifdef INVARIANTS
1167 				MASSERT(sizeof(weirdary) <= size);
1168 				bcopy(weirdary, ptr, sizeof(weirdary));
1169 #endif
1170 				_vmem_free(ptr, size);
1171 				return;
1172 			}
1173 			bigp = &big->next;
1174 		}
1175 		bigalloc_unlock(ptr);
1176 	}
1177 
1178 	/*
1179 	 * Zone case.  Figure out the zone based on the fact that it is
1180 	 * ZoneSize aligned.
1181 	 */
1182 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1183 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1184 
1185 	size = z->z_ChunkSize;
1186 	zi = z->z_ZoneIndex;
1187 
1188 	if (g_malloc_flags & SAFLAG_ZERO)
1189 		bzero(ptr, size);
1190 
1191 	if (mtmagazine_free(zi, ptr) == 0)
1192 		return;
1193 
1194 	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1195 	chunk = ptr;
1196 	slgd = &SLGlobalData;
1197 	slgd_lock(slgd);
1198 
1199 #ifdef INVARIANTS
1200 	/*
1201 	 * Attempt to detect a double-free.  To reduce overhead we only check
1202 	 * if there appears to be link pointer at the base of the data.
1203 	 */
1204 	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1205 		slchunk_t scan;
1206 
1207 		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1208 			if (scan == chunk)
1209 				_mpanic("Double free at %p", chunk);
1210 		}
1211 	}
1212 	chunk_mark_free(z, chunk);
1213 #endif
1214 
1215 	/*
1216 	 * Put weird data into the memory to detect modifications after
1217 	 * freeing, illegal pointer use after freeing (we should fault on
1218 	 * the odd address), and so forth.
1219 	 */
1220 #ifdef INVARIANTS
1221 	if (z->z_ChunkSize < sizeof(weirdary))
1222 		bcopy(weirdary, chunk, z->z_ChunkSize);
1223 	else
1224 		bcopy(weirdary, chunk, sizeof(weirdary));
1225 #endif
1226 
1227 	/*
1228 	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1229 	 * z_FirstFreePg.
1230 	 */
1231 	chunk->c_Next = z->z_PageAry[pgno];
1232 	z->z_PageAry[pgno] = chunk;
1233 	if (z->z_FirstFreePg > pgno)
1234 		z->z_FirstFreePg = pgno;
1235 
1236 	/*
1237 	 * Bump the number of free chunks.  If it becomes non-zero the zone
1238 	 * must be added back onto the appropriate list.
1239 	 */
1240 	if (z->z_NFree++ == 0) {
1241 		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1242 		slgd->ZoneAry[z->z_ZoneIndex] = z;
1243 	}
1244 
1245 	/*
1246 	 * If the zone becomes totally free then release it.
1247 	 */
1248 	if (z->z_NFree == z->z_NMax) {
1249 		slzone_t *pz;
1250 
1251 		pz = &slgd->ZoneAry[z->z_ZoneIndex];
1252 		while (z != *pz)
1253 			pz = &(*pz)->z_Next;
1254 		*pz = z->z_Next;
1255 		z->z_Magic = -1;
1256 		z->z_Next = NULL;
1257 		zone_free(z);
1258 		/* slgd lock released */
1259 		return;
1260 	}
1261 	slgd_unlock(slgd);
1262 }
1263 
1264 #if defined(INVARIANTS)
1265 /*
1266  * Helper routines for sanity checks
1267  */
1268 static
1269 void
1270 chunk_mark_allocated(slzone_t z, void *chunk)
1271 {
1272 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1273 	__uint32_t *bitptr;
1274 
1275 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1276 	bitptr = &z->z_Bitmap[bitdex >> 5];
1277 	bitdex &= 31;
1278 	MASSERT((*bitptr & (1 << bitdex)) == 0);
1279 	*bitptr |= 1 << bitdex;
1280 }
1281 
1282 static
1283 void
1284 chunk_mark_free(slzone_t z, void *chunk)
1285 {
1286 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1287 	__uint32_t *bitptr;
1288 
1289 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1290 	bitptr = &z->z_Bitmap[bitdex >> 5];
1291 	bitdex &= 31;
1292 	MASSERT((*bitptr & (1 << bitdex)) != 0);
1293 	*bitptr &= ~(1 << bitdex);
1294 }
1295 
1296 #endif
1297 
1298 /*
1299  * Allocate and return a magazine.  NULL is returned and *burst is adjusted
1300  * if the magazine is empty.
1301  */
1302 static __inline void *
1303 magazine_alloc(struct magazine *mp, int *burst)
1304 {
1305 	void *obj;
1306 
1307 	if (mp == NULL)
1308 		return(NULL);
1309 	if (MAGAZINE_NOTEMPTY(mp)) {
1310 		obj = mp->objects[--mp->rounds];
1311 		return(obj);
1312 	}
1313 
1314 	/*
1315 	 * Return burst factor to caller along with NULL
1316 	 */
1317 	if ((mp->flags & M_BURST) && (burst != NULL)) {
1318 		*burst = mp->burst_factor;
1319 	}
1320 	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1321 	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1322 	    (burst != NULL)) {
1323 		mp->burst_factor -= M_BURST_NSCALE;
1324 		if (mp->burst_factor <= 1) {
1325 			mp->burst_factor = 1;
1326 			mp->flags &= ~(M_BURST);
1327 			mp->flags &= ~(M_BURST_EARLY);
1328 		}
1329 	}
1330 	return (NULL);
1331 }
1332 
1333 static __inline int
1334 magazine_free(struct magazine *mp, void *p)
1335 {
1336 	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1337 		mp->objects[mp->rounds++] = p;
1338 		return 0;
1339 	}
1340 
1341 	return -1;
1342 }
1343 
1344 static void *
1345 mtmagazine_alloc(int zi)
1346 {
1347 	thr_mags *tp;
1348 	struct magazine *mp, *emptymag;
1349 	magazine_depot *d;
1350 	void *obj;
1351 
1352 	/*
1353 	 * Do not try to access per-thread magazines while the mtmagazine
1354 	 * is being initialized or destroyed.
1355 	 */
1356 	tp = &thread_mags;
1357 	if (tp->init < 0)
1358 		return(NULL);
1359 
1360 	/*
1361 	 * Primary per-thread allocation loop
1362 	 */
1363 	for (;;) {
1364 		/*
1365 		 * If the loaded magazine has rounds, allocate and return
1366 		 */
1367 		mp = tp->mags[zi].loaded;
1368 		obj = magazine_alloc(mp, NULL);
1369 		if (obj)
1370 			break;
1371 
1372 		/*
1373 		 * If the prev magazine is full, swap with the loaded
1374 		 * magazine and retry.
1375 		 */
1376 		mp = tp->mags[zi].prev;
1377 		if (mp && MAGAZINE_FULL(mp)) {
1378 			MASSERT(mp->rounds != 0);
1379 			swap_mags(&tp->mags[zi]);
1380 			continue;
1381 		}
1382 
1383 		/*
1384 		 * Try to get a full magazine from the depot.  Cycle
1385 		 * through depot(full)->loaded->prev->depot(empty).
1386 		 * Retry if a full magazine was available from the depot.
1387 		 *
1388 		 * Return NULL (caller will fall through) if no magazines
1389 		 * can be found anywhere.
1390 		 */
1391 		d = &depots[zi];
1392 		depot_lock(d);
1393 		emptymag = tp->mags[zi].prev;
1394 		if (emptymag)
1395 			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1396 		tp->mags[zi].prev = tp->mags[zi].loaded;
1397 		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
1398 		tp->mags[zi].loaded = mp;
1399 		if (mp) {
1400 			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1401 			MASSERT(MAGAZINE_NOTEMPTY(mp));
1402 			depot_unlock(d);
1403 			continue;
1404 		}
1405 		depot_unlock(d);
1406 		break;
1407 	}
1408 
1409 	return (obj);
1410 }
1411 
1412 static int
1413 mtmagazine_free(int zi, void *ptr)
1414 {
1415 	thr_mags *tp;
1416 	struct magazine *mp, *loadedmag;
1417 	magazine_depot *d;
1418 	int rc = -1;
1419 
1420 	/*
1421 	 * Do not try to access per-thread magazines while the mtmagazine
1422 	 * is being initialized or destroyed.
1423 	 */
1424 	tp = &thread_mags;
1425 	if (tp->init < 0)
1426 		return(-1);
1427 
1428 	/*
1429 	 * Primary per-thread freeing loop
1430 	 */
1431 	for (;;) {
1432 		/*
1433 		 * If the loaded magazine has space, free directly to it
1434 		 */
1435 		rc = magazine_free(tp->mags[zi].loaded, ptr);
1436 		if (rc == 0)
1437 			break;
1438 
1439 		/*
1440 		 * If the prev magazine is empty, swap with the loaded
1441 		 * magazine and retry.
1442 		 */
1443 		mp = tp->mags[zi].prev;
1444 		if (mp && MAGAZINE_EMPTY(mp)) {
1445 			MASSERT(mp->rounds == 0);
1446 			swap_mags(&tp->mags[zi]);
1447 			continue;
1448 		}
1449 
1450 		/*
1451 		 * Make sure a new magazine is available in case we have
1452 		 * to use it.  Staging the newmag allows us to avoid
1453 		 * some complexity.
1454 		 *
1455 		 * We have a lot of assumed state here so temporarily
1456 		 * disable the per-thread caches for this allocation
1457 		 * to avoid reentrancy.
1458 		 */
1459 		if (tp->newmag == NULL) {
1460 			tp->init = -1;
1461 			tp->newmag = _slaballoc(sizeof(struct magazine),
1462 						SAFLAG_ZERO);
1463 			tp->init = 1;
1464 			if (tp->newmag == NULL) {
1465 				rc = -1;
1466 				break;
1467 			}
1468 		}
1469 
1470 		/*
1471 		 * Try to get an empty magazine from the depot.  Cycle
1472 		 * through depot(empty)->loaded->prev->depot(full).
1473 		 * Retry if an empty magazine was available from the depot.
1474 		 */
1475 		d = &depots[zi];
1476 		depot_lock(d);
1477 
1478 		if ((loadedmag = tp->mags[zi].prev) != NULL)
1479 			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1480 		tp->mags[zi].prev = tp->mags[zi].loaded;
1481 		mp = SLIST_FIRST(&d->empty);
1482 		if (mp) {
1483 			tp->mags[zi].loaded = mp;
1484 			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1485 			MASSERT(MAGAZINE_NOTFULL(mp));
1486 		} else {
1487 			mp = tp->newmag;
1488 			tp->newmag = NULL;
1489 			mp->capacity = M_MAX_ROUNDS;
1490 			mp->rounds = 0;
1491 			mp->flags = 0;
1492 			tp->mags[zi].loaded = mp;
1493 		}
1494 		depot_unlock(d);
1495 	}
1496 
1497 	return rc;
1498 }
1499 
1500 static void
1501 mtmagazine_init(void)
1502 {
1503 	int error;
1504 
1505 	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1506 	if (error)
1507 		abort();
1508 }
1509 
1510 /*
1511  * This function is only used by the thread exit destructor
1512  */
1513 static void
1514 mtmagazine_drain(struct magazine *mp)
1515 {
1516 	void *obj;
1517 
1518 	while (MAGAZINE_NOTEMPTY(mp)) {
1519 		obj = magazine_alloc(mp, NULL);
1520 		_slabfree(obj, 0, NULL);
1521 	}
1522 }
1523 
1524 /*
1525  * mtmagazine_destructor()
1526  *
1527  * When a thread exits, we reclaim all its resources; all its magazines are
1528  * drained and the structures are freed.
1529  *
1530  * WARNING!  The destructor can be called multiple times if the larger user
1531  *	     program has its own destructors which run after ours which
1532  *	     allocate or free memory.
1533  */
1534 static void
1535 mtmagazine_destructor(void *thrp)
1536 {
1537 	thr_mags *tp = thrp;
1538 	struct magazine *mp;
1539 	int i;
1540 
1541 	/*
1542 	 * Prevent further use of mtmagazines while we are destructing
1543 	 * them, as well as for any destructors which are run after us
1544 	 * prior to the thread actually being destroyed.
1545 	 */
1546 	tp->init = -1;
1547 
1548 	for (i = 0; i < NZONES; i++) {
1549 		mp = tp->mags[i].loaded;
1550 		tp->mags[i].loaded = NULL;
1551 		if (mp) {
1552 			if (MAGAZINE_NOTEMPTY(mp))
1553 				mtmagazine_drain(mp);
1554 			_slabfree(mp, 0, NULL);
1555 		}
1556 
1557 		mp = tp->mags[i].prev;
1558 		tp->mags[i].prev = NULL;
1559 		if (mp) {
1560 			if (MAGAZINE_NOTEMPTY(mp))
1561 				mtmagazine_drain(mp);
1562 			_slabfree(mp, 0, NULL);
1563 		}
1564 	}
1565 
1566 	if (tp->newmag) {
1567 		mp = tp->newmag;
1568 		tp->newmag = NULL;
1569 		_slabfree(mp, 0, NULL);
1570 	}
1571 }
1572 
1573 /*
1574  * zone_alloc()
1575  *
1576  * Attempt to allocate a zone from the zone magazine; the zone magazine has
1577  * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1578  */
1579 static slzone_t
1580 zone_alloc(int flags)
1581 {
1582 	slglobaldata_t slgd = &SLGlobalData;
1583 	int burst = 1;
1584 	int i, j;
1585 	slzone_t z;
1586 
1587 	zone_magazine_lock();
1588 	slgd_unlock(slgd);
1589 
1590 	z = magazine_alloc(&zone_magazine, &burst);
1591 	if (z == NULL && burst == 1) {
1592 		zone_magazine_unlock();
1593 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1594 	} else if (z == NULL) {
1595 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1596 		if (z) {
1597 			for (i = 1; i < burst; i++) {
1598 				j = magazine_free(&zone_magazine,
1599 						  (char *) z + (ZoneSize * i));
1600 				MASSERT(j == 0);
1601 			}
1602 		}
1603 		zone_magazine_unlock();
1604 	} else {
1605 		z->z_Flags |= SLZF_UNOTZEROD;
1606 		zone_magazine_unlock();
1607 	}
1608 	slgd_lock(slgd);
1609 	return z;
1610 }
1611 
1612 /*
1613  * zone_free()
1614  *
1615  * Release a zone and unlock the slgd lock.
1616  */
1617 static void
1618 zone_free(void *z)
1619 {
1620 	slglobaldata_t slgd = &SLGlobalData;
1621 	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1622 	int i, j;
1623 
1624 	zone_magazine_lock();
1625 	slgd_unlock(slgd);
1626 
1627 	bzero(z, sizeof(struct slzone));
1628 
1629 	if (opt_madvise)
1630 		madvise(z, ZoneSize, MADV_FREE);
1631 
1632 	i = magazine_free(&zone_magazine, z);
1633 
1634 	/*
1635 	 * If we failed to free, collect excess magazines; release the zone
1636 	 * magazine lock, and then free to the system via _vmem_free. Re-enable
1637 	 * BURST mode for the magazine.
1638 	 */
1639 	if (i == -1) {
1640 		j = zone_magazine.rounds - zone_magazine.low_factor;
1641 		for (i = 0; i < j; i++) {
1642 			excess[i] = magazine_alloc(&zone_magazine, NULL);
1643 			MASSERT(excess[i] !=  NULL);
1644 		}
1645 
1646 		zone_magazine_unlock();
1647 
1648 		for (i = 0; i < j; i++)
1649 			_vmem_free(excess[i], ZoneSize);
1650 
1651 		_vmem_free(z, ZoneSize);
1652 	} else {
1653 		zone_magazine_unlock();
1654 	}
1655 }
1656 
1657 /*
1658  * _vmem_alloc()
1659  *
1660  *	Directly map memory in PAGE_SIZE'd chunks with the specified
1661  *	alignment.
1662  *
1663  *	Alignment must be a multiple of PAGE_SIZE.
1664  *
1665  *	Size must be >= alignment.
1666  */
1667 static void *
1668 _vmem_alloc(size_t size, size_t align, int flags)
1669 {
1670 	char *addr;
1671 	char *save;
1672 	size_t excess;
1673 
1674 	/*
1675 	 * Map anonymous private memory.
1676 	 */
1677 	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1678 		    MAP_PRIVATE|MAP_ANON, -1, 0);
1679 	if (addr == MAP_FAILED)
1680 		return(NULL);
1681 
1682 	/*
1683 	 * Check alignment.  The misaligned offset is also the excess
1684 	 * amount.  If misaligned unmap the excess so we have a chance of
1685 	 * mapping at the next alignment point and recursively try again.
1686 	 *
1687 	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
1688 	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
1689 	 *   xxxxxxxxx				final excess calculation
1690 	 *   ^ returned address
1691 	 */
1692 	excess = (uintptr_t)addr & (align - 1);
1693 
1694 	if (excess) {
1695 		excess = align - excess;
1696 		save = addr;
1697 
1698 		munmap(save + excess, size - excess);
1699 		addr = _vmem_alloc(size, align, flags);
1700 		munmap(save, excess);
1701 	}
1702 	return((void *)addr);
1703 }
1704 
1705 /*
1706  * _vmem_free()
1707  *
1708  *	Free a chunk of memory allocated with _vmem_alloc()
1709  */
1710 static void
1711 _vmem_free(void *ptr, size_t size)
1712 {
1713 	munmap(ptr, size);
1714 }
1715 
1716 /*
1717  * Panic on fatal conditions
1718  */
1719 static void
1720 _mpanic(const char *ctl, ...)
1721 {
1722 	va_list va;
1723 
1724 	if (malloc_panic == 0) {
1725 		malloc_panic = 1;
1726 		va_start(va, ctl);
1727 		vfprintf(stderr, ctl, va);
1728 		fprintf(stderr, "\n");
1729 		fflush(stderr);
1730 		va_end(va);
1731 	}
1732 	abort();
1733 }
1734