xref: /dflybsd-src/lib/libc/stdlib/nmalloc.c (revision 7895edcd82677365061c6b0f7ea158cc594bc515)
1 /*
2  * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
3  *
4  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * This module implements a slab allocator drop-in replacement for the
38  * libc malloc().
39  *
40  * A slab allocator reserves a ZONE for each chunk size, then lays the
41  * chunks out in an array within the zone.  Allocation and deallocation
42  * is nearly instantanious, and overhead losses are limited to a fixed
43  * worst-case amount.
44  *
45  * The slab allocator does not have to pre-initialize the list of
46  * free chunks for each zone, and the underlying VM will not be
47  * touched at all beyond the zone header until an actual allocation
48  * needs it.
49  *
50  * Slab management and locking is done on a per-zone basis.
51  *
52  *	Alloc Size	Chunking        Number of zones
53  *	0-127		8		16
54  *	128-255		16		8
55  *	256-511		32		8
56  *	512-1023	64		8
57  *	1024-2047	128		8
58  *	2048-4095	256		8
59  *	4096-8191	512		8
60  *	8192-16383	1024		8
61  *	16384-32767	2048		8
62  *
63  *	Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
64  *	is used to locate for free.  One and Two-page allocations use the
65  *	zone mechanic to avoid excessive mmap()/munmap() calls.
66  *
67  *			   API FEATURES AND SIDE EFFECTS
68  *
69  *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
70  *	Above that power-of-2 sized allocations are page-aligned.  Non
71  *	power-of-2 sized allocations are aligned the same as the chunk
72  *	size for their zone.
73  *    + malloc(0) returns a special non-NULL value
74  *    + ability to allocate arbitrarily large chunks of memory
75  *    + realloc will reuse the passed pointer if possible, within the
76  *	limitations of the zone chunking.
77  */
78 
79 #include "libc_private.h"
80 
81 #include <sys/param.h>
82 #include <sys/types.h>
83 #include <sys/mman.h>
84 #include <stdio.h>
85 #include <stdlib.h>
86 #include <stdarg.h>
87 #include <stddef.h>
88 #include <unistd.h>
89 #include <string.h>
90 #include <fcntl.h>
91 #include <errno.h>
92 
93 #include "spinlock.h"
94 #include "un-namespace.h"
95 
96 /*
97  * Linked list of large allocations
98  */
99 typedef struct bigalloc {
100 	struct bigalloc *next;	/* hash link */
101 	void	*base;		/* base pointer */
102 	u_long	bytes;		/* bytes allocated */
103 	u_long	unused01;
104 } *bigalloc_t;
105 
106 /*
107  * Note that any allocations which are exact multiples of PAGE_SIZE, or
108  * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
109  */
110 #define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
111 #define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
112 #define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
113 #define ZALLOC_ZONE_SIZE	(64 * 1024)
114 #define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
115 #define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */
116 
117 #if ZALLOC_ZONE_LIMIT == 16384
118 #define NZONES			72
119 #elif ZALLOC_ZONE_LIMIT == 32768
120 #define NZONES			80
121 #else
122 #error "I couldn't figure out NZONES"
123 #endif
124 
125 /*
126  * Chunk structure for free elements
127  */
128 typedef struct slchunk {
129 	struct slchunk *c_Next;
130 } *slchunk_t;
131 
132 /*
133  * The IN-BAND zone header is placed at the beginning of each zone.
134  */
135 struct slglobaldata;
136 
137 typedef struct slzone {
138 	__int32_t	z_Magic;	/* magic number for sanity check */
139 	int		z_NFree;	/* total free chunks / ualloc space */
140 	struct slzone *z_Next;		/* ZoneAry[] link if z_NFree non-zero */
141 	struct slglobaldata *z_GlobalData;
142 	int		z_NMax;		/* maximum free chunks */
143 	char		*z_BasePtr;	/* pointer to start of chunk array */
144 	int		z_UIndex;	/* current initial allocation index */
145 	int		z_UEndIndex;	/* last (first) allocation index */
146 	int		z_ChunkSize;	/* chunk size for validation */
147 	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
148 	int		z_ZoneIndex;
149 	int		z_Flags;
150 	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
151 #if defined(INVARIANTS)
152 	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
153 #endif
154 } *slzone_t;
155 
156 typedef struct slglobaldata {
157 	spinlock_t	Spinlock;
158 	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
159 	slzone_t	FreeZones;	/* whole zones that have become free */
160 	int		NFreeZones;	/* free zone count */
161 	int		JunkIndex;
162 } *slglobaldata_t;
163 
164 #define SLZF_UNOTZEROD		0x0001
165 
166 /*
167  * Misc constants.  Note that allocations that are exact multiples of
168  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
169  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
170  */
171 #define MIN_CHUNK_SIZE		8		/* in bytes */
172 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
173 #define ZONE_RELS_THRESH	4		/* threshold number of zones */
174 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
175 
176 /*
177  * The WEIRD_ADDR is used as known text to copy into free objects to
178  * try to create deterministic failure cases if the data is accessed after
179  * free.
180  */
181 #define WEIRD_ADDR      0xdeadc0de
182 #define MAX_COPY        sizeof(weirdary)
183 #define ZERO_LENGTH_PTR	((void *)-8)
184 
185 #define BIGHSHIFT	10			/* bigalloc hash table */
186 #define BIGHSIZE	(1 << BIGHSHIFT)
187 #define BIGHMASK	(BIGHSIZE - 1)
188 #define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
189 #define BIGXMASK	(BIGXSIZE - 1)
190 
191 #define SLGD_MAX	4			/* parallel allocations */
192 
193 #define SAFLAG_ZERO	0x0001
194 #define SAFLAG_PASSIVE	0x0002
195 
196 /*
197  * Thread control
198  */
199 
200 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
201 
202 #define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
203 				_mpanic("assertion: %s in %s",	\
204 				#exp, __func__);		\
205 			    } while (0)
206 
207 /*
208  * Fixed globals (not per-cpu)
209  */
210 static const int ZoneSize = ZALLOC_ZONE_SIZE;
211 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
212 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
213 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
214 
215 static struct slglobaldata	SLGlobalData[SLGD_MAX];
216 static bigalloc_t bigalloc_array[BIGHSIZE];
217 static spinlock_t bigspin_array[BIGXSIZE];
218 static int malloc_panic;
219 
220 static const int32_t weirdary[16] = {
221 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
222 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
223 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
224 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
225 };
226 
227 static __thread slglobaldata_t LastSLGD = &SLGlobalData[0];
228 
229 static void *_slaballoc(size_t size, int flags);
230 static void *_slabrealloc(void *ptr, size_t size);
231 static void _slabfree(void *ptr);
232 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
233 static void _vmem_free(void *ptr, size_t bytes);
234 static void _mpanic(const char *ctl, ...);
235 #if defined(INVARIANTS)
236 static void chunk_mark_allocated(slzone_t z, void *chunk);
237 static void chunk_mark_free(slzone_t z, void *chunk);
238 #endif
239 
240 #ifdef INVARIANTS
241 /*
242  * If enabled any memory allocated without M_ZERO is initialized to -1.
243  */
244 static int  use_malloc_pattern;
245 #endif
246 
247 /*
248  * Thread locks.
249  *
250  * NOTE: slgd_trylock() returns 0 or EBUSY
251  */
252 static __inline void
253 slgd_lock(slglobaldata_t slgd)
254 {
255 	if (__isthreaded)
256 		_SPINLOCK(&slgd->Spinlock);
257 }
258 
259 static __inline int
260 slgd_trylock(slglobaldata_t slgd)
261 {
262 	if (__isthreaded)
263 		return(_SPINTRYLOCK(&slgd->Spinlock));
264 	return(0);
265 }
266 
267 static __inline void
268 slgd_unlock(slglobaldata_t slgd)
269 {
270 	if (__isthreaded)
271 		_SPINUNLOCK(&slgd->Spinlock);
272 }
273 
274 /*
275  * bigalloc hashing and locking support.
276  *
277  * Return an unmasked hash code for the passed pointer.
278  */
279 static __inline int
280 _bigalloc_hash(void *ptr)
281 {
282 	int hv;
283 
284 	hv = ((int)ptr >> PAGE_SHIFT) ^ ((int)ptr >> (PAGE_SHIFT + BIGHSHIFT));
285 
286 	return(hv);
287 }
288 
289 /*
290  * Lock the hash chain and return a pointer to its base for the specified
291  * address.
292  */
293 static __inline bigalloc_t *
294 bigalloc_lock(void *ptr)
295 {
296 	int hv = _bigalloc_hash(ptr);
297 	bigalloc_t *bigp;
298 
299 	bigp = &bigalloc_array[hv & BIGHMASK];
300 	if (__isthreaded)
301 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
302 	return(bigp);
303 }
304 
305 /*
306  * Lock the hash chain and return a pointer to its base for the specified
307  * address.
308  *
309  * BUT, if the hash chain is empty, just return NULL and do not bother
310  * to lock anything.
311  */
312 static __inline bigalloc_t *
313 bigalloc_check_and_lock(void *ptr)
314 {
315 	int hv = _bigalloc_hash(ptr);
316 	bigalloc_t *bigp;
317 
318 	bigp = &bigalloc_array[hv & BIGHMASK];
319 	if (*bigp == NULL)
320 		return(NULL);
321 	if (__isthreaded) {
322 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
323 	}
324 	return(bigp);
325 }
326 
327 static __inline void
328 bigalloc_unlock(void *ptr)
329 {
330 	int hv;
331 
332 	if (__isthreaded) {
333 		hv = _bigalloc_hash(ptr);
334 		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
335 	}
336 }
337 
338 /*
339  * Calculate the zone index for the allocation request size and set the
340  * allocation request size to that particular zone's chunk size.
341  */
342 static __inline int
343 zoneindex(size_t *bytes, size_t *chunking)
344 {
345 	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
346 	if (n < 128) {
347 		*bytes = n = (n + 7) & ~7;
348 		*chunking = 8;
349 		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
350 	}
351 	if (n < 256) {
352 		*bytes = n = (n + 15) & ~15;
353 		*chunking = 16;
354 		return(n / 16 + 7);
355 	}
356 	if (n < 8192) {
357 		if (n < 512) {
358 			*bytes = n = (n + 31) & ~31;
359 			*chunking = 32;
360 			return(n / 32 + 15);
361 		}
362 		if (n < 1024) {
363 			*bytes = n = (n + 63) & ~63;
364 			*chunking = 64;
365 			return(n / 64 + 23);
366 		}
367 		if (n < 2048) {
368 			*bytes = n = (n + 127) & ~127;
369 			*chunking = 128;
370 			return(n / 128 + 31);
371 		}
372 		if (n < 4096) {
373 			*bytes = n = (n + 255) & ~255;
374 			*chunking = 256;
375 			return(n / 256 + 39);
376 		}
377 		*bytes = n = (n + 511) & ~511;
378 		*chunking = 512;
379 		return(n / 512 + 47);
380 	}
381 #if ZALLOC_ZONE_LIMIT > 8192
382 	if (n < 16384) {
383 		*bytes = n = (n + 1023) & ~1023;
384 		*chunking = 1024;
385 		return(n / 1024 + 55);
386 	}
387 #endif
388 #if ZALLOC_ZONE_LIMIT > 16384
389 	if (n < 32768) {
390 		*bytes = n = (n + 2047) & ~2047;
391 		*chunking = 2048;
392 		return(n / 2048 + 63);
393 	}
394 #endif
395 	_mpanic("Unexpected byte count %d", n);
396 	return(0);
397 }
398 
399 /*
400  * malloc() - call internal slab allocator
401  */
402 void *
403 malloc(size_t size)
404 {
405 	return(_slaballoc(size, 0));
406 }
407 
408 /*
409  * calloc() - call internal slab allocator
410  */
411 void *
412 calloc(size_t number, size_t size)
413 {
414 	return(_slaballoc(number * size, SAFLAG_ZERO));
415 }
416 
417 /*
418  * realloc() (SLAB ALLOCATOR)
419  *
420  * We do not attempt to optimize this routine beyond reusing the same
421  * pointer if the new size fits within the chunking of the old pointer's
422  * zone.
423  */
424 void *
425 realloc(void *ptr, size_t size)
426 {
427 	return(_slabrealloc(ptr, size));
428 }
429 
430 void
431 free(void *ptr)
432 {
433 	_slabfree(ptr);
434 }
435 
436 /*
437  * _slaballoc()	(SLAB ALLOCATOR)
438  *
439  *	Allocate memory via the slab allocator.  If the request is too large,
440  *	or if it page-aligned beyond a certain size, we fall back to the
441  *	KMEM subsystem
442  */
443 static void *
444 _slaballoc(size_t size, int flags)
445 {
446 	slzone_t z;
447 	slchunk_t chunk;
448 	slglobaldata_t slgd;
449 	int chunking;
450 	int zi;
451 #ifdef INVARIANTS
452 	int i;
453 #endif
454 	int off;
455 
456 	/*
457 	 * Handle the degenerate size == 0 case.  Yes, this does happen.
458 	 * Return a special pointer.  This is to maintain compatibility with
459 	 * the original malloc implementation.  Certain devices, such as the
460 	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
461 	 * also realloc() later on.  Joy.
462 	 */
463 	if (size == 0)
464 		return(ZERO_LENGTH_PTR);
465 
466 	/*
467 	 * Handle large allocations directly.  There should not be very many
468 	 * of these so performance is not a big issue.
469 	 *
470 	 * The backend allocator is pretty nasty on a SMP system.   Use the
471 	 * slab allocator for one and two page-sized chunks even though we
472 	 * lose some efficiency.
473 	 */
474 	if (size >= ZoneLimit ||
475 	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
476 		bigalloc_t big;
477 		bigalloc_t *bigp;
478 
479 		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
480 		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
481 		if (chunk == NULL)
482 			return(NULL);
483 
484 		big = _slaballoc(sizeof(struct bigalloc), 0);
485 		bigp = bigalloc_lock(chunk);
486 		big->base = chunk;
487 		big->bytes = size;
488 		big->unused01 = 0;
489 		big->next = *bigp;
490 		*bigp = big;
491 		bigalloc_unlock(chunk);
492 
493 		return(chunk);
494 	}
495 
496 	/*
497 	 * Multi-threading support.  This needs work XXX.
498 	 *
499 	 * Choose a globaldata structure to allocate from.  If we cannot
500 	 * immediately get the lock try a different one.
501 	 *
502 	 * LastSLGD is a per-thread global.
503 	 */
504 	slgd = LastSLGD;
505 	if (slgd_trylock(slgd) != 0) {
506 		if (++slgd == &SLGlobalData[SLGD_MAX])
507 			slgd = &SLGlobalData[0];
508 		LastSLGD = slgd;
509 		slgd_lock(slgd);
510 	}
511 
512 	/*
513 	 * Attempt to allocate out of an existing zone.  If all zones are
514 	 * exhausted pull one off the free list or allocate a new one.
515 	 *
516 	 * Note: zoneindex() will panic of size is too large.
517 	 */
518 	zi = zoneindex(&size, &chunking);
519 	MASSERT(zi < NZONES);
520 
521 	if ((z = slgd->ZoneAry[zi]) == NULL) {
522 		/*
523 		 * Pull the zone off the free list.  If the zone on
524 		 * the free list happens to be correctly set up we
525 		 * do not have to reinitialize it.
526 		 */
527 		if ((z = slgd->FreeZones) != NULL) {
528 			slgd->FreeZones = z->z_Next;
529 			--slgd->NFreeZones;
530 			if (z->z_ChunkSize == size) {
531 				z->z_Magic = ZALLOC_SLAB_MAGIC;
532 				z->z_Next = slgd->ZoneAry[zi];
533 				slgd->ZoneAry[zi] = z;
534 				goto have_zone;
535 			}
536 			bzero(z, sizeof(struct slzone));
537 			z->z_Flags |= SLZF_UNOTZEROD;
538 		} else {
539 			z = _vmem_alloc(ZoneSize, ZoneSize, flags);
540 			if (z == NULL)
541 				goto fail;
542 		}
543 
544 		/*
545 		 * How big is the base structure?
546 		 */
547 #if defined(INVARIANTS)
548 		/*
549 		 * Make room for z_Bitmap.  An exact calculation is
550 		 * somewhat more complicated so don't make an exact
551 		 * calculation.
552 		 */
553 		off = offsetof(struct slzone,
554 				z_Bitmap[(ZoneSize / size + 31) / 32]);
555 		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
556 #else
557 		off = sizeof(struct slzone);
558 #endif
559 
560 		/*
561 		 * Align the storage in the zone based on the chunking.
562 		 *
563 		 * Guarentee power-of-2 alignment for power-of-2-sized
564 		 * chunks.  Otherwise align based on the chunking size
565 		 * (typically 8 or 16 bytes for small allocations).
566 		 *
567 		 * NOTE: Allocations >= ZoneLimit are governed by the
568 		 * bigalloc code and typically only guarantee page-alignment.
569 		 *
570 		 * Set initial conditions for UIndex near the zone header
571 		 * to reduce unecessary page faults, vs semi-randomization
572 		 * to improve L1 cache saturation.
573 		 */
574 		if ((size | (size - 1)) + 1 == (size << 1))
575 			off = (off + size - 1) & ~(size - 1);
576 		else
577 			off = (off + chunking - 1) & ~(chunking - 1);
578 		z->z_Magic = ZALLOC_SLAB_MAGIC;
579 		z->z_GlobalData = slgd;
580 		z->z_ZoneIndex = zi;
581 		z->z_NMax = (ZoneSize - off) / size;
582 		z->z_NFree = z->z_NMax;
583 		z->z_BasePtr = (char *)z + off;
584 		/*z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;*/
585 		z->z_UIndex = z->z_UEndIndex = 0;
586 		z->z_ChunkSize = size;
587 		z->z_FirstFreePg = ZonePageCount;
588 		z->z_Next = slgd->ZoneAry[zi];
589 		slgd->ZoneAry[zi] = z;
590 		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
591 			flags &= ~SAFLAG_ZERO;	/* already zero'd */
592 			flags |= SAFLAG_PASSIVE;
593 		}
594 
595 		/*
596 		 * Slide the base index for initial allocations out of the
597 		 * next zone we create so we do not over-weight the lower
598 		 * part of the cpu memory caches.
599 		 */
600 		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
601 					& (ZALLOC_MAX_ZONE_SIZE - 1);
602 	}
603 
604 	/*
605 	 * Ok, we have a zone from which at least one chunk is available.
606 	 *
607 	 * Remove us from the ZoneAry[] when we become empty
608 	 */
609 have_zone:
610 	MASSERT(z->z_NFree > 0);
611 
612 	if (--z->z_NFree == 0) {
613 		slgd->ZoneAry[zi] = z->z_Next;
614 		z->z_Next = NULL;
615 	}
616 
617 	/*
618 	 * Locate a chunk in a free page.  This attempts to localize
619 	 * reallocations into earlier pages without us having to sort
620 	 * the chunk list.  A chunk may still overlap a page boundary.
621 	 */
622 	while (z->z_FirstFreePg < ZonePageCount) {
623 		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
624 #ifdef DIAGNOSTIC
625 			/*
626 			 * Diagnostic: c_Next is not total garbage.
627 			 */
628 			MASSERT(chunk->c_Next == NULL ||
629 			    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
630 			    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
631 #endif
632 #ifdef INVARIANTS
633 			chunk_mark_allocated(z, chunk);
634 #endif
635 			MASSERT((uintptr_t)chunk & ZoneMask);
636 			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
637 			goto done;
638 		}
639 		++z->z_FirstFreePg;
640 	}
641 
642 	/*
643 	 * No chunks are available but NFree said we had some memory,
644 	 * so it must be available in the never-before-used-memory
645 	 * area governed by UIndex.  The consequences are very
646 	 * serious if our zone got corrupted so we use an explicit
647 	 * panic rather then a KASSERT.
648 	 */
649 	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
650 
651 	if (++z->z_UIndex == z->z_NMax)
652 		z->z_UIndex = 0;
653 	if (z->z_UIndex == z->z_UEndIndex) {
654 		if (z->z_NFree != 0)
655 			_mpanic("slaballoc: corrupted zone");
656 	}
657 
658 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
659 		flags &= ~SAFLAG_ZERO;
660 		flags |= SAFLAG_PASSIVE;
661 	}
662 #if defined(INVARIANTS)
663 	chunk_mark_allocated(z, chunk);
664 #endif
665 
666 done:
667 	slgd_unlock(slgd);
668 	if (flags & SAFLAG_ZERO) {
669 		bzero(chunk, size);
670 #ifdef INVARIANTS
671 	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
672 		if (use_malloc_pattern) {
673 			for (i = 0; i < size; i += sizeof(int)) {
674 				*(int *)((char *)chunk + i) = -1;
675 			}
676 		}
677 		/* avoid accidental double-free check */
678 		chunk->c_Next = (void *)-1;
679 #endif
680 	}
681 	return(chunk);
682 fail:
683 	slgd_unlock(slgd);
684 	return(NULL);
685 }
686 
687 /*
688  * Reallocate memory within the chunk
689  */
690 static void *
691 _slabrealloc(void *ptr, size_t size)
692 {
693 	bigalloc_t *bigp;
694 	void *nptr;
695 	slzone_t z;
696 	size_t chunking;
697 
698 	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
699 		return(_slaballoc(size, 0));
700 
701 	if (size == 0) {
702 	    free(ptr);
703 	    return(ZERO_LENGTH_PTR);
704 	}
705 
706 	/*
707 	 * Handle oversized allocations.  XXX we really should require
708 	 * that a size be passed to free() instead of this nonsense.
709 	 */
710 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
711 		bigalloc_t big;
712 		size_t bigbytes;
713 
714 		while ((big = *bigp) != NULL) {
715 			if (big->base == ptr) {
716 				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
717 				bigbytes = big->bytes;
718 				bigalloc_unlock(ptr);
719 				if (bigbytes == size)
720 					return(ptr);
721 				if ((nptr = _slaballoc(size, 0)) == NULL)
722 					return(NULL);
723 				if (size > bigbytes)
724 					size = bigbytes;
725 				bcopy(ptr, nptr, size);
726 				_slabfree(ptr);
727 				return(nptr);
728 			}
729 			bigp = &big->next;
730 		}
731 		bigalloc_unlock(ptr);
732 	}
733 
734 	/*
735 	 * Get the original allocation's zone.  If the new request winds
736 	 * up using the same chunk size we do not have to do anything.
737 	 *
738 	 * NOTE: We don't have to lock the globaldata here, the fields we
739 	 * access here will not change at least as long as we have control
740 	 * over the allocation.
741 	 */
742 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
743 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
744 
745 	/*
746 	 * Use zoneindex() to chunk-align the new size, as long as the
747 	 * new size is not too large.
748 	 */
749 	if (size < ZoneLimit) {
750 		zoneindex(&size, &chunking);
751 		if (z->z_ChunkSize == size)
752 			return(ptr);
753 	}
754 
755 	/*
756 	 * Allocate memory for the new request size and copy as appropriate.
757 	 */
758 	if ((nptr = _slaballoc(size, 0)) != NULL) {
759 		if (size > z->z_ChunkSize)
760 			size = z->z_ChunkSize;
761 		bcopy(ptr, nptr, size);
762 		_slabfree(ptr);
763 	}
764 
765 	return(nptr);
766 }
767 
768 /*
769  * free (SLAB ALLOCATOR)
770  *
771  * Free a memory block previously allocated by malloc.  Note that we do not
772  * attempt to uplodate ks_loosememuse as MP races could prevent us from
773  * checking memory limits in malloc.
774  *
775  * MPSAFE
776  */
777 static void
778 _slabfree(void *ptr)
779 {
780 	slzone_t z;
781 	slchunk_t chunk;
782 	bigalloc_t big;
783 	bigalloc_t *bigp;
784 	slglobaldata_t slgd;
785 	size_t size;
786 	int pgno;
787 
788 	/*
789 	 * Handle NULL frees and special 0-byte allocations
790 	 */
791 	if (ptr == NULL)
792 		return;
793 	if (ptr == ZERO_LENGTH_PTR)
794 		return;
795 
796 	/*
797 	 * Handle oversized allocations.
798 	 */
799 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
800 		while ((big = *bigp) != NULL) {
801 			if (big->base == ptr) {
802 				*bigp = big->next;
803 				bigalloc_unlock(ptr);
804 				size = big->bytes;
805 				_slabfree(big);
806 #ifdef INVARIANTS
807 				MASSERT(sizeof(weirdary) <= size);
808 				bcopy(weirdary, ptr, sizeof(weirdary));
809 #endif
810 				_vmem_free(ptr, size);
811 				return;
812 			}
813 			bigp = &big->next;
814 		}
815 		bigalloc_unlock(ptr);
816 	}
817 
818 	/*
819 	 * Zone case.  Figure out the zone based on the fact that it is
820 	 * ZoneSize aligned.
821 	 */
822 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
823 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
824 
825 	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
826 	chunk = ptr;
827 	slgd = z->z_GlobalData;
828 	slgd_lock(slgd);
829 
830 #ifdef INVARIANTS
831 	/*
832 	 * Attempt to detect a double-free.  To reduce overhead we only check
833 	 * if there appears to be link pointer at the base of the data.
834 	 */
835 	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
836 		slchunk_t scan;
837 
838 		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
839 			if (scan == chunk)
840 				_mpanic("Double free at %p", chunk);
841 		}
842 	}
843 	chunk_mark_free(z, chunk);
844 #endif
845 
846 	/*
847 	 * Put weird data into the memory to detect modifications after
848 	 * freeing, illegal pointer use after freeing (we should fault on
849 	 * the odd address), and so forth.
850 	 */
851 #ifdef INVARIANTS
852 	if (z->z_ChunkSize < sizeof(weirdary))
853 		bcopy(weirdary, chunk, z->z_ChunkSize);
854 	else
855 		bcopy(weirdary, chunk, sizeof(weirdary));
856 #endif
857 
858 	/*
859 	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
860 	 * z_FirstFreePg.
861 	 */
862 	chunk->c_Next = z->z_PageAry[pgno];
863 	z->z_PageAry[pgno] = chunk;
864 	if (z->z_FirstFreePg > pgno)
865 		z->z_FirstFreePg = pgno;
866 
867 	/*
868 	 * Bump the number of free chunks.  If it becomes non-zero the zone
869 	 * must be added back onto the appropriate list.
870 	 */
871 	if (z->z_NFree++ == 0) {
872 		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
873 		slgd->ZoneAry[z->z_ZoneIndex] = z;
874 	}
875 
876 	/*
877 	 * If the zone becomes totally free then move this zone to
878 	 * the FreeZones list.
879 	 *
880 	 * Do not madvise here, avoiding the edge case where a malloc/free
881 	 * loop is sitting on the edge of a new zone.
882 	 *
883 	 * We could leave at least one zone in the ZoneAry for the index,
884 	 * using something like the below, but while this might be fine
885 	 * for the kernel (who cares about ~10MB of wasted memory), it
886 	 * probably isn't such a good idea for a user program.
887 	 *
888 	 * 	&& (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
889 	 */
890 	if (z->z_NFree == z->z_NMax) {
891 		slzone_t *pz;
892 
893 		pz = &slgd->ZoneAry[z->z_ZoneIndex];
894 		while (z != *pz)
895 			pz = &(*pz)->z_Next;
896 		*pz = z->z_Next;
897 		z->z_Magic = -1;
898 		z->z_Next = slgd->FreeZones;
899 		slgd->FreeZones = z;
900 		++slgd->NFreeZones;
901 	}
902 
903 	/*
904 	 * Limit the number of zones we keep cached.
905 	 */
906 	while (slgd->NFreeZones > ZONE_RELS_THRESH) {
907 		z = slgd->FreeZones;
908 		slgd->FreeZones = z->z_Next;
909 		--slgd->NFreeZones;
910 		slgd_unlock(slgd);
911 		_vmem_free(z, ZoneSize);
912 		slgd_lock(slgd);
913 	}
914 	slgd_unlock(slgd);
915 }
916 
917 #if defined(INVARIANTS)
918 /*
919  * Helper routines for sanity checks
920  */
921 static
922 void
923 chunk_mark_allocated(slzone_t z, void *chunk)
924 {
925 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
926 	__uint32_t *bitptr;
927 
928 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
929 	bitptr = &z->z_Bitmap[bitdex >> 5];
930 	bitdex &= 31;
931 	MASSERT((*bitptr & (1 << bitdex)) == 0);
932 	*bitptr |= 1 << bitdex;
933 }
934 
935 static
936 void
937 chunk_mark_free(slzone_t z, void *chunk)
938 {
939 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
940 	__uint32_t *bitptr;
941 
942 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
943 	bitptr = &z->z_Bitmap[bitdex >> 5];
944 	bitdex &= 31;
945 	MASSERT((*bitptr & (1 << bitdex)) != 0);
946 	*bitptr &= ~(1 << bitdex);
947 }
948 
949 #endif
950 
951 /*
952  * _vmem_alloc()
953  *
954  *	Directly map memory in PAGE_SIZE'd chunks with the specified
955  *	alignment.
956  *
957  *	Alignment must be a multiple of PAGE_SIZE.
958  */
959 static void *
960 _vmem_alloc(size_t size, size_t align, int flags)
961 {
962 	char *addr;
963 	char *save;
964 	size_t excess;
965 
966 	/*
967 	 * Map anonymous private memory.
968 	 */
969 	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
970 		    MAP_PRIVATE|MAP_ANON, -1, 0);
971 	if (addr == MAP_FAILED) {
972 		errno = ENOMEM;
973 		return(NULL);
974 	}
975 
976 	/*
977 	 * Check alignment.  The misaligned offset is also the excess
978 	 * amount.  If misaligned unmap the excess so we have a chance of
979 	 * mapping at the next alignment point and recursively try again.
980 	 */
981 	excess = (uintptr_t)addr & (align - 1);
982 	if (excess) {
983 		save = addr;
984 		munmap(save + align - excess, excess);
985 		addr = _vmem_alloc(size, align, flags);
986 		munmap(save, align - excess);
987 	}
988 	return((void *)addr);
989 }
990 
991 /*
992  * _vmem_free()
993  *
994  *	Free a chunk of memory allocated with _vmem_alloc()
995  */
996 static void
997 _vmem_free(void *ptr, vm_size_t size)
998 {
999 	munmap(ptr, size);
1000 }
1001 
1002 /*
1003  * Panic on fatal conditions
1004  */
1005 static void
1006 _mpanic(const char *ctl, ...)
1007 {
1008 	va_list va;
1009 
1010 	if (malloc_panic == 0) {
1011 		malloc_panic = 1;
1012 		va_start(va, ctl);
1013 		vfprintf(stderr, ctl, va);
1014 		fprintf(stderr, "\n");
1015 		fflush(stderr);
1016 		va_end(va);
1017 	}
1018 	abort();
1019 }
1020