/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking is done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
 * is used to locate them for free().  One- and two-page allocations use
 * the zone mechanic to avoid excessive mmap()/munmap() calls.
 *
 * API FEATURES AND SIDE EFFECTS
 *
 *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
 *	Above that, power-of-2 sized allocations are page-aligned.  Non
 *	power-of-2 sized allocations are aligned the same as the chunk
 *	size for their zone.
 *    + malloc(0) returns a special non-NULL value
 *    + ability to allocate arbitrarily large chunks of memory
 *    + realloc will reuse the passed pointer if possible, within the
 *	limitations of the zone chunking.
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'. See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * TUNING
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
 *			This will generate utrace events for all malloc,
 *			realloc, and free calls.  There are tools (mtrplay) to
 *			replay an allocation pattern or to graph heap structure
 *			(mtrgraph) which can interpret these logs.
 * 'Z'   / ['z']	Zero out / do not zero all allocations.
 *			Each new byte of memory allocated by malloc, realloc, or
 *			reallocf will be initialized to 0.  This is intended for
 *			debugging and will affect performance negatively.
 * 'H'   / ['h']	Pass a hint to the kernel about pages unused by the
 *			allocation functions.
 */

/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
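/*
 * Illustrative sketch (compiled out, not part of the allocator): a tiny
 * standalone program exercising the API behaviors documented above.  The
 * MALLOC_OPTIONS value "ZU" mentioned in the comments is only an example
 * setting; any combination of the flag characters from the TUNING section
 * can be used.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int
main(void)
{
	void *p, *q;

	/* malloc(0) returns a usable, special non-NULL pointer */
	p = malloc(0);
	assert(p != NULL);
	q = realloc(p, 100);		/* and may be realloc'd later */
	assert(q != NULL);
	free(q);

	/* power-of-2 sized allocations up to a page are power-of-2 aligned */
	p = malloc(256);
	assert(((uintptr_t)p & 255) == 0);
	free(p);

	/* run as: env MALLOC_OPTIONS=ZU ./a.out  (zeroing + utrace events) */
	return(0);
}
#endif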
#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>

#include "spinlock.h"
#include "un-namespace.h"

/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif

/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
struct slglobaldata;

typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
#if defined(INVARIANTS)
	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
#endif
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
	int		JunkIndex;
} *slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 *
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 *	    not be larger than 64.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)

#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002

/*
 * Thread control
 */

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
				_mpanic("assertion: %s in %s",	\
					#exp, __func__);	\
			    } while (0)

/*
 * Magazines
 */

#define M_MAX_ROUNDS	64
#define M_ZONE_ROUNDS	64
#define M_LOW_ROUNDS	32
#define M_INIT_ROUNDS	8
#define M_BURST_FACTOR	8
#define M_BURST_NSCALE	2

#define M_BURST		0x0001
#define M_BURST_EARLY	0x0002

struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		flags;
	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	int		burst_factor;	/* Number of blocks to prefill with */
	int		low_factor;	/* Free till low_factor from full mag */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);

static spinlock_t zone_mag_lock;
static struct magazine zone_magazine = {
	.flags = M_BURST | M_BURST_EARLY,
	.capacity = M_ZONE_ROUNDS,
	.rounds = 0,
	.burst_factor = M_BURST_FACTOR,
	.low_factor = M_LOW_ROUNDS
};

#define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
#define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
#define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)
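/*
 * Illustrative sketch (compiled out): a magazine is just a bounded LIFO
 * stack of free objects ("rounds").  magazine_free() pushes onto
 * objects[rounds] while space remains and magazine_alloc() pops from the
 * top; both are defined later in this file.  The hypothetical
 * example_magazine_lifo() below only demonstrates that ordering.
 */
#if 0
static void
example_magazine_lifo(void)
{
	struct magazine m = { .capacity = M_MAX_ROUNDS, .rounds = 0 };
	int a, b;

	/* push two rounds (what magazine_free() does while NOTFULL) */
	m.objects[m.rounds++] = &a;
	m.objects[m.rounds++] = &b;

	/* pop them again (what magazine_alloc() does while NOTEMPTY) */
	MASSERT(m.objects[--m.rounds] == &b);	/* last freed, first out */
	MASSERT(m.objects[--m.rounds] == &a);
	MASSERT(MAGAZINE_EMPTY(&m));
}
#endif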
/*
 * Each thread will have a pair of magazines per size-class (NZONES).
 * The loaded magazine will support immediate allocations; the previous
 * magazine will either be full or empty and can be swapped at need.
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;

/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
	spinlock_t	lock;
} magazine_depot;

typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	struct magazine	*newmag;
	int		init;
} thr_mags;

/*
 * With this attribute set, do not require a function call for accessing
 * this variable when the code is compiled -fPIC.
 */
#define TLS_ATTRIBUTE	__attribute__ ((tls_model ("initial-exec")));

static int mtmagazine_free_live;
static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];

/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;

static int opt_madvise = 0;
static int opt_utrace = 0;
static int malloc_started = 0;
static int g_malloc_flags = 0;
static spinlock_t malloc_init_lock;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static int malloc_panic;
static int malloc_dummy_pointer;

static const int32_t weirdary[16] = {
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
};

static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *, int *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...);
static void malloc_init(void);
#if defined(INVARIANTS)
static void chunk_mark_allocated(slzone_t z, void *chunk);
static void chunk_mark_free(slzone_t z, void *chunk);
#endif

struct nmalloc_utrace {
	void *p;
	size_t s;
	void *r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}

#ifdef INVARIANTS
/*
 * If enabled, any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
#endif

static void
malloc_init(void)
{
	const char *p = NULL;

	if (__isthreaded) {
		_SPINLOCK(&malloc_init_lock);
		if (malloc_started) {
			_SPINUNLOCK(&malloc_init_lock);
			return;
		}
	}

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0; break;
		case 'U':	opt_utrace = 1; break;
		case 'h':	opt_madvise = 0; break;
		case 'H':	opt_madvise = 1; break;
		case 'z':	g_malloc_flags = 0; break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO; break;
		default:
			break;
		}
	}

	malloc_started = 1;

	if (__isthreaded)
		_SPINUNLOCK(&malloc_init_lock);

	UTRACE((void *) -1, 0, NULL);
}

/*
 * We have to install a handler for nmalloc thread teardowns when
 * the thread is created.  We cannot delay this because destructors in
 * sophisticated userland programs can call malloc() for the first time
 * during their thread exit.
 *
 * This routine is called directly from pthreads.
 */
void
_nmalloc_thr_init(void)
{
	thr_mags *tp;

	/*
	 * Disallow mtmagazine operations until the mtmagazine is
	 * initialized.
	 */
	tp = &thread_mags;
	tp->init = -1;

	pthread_setspecific(thread_mags_key, tp);
	if (mtmagazine_free_live == 0) {
		mtmagazine_free_live = 1;
		pthread_once(&thread_mags_once, mtmagazine_init);
	}
	tp->init = 1;
}

/*
 * Thread locks.
 */
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINLOCK(&dp->lock);
}

static __inline void
depot_unlock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINUNLOCK(&dp->lock);
}

static __inline void
zone_magazine_lock(void)
{
	if (__isthreaded)
		_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	if (__isthreaded)
		_SPINUNLOCK(&zone_mag_lock);
}

static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;
	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}

/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	      ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (__isthreaded)
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}
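/*
 * Worked example (compiled out), assuming the usual PAGE_SHIFT of 12: a
 * pointer such as 0x12345000 hashes to
 *	hv = (0x12345000 >> 12) ^ (0x12345000 >> 22) = 0x12345 ^ 0x48 = 0x1230d
 * so its hash chain is bigalloc_array[hv & BIGHMASK] (index 0x30d) while
 * its spinlock is bigspin_array[hv & BIGXMASK] (index 0xd); sixteen hash
 * chains therefore share each of the 64 spinlocks.
 */
#if 0
static void
example_bigalloc_hash(void)
{
	int hv = _bigalloc_hash((void *)(intptr_t)0x12345000);

	MASSERT((hv & BIGHMASK) == 0x30d);
	MASSERT((hv & BIGXMASK) == 0x0d);
}
#endif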
/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * to lock anything.
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	if (__isthreaded) {
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	}
	return(bigp);
}

static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	if (__isthreaded) {
		hv = _bigalloc_hash(ptr);
		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %d", n);
	return(0);
}
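/*
 * Worked example (compiled out) of the zoneindex() math, matching the
 * chunking table at the top of this file: requests are rounded up to
 * their zone's chunk size and mapped onto one of the NZONES indices.
 */
#if 0
static void
example_zoneindex(void)
{
	size_t bytes, chunking;
	int zi;

	bytes = 1;			/* 1 -> 8 byte chunk, zone 0 */
	zi = zoneindex(&bytes, &chunking);
	MASSERT(zi == 0 && bytes == 8 && chunking == 8);

	bytes = 100;			/* 100 -> 104 byte chunk, zone 12 */
	zi = zoneindex(&bytes, &chunking);
	MASSERT(zi == 12 && bytes == 104 && chunking == 8);

	bytes = 700;			/* 700 -> 704 byte chunk, zone 34 */
	zi = zoneindex(&bytes, &chunking);
	MASSERT(zi == 34 && bytes == 704 && chunking == 64);
}
#endif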
/*
 * malloc() - call internal slab allocator
 */
void *
malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, size, ptr);
	return(ptr);
}

/*
 * calloc() - call internal slab allocator
 */
void *
calloc(size_t number, size_t size)
{
	void *ptr;

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, number * size, ptr);
	return(ptr);
}

/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
realloc(void *ptr, size_t size)
{
	void *ret;
	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	else
		UTRACE(ptr, size, ret);
	return(ret);
}

/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 *
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.  We round size up
	 * to the nearest alignment boundary to improve our odds of
	 * it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513-byte allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576 byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		if (size >= 1024)
			alignment = 1024;
		if (size >= 16384)
			alignment = 16384;
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * If the slab allocator cannot handle it use vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->bytes = size;
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}

/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
free(void *ptr)
{
	UTRACE(ptr, 0, 0);
	_slabfree(ptr, 0, NULL);
}
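/*
 * Usage sketch (compiled out): the two posix_memalign() paths described
 * above, as seen from a caller.  The sizes shown are only examples.
 */
#if 0
static void
example_posix_memalign(void)
{
	void *p;

	/* case (2) above: 513 bytes @ 256 alignment -> power-of-2 slab alloc */
	if (posix_memalign(&p, 256, 513) == 0) {
		MASSERT(((uintptr_t)p & 255) == 0);
		free(p);
	}

	/* page alignment or better is served by _vmem_alloc() + bigalloc */
	if (posix_memalign(&p, 65536, 65536) == 0) {
		MASSERT(((uintptr_t)p & 65535) == 0);
		free(p);
	}
}
#endif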
/*
 * _slaballoc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	int zi;
#ifdef INVARIANTS
	int i;
#endif
	int off;
	void *obj;

	if (!malloc_started)
		malloc_init();

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		return(ZERO_LENGTH_PTR);

	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL)
			return(NULL);

		big = _slaballoc(sizeof(struct bigalloc), 0);
		if (big == NULL) {
			_vmem_free(chunk, size);
			return(NULL);
		}
		bigp = bigalloc_lock(chunk);
		big->base = chunk;
		big->bytes = size;
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);

		return(chunk);
	}

	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return (obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is
		 * somewhat more complicated so don't make an exact
		 * calculation.
		 */
		off = offsetof(struct slzone,
				z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(struct slzone);
#endif

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = (off + size - 1) & ~(size - 1);
		else
			off = (off + chunking - 1) & ~(chunking - 1);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
					& (ZALLOC_MAX_ZONE_SIZE - 1);
	}

	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
			/*
			 * Diagnostic: c_Next is not total garbage.
			 */
			MASSERT(chunk->c_Next == NULL ||
				((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
				((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
			chunk_mark_allocated(z, chunk);
#endif
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif

done:
	slgd_unlock(slgd);
	if (flags & SAFLAG_ZERO) {
		bzero(chunk, size);
#ifdef INVARIANTS
	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		/* avoid accidental double-free check */
		chunk->c_Next = (void *)-1;
#endif
	}
	return(chunk);
fail:
	slgd_unlock(slgd);
	return(NULL);
}

/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
		return(_slaballoc(size, 0));
	}

	if (size == 0) {
		free(ptr);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;
				if (bigbytes == size) {
					bigalloc_unlock(ptr);
					return(ptr);
				}
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink block */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				_slabfree(ptr, FASTSLABREALLOC, &big);
				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * flags:
 *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
 *				unlinked.
 *
 * MPSAFE
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;
	if (ptr == ZERO_LENGTH_PTR)
		return;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				*bigp = big->next;
				bigalloc_unlock(ptr);
fastslabrealloc:
				size = big->bytes;
				_slabfree(big, 0, NULL);
#ifdef INVARIANTS
				MASSERT(sizeof(weirdary) <= size);
				bcopy(weirdary, ptr, sizeof(weirdary));
#endif
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (mtmagazine_free(zi, ptr) == 0)
		return;

	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;
	slgd = &SLGlobalData;
	slgd_lock(slgd);

#ifdef INVARIANTS
	/*
	 * Attempt to detect a double-free.  To reduce overhead we only check
	 * if there appears to be a link pointer at the base of the data.
	 */
	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
		slchunk_t scan;

		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
			if (scan == chunk)
				_mpanic("Double free at %p", chunk);
		}
	}
	chunk_mark_free(z, chunk);
#endif

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on
	 * the odd address), and so forth.
	 */
#ifdef INVARIANTS
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
	 * z_FirstFreePg.
	 */
	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free then release it.
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = NULL;
		zone_free(z);
		/* slgd lock released */
		return;
	}
	slgd_unlock(slgd);
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) == 0);
	*bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) != 0);
	*bitptr &= ~(1 << bitdex);
}

#endif

/*
 * Allocate and return a magazine.  NULL is returned and *burst is adjusted
 * if the magazine is empty.
 */
static __inline void *
magazine_alloc(struct magazine *mp, int *burst)
{
	void *obj;

	if (mp == NULL)
		return(NULL);
	if (MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
		return(obj);
	}

	/*
	 * Return burst factor to caller along with NULL
	 */
	if ((mp->flags & M_BURST) && (burst != NULL)) {
		*burst = mp->burst_factor;
	}
	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
	    (burst != NULL)) {
		mp->burst_factor -= M_BURST_NSCALE;
		if (mp->burst_factor <= 1) {
			mp->burst_factor = 1;
			mp->flags &= ~(M_BURST);
			mp->flags &= ~(M_BURST_EARLY);
		}
	}
	return (NULL);
}

static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return 0;
	}

	return -1;
}

static void *
mtmagazine_alloc(int zi)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(NULL);

	/*
	 * Primary per-thread allocation loop
	 */
	for (;;) {
		/*
		 * If the loaded magazine has rounds, allocate and return
		 */
		mp = tp->mags[zi].loaded;
		obj = magazine_alloc(mp, NULL);
		if (obj)
			break;

		/*
		 * If the prev magazine is full, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_FULL(mp)) {
			MASSERT(mp->rounds != 0);
			swap_mags(&tp->mags[zi]);	/* prev now empty */
			continue;
		}

		/*
		 * Try to get a full magazine from the depot.  Cycle
		 * through depot(full)->loaded->prev->depot(empty).
		 * Retry if a full magazine was available from the depot.
		 *
		 * Return NULL (caller will fall through) if no magazines
		 * can be found anywhere.
		 */
		d = &depots[zi];
		depot_lock(d);
		emptymag = tp->mags[zi].prev;
		if (emptymag)
			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
		tp->mags[zi].loaded = mp;
		if (mp) {
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
			MASSERT(MAGAZINE_NOTEMPTY(mp));
			depot_unlock(d);
			continue;
		}
		depot_unlock(d);
		break;
	}

	return (obj);
}

static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag;
	magazine_depot *d;
	int rc = -1;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(-1);

	/*
	 * Primary per-thread freeing loop
	 */
	for (;;) {
		/*
		 * Make sure a new magazine is available in case we have
		 * to use it.  Staging the newmag allows us to avoid
		 * some locking/reentrancy complexity.
		 *
		 * Temporarily disable the per-thread caches for this
		 * allocation to avoid reentrancy and/or to avoid a
		 * stack overflow if the [zi] happens to be the same that
		 * would be used to allocate the new magazine.
		 */
		if (tp->newmag == NULL) {
			tp->init = -1;
			tp->newmag = _slaballoc(sizeof(struct magazine),
						SAFLAG_ZERO);
			tp->init = 1;
			if (tp->newmag == NULL) {
				rc = -1;
				break;
			}
		}

		/*
		 * If the loaded magazine has space, free directly to it
		 */
		rc = magazine_free(tp->mags[zi].loaded, ptr);
		if (rc == 0)
			break;

		/*
		 * If the prev magazine is empty, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_EMPTY(mp)) {
			MASSERT(mp->rounds == 0);
			swap_mags(&tp->mags[zi]);	/* prev now full */
			continue;
		}

		/*
		 * Try to get an empty magazine from the depot.  Cycle
		 * through depot(empty)->loaded->prev->depot(full).
		 * Retry if an empty magazine was available from the depot.
		 */
		d = &depots[zi];
		depot_lock(d);

		if ((loadedmag = tp->mags[zi].prev) != NULL)
			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->empty);
		if (mp) {
			tp->mags[zi].loaded = mp;
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
			MASSERT(MAGAZINE_NOTFULL(mp));
		} else {
			mp = tp->newmag;
			tp->newmag = NULL;
			mp->capacity = M_MAX_ROUNDS;
			mp->rounds = 0;
			mp->flags = 0;
			tp->mags[zi].loaded = mp;
		}
		depot_unlock(d);
	}

	return rc;
}

static void
mtmagazine_init(void)
{
	int error;

	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
	if (error)
		abort();
}

/*
 * This function is only used by the thread exit destructor
 */
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp, NULL);
		_slabfree(obj, 0, NULL);
	}
}

/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 *
 * WARNING! The destructor can be called multiple times if the larger user
 *	    program has its own destructors which run after ours which
 *	    allocate or free memory.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	/*
	 * Prevent further use of mtmagazines while we are destructing
	 * them, as well as for any destructors which are run after us
	 * prior to the thread actually being destroyed.
	 */
	tp->init = -1;

	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		tp->mags[i].loaded = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}

		mp = tp->mags[i].prev;
		tp->mags[i].prev = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}
	}

	if (tp->newmag) {
		mp = tp->newmag;
		tp->newmag = NULL;
		_slabfree(mp, 0, NULL);
	}
}

/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine; the zone magazine has
 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
 */
static slzone_t
zone_alloc(int flags)
{
	slglobaldata_t slgd = &SLGlobalData;
	int burst = 1;
	int i, j;
	slzone_t z;

	zone_magazine_lock();
	slgd_unlock(slgd);

	z = magazine_alloc(&zone_magazine, &burst);
	if (z == NULL && burst == 1) {
		zone_magazine_unlock();
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
	} else if (z == NULL) {
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
		if (z) {
			for (i = 1; i < burst; i++) {
				j = magazine_free(&zone_magazine,
						  (char *) z + (ZoneSize * i));
				MASSERT(j == 0);
			}
		}
		zone_magazine_unlock();
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}
	slgd_lock(slgd);
	return z;
}

/*
 * zone_free()
 *
 * Release a zone and unlock the slgd lock.
 */
static void
zone_free(void *z)
{
	slglobaldata_t slgd = &SLGlobalData;
	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
	int i, j;

	zone_magazine_lock();
	slgd_unlock(slgd);

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free.  Re-enable
	 * BURST mode for the magazine.
	 */
	if (i == -1) {
		j = zone_magazine.rounds - zone_magazine.low_factor;
		for (i = 0; i < j; i++) {
			excess[i] = magazine_alloc(&zone_magazine, NULL);
			MASSERT(excess[i] != NULL);
		}

		zone_magazine_unlock();

		for (i = 0; i < j; i++)
			_vmem_free(excess[i], ZoneSize);

		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}

/*
 * _vmem_alloc()
 *
 *	Directly map memory in PAGE_SIZE'd chunks with the specified
 *	alignment.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	char *addr;
	char *save;
	size_t excess;

	/*
	 * Map anonymous private memory.
	 */
	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return(NULL);

	/*
	 * Check alignment.  The misaligned offset is also the excess
	 * amount.  If misaligned unmap the excess so we have a chance of
	 * mapping at the next alignment point and recursively try again.
	 *
	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
	 *   xxxxxxxxx				final excess calculation
	 *   ^ returned address
	 */
	excess = (uintptr_t)addr & (align - 1);

	if (excess) {
		excess = align - excess;
		save = addr;

		munmap(save + excess, size - excess);
		addr = _vmem_alloc(size, align, flags);
		munmap(save, excess);
	}
	return((void *)addr);
}

/*
 * _vmem_free()
 *
 *	Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}

/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		fflush(stderr);
		va_end(va);
	}
	abort();
}
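/*
 * Worked example (compiled out) of the _vmem_alloc() alignment fixup
 * above, assuming align = 64KB and a hypothetical mmap() result of
 * 0x7f0000012000: the misaligned offset is 0x2000, so excess becomes
 * 0x10000 - 0x2000 = 0xe000 and addr + excess == 0x7f0000020000, the next
 * aligned boundary.  The tail from that boundary is unmapped to open a
 * hole, the allocation is retried recursively, and the misaligned head is
 * unmapped last.
 */
#if 0
static void
example_vmem_alignment(void)
{
	uintptr_t addr = 0x7f0000012000UL;	/* hypothetical mmap() result */
	size_t align = 65536;
	size_t excess = addr & (align - 1);

	MASSERT(excess == 0x2000);
	excess = align - excess;
	MASSERT(addr + excess == 0x7f0000020000UL);
}
#endif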