/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking is done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
 * is used to locate them for free().  One- and two-page allocations use
 * the zone mechanic to avoid excessive mmap()/munmap() calls.
 *
 * API FEATURES AND SIDE EFFECTS
 *
 *    + power-of-2 sized allocations up to a page will be power-of-2
 *	aligned.  Above that, power-of-2 sized allocations are page-aligned.
 *	Non power-of-2 sized allocations are aligned the same as the chunk
 *	size for their zone (illustrated below).
 *    + malloc(0) returns a special non-NULL value
 *    + ability to allocate arbitrarily large chunks of memory
 *    + realloc will reuse the passed pointer if possible, within the
 *	limitations of the zone chunking.
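 *
 *    Illustrative examples of the above (a hand-worked sketch, not an
 *    additional guarantee):
 *	malloc(0)     -> special non-NULL pointer
 *	malloc(256)   -> 256-byte aligned (power-of-2, <= one page)
 *	malloc(4096)  -> page aligned
 *	malloc(20000) -> >= ZoneLimit, mmap()-backed and page aligned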
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'. See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * Oversized allocations employ the BIGCACHE mechanic whereby large
 * allocations may be handed significantly larger buffers, allowing them
 * to avoid mmap/munmap operations even through significant realloc()s.
 * The excess space is only trimmed if too many large allocations have been
 * given this treatment.
 *
 * TUNING
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
 *			This will generate utrace events for all malloc,
 *			realloc, and free calls. There are tools (mtrplay) to
 *			replay an allocation pattern or to graph heap structure
 *			(mtrgraph) which can interpret these logs.
 * 'Z'   / ['z']	Zero out / do not zero all allocations.
 *			Each new byte of memory allocated by malloc, realloc, or
 *			reallocf will be initialized to 0. This is intended for
 *			debugging and will affect performance negatively.
 * 'H'   / ['h']	Pass a hint to the kernel about pages unused by the
 *			allocation functions.
 */

/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */

#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <machine/atomic.h>

#include "spinlock.h"
#include "un-namespace.h"


/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	active;		/* bytes active */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif

/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
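 *
 * Because zones are ZALLOC_ZONE_SIZE bytes and ZALLOC_ZONE_SIZE aligned,
 * the zone owning a small allocation can be recovered by masking the
 * pointer, e.g. (this mirrors what _slabfree() does below):
 *
 *	slzone_t z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);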
 */
struct slglobaldata;

typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
	int		JunkIndex;
} *slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 *	    not be larger than 64.
 */
#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

/*
 * BIGCACHE caches oversized allocations.  Note that a linear search is
 * performed, so do not make the cache too large.
 *
 * BIGCACHE will garbage-collect excess space when the excess exceeds the
 * specified value.  A relatively large number should be used here because
 * garbage collection is expensive.
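 *
 * As a rough illustration (numbers follow the defines below): a freed
 * 768KB mmap()-backed block may be parked in the cache and handed back
 * to a later oversized request of, say, 512KB, with big->active tracking
 * the smaller size; only when the total untrimmed excess passes
 * BIGCACHE_EXCESS does handle_excess_big() munmap() the unused tails.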
 */
#define BIGCACHE	16
#define BIGCACHE_MASK	(BIGCACHE - 1)
#define BIGCACHE_LIMIT	(1024 * 1024)		/* size limit */
#define BIGCACHE_EXCESS	(16 * 1024 * 1024)	/* garbage collect */

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002

/*
 * Thread control
 */

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
				_mpanic("assertion: %s in %s",	\
					#exp, __func__);	\
			    } while (0)

/*
 * Magazines
 */

#define M_MAX_ROUNDS	64
#define M_ZONE_ROUNDS	64
#define M_LOW_ROUNDS	32
#define M_INIT_ROUNDS	8
#define M_BURST_FACTOR	8
#define M_BURST_NSCALE	2

#define M_BURST		0x0001
#define M_BURST_EARLY	0x0002

struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		flags;
	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	int		burst_factor;	/* Number of blocks to prefill with */
	int		low_factor;	/* Free till low_factor from full mag */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);

static spinlock_t zone_mag_lock;
static spinlock_t depot_spinlock;
static struct magazine zone_magazine = {
	.flags = M_BURST | M_BURST_EARLY,
	.capacity = M_ZONE_ROUNDS,
	.rounds = 0,
	.burst_factor = M_BURST_FACTOR,
	.low_factor = M_LOW_ROUNDS
};

#define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
#define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
#define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)

/*
 * Each thread will have a pair of magazines per size-class (NZONES)
 * The loaded magazine will support immediate allocations, the previous
 * magazine will either be full or empty and can be swapped at need
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;

/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
	spinlock_t	lock;
} magazine_depot;

typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	struct magazine	*newmag;
	int		init;
} thr_mags;

/*
 * With this attribute set, do not require a function call for accessing
 * this variable when the code is compiled -fPIC.
 *
 * Must be empty for libc_rtld (similar to __thread).
 */
#ifdef __LIBC_RTLD
#define TLS_ATTRIBUTE
#else
#define TLS_ATTRIBUTE	__attribute__ ((tls_model ("initial-exec")))
#endif

static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];

/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;

static int opt_madvise = 0;
static int opt_utrace = 0;
static int g_malloc_flags = 0;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static volatile void *bigcache_array[BIGCACHE];		/* atomic swap */
static volatile size_t bigcache_size_array[BIGCACHE];	/* SMP races ok */
static volatile int bigcache_index;			/* SMP races ok */
static int malloc_panic;
static size_t excess_alloc;				/* excess big allocs */

static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *, int *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...) __printflike(1, 2);
static void malloc_init(void) __constructor(101);

struct nmalloc_utrace {
	void *p;
	size_t s;
	void *r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}

static void
malloc_init(void)
{
	const char *p = NULL;

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0; break;
		case 'U':	opt_utrace = 1; break;
		case 'h':	opt_madvise = 0; break;
		case 'H':	opt_madvise = 1; break;
		case 'z':	g_malloc_flags = 0; break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO; break;
		default:
			break;
		}
	}

	UTRACE((void *) -1, 0, NULL);
}

/*
 * We have to install a handler for nmalloc thread teardowns when
 * the thread is created.  We cannot delay this because destructors in
 * sophisticated userland programs can call malloc() for the first time
 * during their thread exit.
 *
 * This routine is called directly from pthreads.
 */
void
_nmalloc_thr_init(void)
{
	static int init_once;
	thr_mags *tp;

	/*
	 * Disallow mtmagazine operations until the mtmagazine is
	 * initialized.
	 */
	tp = &thread_mags;
	tp->init = -1;

	if (init_once == 0) {
		init_once = 1;
		pthread_once(&thread_mags_once, mtmagazine_init);
	}
	pthread_setspecific(thread_mags_key, tp);
	tp->init = 1;
}

void
_nmalloc_thr_prepfork(void)
{
	if (__isthreaded) {
		_SPINLOCK(&zone_mag_lock);
		_SPINLOCK(&depot_spinlock);
	}
}

void
_nmalloc_thr_parentfork(void)
{
	if (__isthreaded) {
		_SPINUNLOCK(&depot_spinlock);
		_SPINUNLOCK(&zone_mag_lock);
	}
}

void
_nmalloc_thr_childfork(void)
{
	if (__isthreaded) {
		_SPINUNLOCK(&depot_spinlock);
		_SPINUNLOCK(&zone_mag_lock);
	}
}

/*
 * Thread locks.
 */
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINLOCK(&depot_spinlock);
#if 0
	if (__isthreaded)
		_SPINLOCK(&dp->lock);
#endif
}

static __inline void
depot_unlock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINUNLOCK(&depot_spinlock);
#if 0
	if (__isthreaded)
		_SPINUNLOCK(&dp->lock);
#endif
}

static __inline void
zone_magazine_lock(void)
{
	if (__isthreaded)
		_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	if (__isthreaded)
		_SPINUNLOCK(&zone_mag_lock);
}

static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;
	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}

/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	     ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (__isthreaded)
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * to lock anything.
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	if (__isthreaded) {
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	}
	return(bigp);
}

static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	if (__isthreaded) {
		hv = _bigalloc_hash(ptr);
		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
	}
}

/*
 * Find a bigcache entry that might work for the allocation.  SMP races are
 * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
 * is wrong or if a NULL or too-small big is returned).
 *
 * Generally speaking it is ok to find a large entry even if the bytes
 * requested are relatively small (but still oversized), because we really
 * don't know *what* the application is going to do with the buffer.
 */
static __inline
bigalloc_t
bigcache_find_alloc(size_t bytes)
{
	bigalloc_t big = NULL;
	size_t test;
	int i;

	for (i = 0; i < BIGCACHE; ++i) {
		test = bigcache_size_array[i];
		if (bytes <= test) {
			bigcache_size_array[i] = 0;
			big = atomic_swap_ptr(&bigcache_array[i], NULL);
			break;
		}
	}
	return big;
}

/*
 * Free a bigcache entry, possibly returning one that the caller really must
 * free.  This is used to cache recent oversized memory blocks.  Only
 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
 * to collect the biggest ones we can that are under the limit.
 */
static __inline
bigalloc_t
bigcache_find_free(bigalloc_t big)
{
	int i;
	int j;
	int b;

	b = ++bigcache_index;
	for (i = 0; i < BIGCACHE; ++i) {
		j = (b + i) & BIGCACHE_MASK;
		if (bigcache_size_array[j] < big->bytes) {
			bigcache_size_array[j] = big->bytes;
			big = atomic_swap_ptr(&bigcache_array[j], big);
			break;
		}
	}
	return big;
}

static __inline
void
handle_excess_big(void)
{
	int i;
	bigalloc_t big;
	bigalloc_t *bigp;

	if (excess_alloc <= BIGCACHE_EXCESS)
		return;

	for (i = 0; i < BIGHSIZE; ++i) {
		bigp = &bigalloc_array[i];
		if (*bigp == NULL)
			continue;
		if (__isthreaded)
			_SPINLOCK(&bigspin_array[i & BIGXMASK]);
		for (big = *bigp; big; big = big->next) {
			if (big->active < big->bytes) {
				MASSERT((big->active & PAGE_MASK) == 0);
				MASSERT((big->bytes & PAGE_MASK) == 0);
				munmap((char *)big->base + big->active,
				       big->bytes - big->active);
				atomic_add_long(&excess_alloc,
						big->active - big->bytes);
				big->bytes = big->active;
			}
		}
		if (__isthreaded)
			_SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */

	/*
	 * This used to be 8-byte chunks and 16 zones for n < 128.
	 * However some instructions may require 16-byte alignment
	 * (aka SIMD) and programs might not request an aligned size
	 * (aka GCC-7), so change this as follows:
	 *
	 * 0-15 bytes	8-byte alignment in two zones	(0-1)
	 * 16-127 bytes	16-byte alignment in eight zones (3-10)
	 * zone index 2 and 11-15 are currently unused.
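	 *
	 * Hand-worked examples of the mapping performed by the code below
	 * (illustrative only):
	 *
	 *	request 24   -> *bytes = 32,   *chunking = 16,   zone 4
	 *	request 100  -> *bytes = 112,  *chunking = 16,   zone 9
	 *	request 600  -> *bytes = 640,  *chunking = 64,   zone 33
	 *	request 9000 -> *bytes = 9216, *chunking = 1024, zone 64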
	 */
	if (n < 16) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 2 zones */
		/* zones 0,1, zone 2 is unused */
	}
	if (n < 128) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 2);		/* 16 byte chunks, 8 zones */
		/* zones 3-10, zones 11-15 unused */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}

/*
 * malloc() - call internal slab allocator
 */
void *
__malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, size, ptr);
	return(ptr);
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

/*
 * calloc() - call internal slab allocator
 */
void *
__calloc(size_t number, size_t size)
{
	void *ptr;

	if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	     number > 0 && SIZE_MAX / number < size) {
		errno = ENOMEM;
		return(NULL);
	}

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, number * size, ptr);
	return(ptr);
}

/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
__realloc(void *ptr, size_t size)
{
	void *ret;
	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	else
		UTRACE(ptr, size, ret);
	return(ret);
}

/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 *
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
 */
int
__posix_memalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi __unused;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.
	 * We round size up to the nearest alignment boundary to improve
	 * our odds of it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);

	/*
	 * If we have overflowed above when rounding to the nearest alignment
	 * boundary, just return ENOMEM, size should be == N * sizeof(void *).
	 */
	if (size == 0)
		return(ENOMEM);

	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513 allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576 byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		if (size >= 1024)
			alignment = 1024;
		if (size >= 16384)
			alignment = 16384;
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * If the slab allocator cannot handle it use vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->active = size;
	big->bytes = size;		/* no excess */
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}

/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
__free(void *ptr)
{
	UTRACE(ptr, 0, 0);
	_slabfree(ptr, 0, NULL);
}

/*
 * _slaballoc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	int zi;
	int off;
	void *obj;

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		size = 1;

	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.
	 * There should not be very many of these so performance is not a
	 * big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		/*
		 * Page-align and cache-color in case of virtually indexed
		 * physically tagged L1 caches (aka SandyBridge).  No sweat
		 * otherwise, so just do it.
		 *
		 * (don't count as excess).
		 */
		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
		if ((size & (PAGE_SIZE * 2 - 1)) == 0)
			size += PAGE_SIZE;

		/*
		 * Try to reuse a cached big block to avoid mmap'ing.  If it
		 * turns out not to fit our requirements we throw it away
		 * and allocate normally.
		 */
		big = NULL;
		if (size <= BIGCACHE_LIMIT) {
			big = bigcache_find_alloc(size);
			if (big && big->bytes < size) {
				_slabfree(big->base, FASTSLABREALLOC, &big);
				big = NULL;
			}
		}
		if (big) {
			chunk = big->base;
			if (flags & SAFLAG_ZERO)
				bzero(chunk, size);
		} else {
			chunk = _vmem_alloc(size, PAGE_SIZE, flags);
			if (chunk == NULL)
				return(NULL);

			big = _slaballoc(sizeof(struct bigalloc), 0);
			if (big == NULL) {
				_vmem_free(chunk, size);
				return(NULL);
			}
			big->base = chunk;
			big->bytes = size;
		}
		big->active = size;

		bigp = bigalloc_lock(chunk);
		if (big->active < big->bytes) {
			atomic_add_long(&excess_alloc,
					big->bytes - big->active);
		}
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);
		handle_excess_big();

		return(chunk);
	}

	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return (obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
		off = sizeof(struct slzone);

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
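		 *
		 * A hand-worked example of the power-of-2 test used just
		 * below (illustrative only):
		 *
		 *	size = 256: (256 | 255) + 1 == 512 == (256 << 1),
		 *		    so off is rounded up to a 256 boundary.
		 *	size = 320: (320 | 319) + 1 == 384 != (320 << 1),
		 *		    so off is rounded up to the chunking (32).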
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = roundup2(off, size);
		else
			off = roundup2(off, chunking);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				  & (ZALLOC_MAX_ZONE_SIZE - 1);
	}

	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}

done:
	slgd_unlock(slgd);
	if (flags & SAFLAG_ZERO)
		bzero(chunk, size);
	return(chunk);
fail:
	slgd_unlock(slgd);
	return(NULL);
}

/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL) {
		return(_slaballoc(size, 0));
	}

	if (size == 0)
		size = 1;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;

				/*
				 * If it already fits determine if it makes
				 * sense to shrink/reallocate.  Try to optimize
				 * programs which stupidly make incremental
				 * reallocations larger or smaller by scaling
				 * the allocation.  Also deal with potential
				 * coloring.
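				 *
				 * Illustrative example (not normative): a
				 * 32768-byte big block realloc()d to 20000
				 * bytes (rounded to 20480) stays in place
				 * because it is still >= half of 32768;
				 * realloc()d to 8000 bytes it falls through
				 * and is copied into a smaller allocation.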
				 */
				if (size >= (bigbytes >> 1) &&
				    size <= bigbytes) {
					if (big->active != size) {
						atomic_add_long(&excess_alloc,
								big->active -
								size);
					}
					big->active = size;
					bigalloc_unlock(ptr);
					return(ptr);
				}

				/*
				 * For large reallocations, allocate more space
				 * than we need to try to avoid excessive
				 * reallocations later on.
				 */
				chunking = size + (size >> 3);
				chunking = (chunking + PAGE_MASK) &
					   ~(size_t)PAGE_MASK;

				/*
				 * Try to allocate adjacently in case the
				 * program is idiotically realloc()ing a
				 * huge memory block just slightly bigger.
				 * (llvm's llc tends to do this a lot).
				 *
				 * (MAP_TRYFIXED forces mmap to fail if there
				 * is already something at the address).
				 */
				if (chunking > bigbytes) {
					char *addr;
					int errno_save = errno;

					addr = mmap((char *)ptr + bigbytes,
						    chunking - bigbytes,
						    PROT_READ|PROT_WRITE,
						    MAP_PRIVATE|MAP_ANON|
						    MAP_TRYFIXED,
						    -1, 0);
					errno = errno_save;
					if (addr == (char *)ptr + bigbytes) {
						atomic_add_long(&excess_alloc,
								big->active -
								big->bytes +
								chunking -
								size);
						big->bytes = chunking;
						big->active = size;
						bigalloc_unlock(ptr);

						return(ptr);
					}
					MASSERT((void *)addr == MAP_FAILED);
				}

				/*
				 * Failed, unlink big and allocate fresh.
				 * (note that we have to leave (big) intact
				 * in case the slaballoc fails).
				 */
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink block */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				_slabfree(ptr, FASTSLABREALLOC, &big);

				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * flags:
 *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
 *				unlinked.
 *
 * MPSAFE
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				*bigp = big->next;
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				bigalloc_unlock(ptr);

				/*
				 * Try to stash the block we are freeing,
				 * potentially receiving another block in
				 * return which must be freed.
				 */
fastslabrealloc:
				if (big->bytes <= BIGCACHE_LIMIT) {
					big = bigcache_find_free(big);
					if (big == NULL)
						return;
				}
				ptr = big->base;	/* reload */
				size = big->bytes;
				_slabfree(big, 0, NULL);
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (mtmagazine_free(zi, ptr) == 0)
		return;

	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;
	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
	 * z_FirstFreePg.
	 */
	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free then release it.
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = NULL;
		zone_free(z);
		/* slgd lock released */
		return;
	}
	slgd_unlock(slgd);
}

/*
 * Allocate and return a magazine.  NULL is returned and *burst is adjusted
 * if the magazine is empty.
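 *
 * A magazine is a simple LIFO stack of object pointers: allocation pops
 * objects[--rounds] here, and magazine_free() below pushes
 * objects[rounds++], so the most recently freed (cache-warm) object is
 * handed out first.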
 */
static __inline void *
magazine_alloc(struct magazine *mp, int *burst)
{
	void *obj;

	if (mp == NULL)
		return(NULL);
	if (MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
		return(obj);
	}

	/*
	 * Return burst factor to caller along with NULL
	 */
	if ((mp->flags & M_BURST) && (burst != NULL)) {
		*burst = mp->burst_factor;
	}
	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
	    (burst != NULL)) {
		mp->burst_factor -= M_BURST_NSCALE;
		if (mp->burst_factor <= 1) {
			mp->burst_factor = 1;
			mp->flags &= ~(M_BURST);
			mp->flags &= ~(M_BURST_EARLY);
		}
	}
	return (NULL);
}

static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return 0;
	}

	return -1;
}

static void *
mtmagazine_alloc(int zi)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(NULL);

	/*
	 * Primary per-thread allocation loop
	 */
	for (;;) {
		/*
		 * If the loaded magazine has rounds, allocate and return
		 */
		mp = tp->mags[zi].loaded;
		obj = magazine_alloc(mp, NULL);
		if (obj)
			break;

		/*
		 * If the prev magazine is full, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_FULL(mp)) {
			MASSERT(mp->rounds != 0);
			swap_mags(&tp->mags[zi]);	/* prev now empty */
			continue;
		}

		/*
		 * Try to get a full magazine from the depot.  Cycle
		 * through depot(full)->loaded->prev->depot(empty).
		 * Retry if a full magazine was available from the depot.
		 *
		 * Return NULL (caller will fall through) if no magazines
		 * can be found anywhere.
		 */
		d = &depots[zi];
		depot_lock(d);
		emptymag = tp->mags[zi].prev;
		if (emptymag)
			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
		tp->mags[zi].loaded = mp;
		if (mp) {
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
			MASSERT(MAGAZINE_NOTEMPTY(mp));
			depot_unlock(d);
			continue;
		}
		depot_unlock(d);
		break;
	}

	return (obj);
}

static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag;
	magazine_depot *d;
	int rc = -1;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(-1);

	/*
	 * Primary per-thread freeing loop
	 */
	for (;;) {
		/*
		 * Make sure a new magazine is available in case we have
		 * to use it.  Staging the newmag allows us to avoid
		 * some locking/reentrancy complexity.
		 *
		 * Temporarily disable the per-thread caches for this
		 * allocation to avoid reentrancy and/or to avoid a
		 * stack overflow if the [zi] happens to be the same that
		 * would be used to allocate the new magazine.
		 */
		if (tp->newmag == NULL) {
			tp->init = -1;
			tp->newmag = _slaballoc(sizeof(struct magazine),
						SAFLAG_ZERO);
			tp->init = 1;
			if (tp->newmag == NULL) {
				rc = -1;
				break;
			}
		}

		/*
		 * If the loaded magazine has space, free directly to it
		 */
		rc = magazine_free(tp->mags[zi].loaded, ptr);
		if (rc == 0)
			break;

		/*
		 * If the prev magazine is empty, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_EMPTY(mp)) {
			MASSERT(mp->rounds == 0);
			swap_mags(&tp->mags[zi]);	/* prev now full */
			continue;
		}

		/*
		 * Try to get an empty magazine from the depot.  Cycle
		 * through depot(empty)->loaded->prev->depot(full).
		 * Retry if an empty magazine was available from the depot.
		 */
		d = &depots[zi];
		depot_lock(d);

		if ((loadedmag = tp->mags[zi].prev) != NULL)
			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->empty);
		if (mp) {
			tp->mags[zi].loaded = mp;
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
			MASSERT(MAGAZINE_NOTFULL(mp));
		} else {
			mp = tp->newmag;
			tp->newmag = NULL;
			mp->capacity = M_MAX_ROUNDS;
			mp->rounds = 0;
			mp->flags = 0;
			tp->mags[zi].loaded = mp;
		}
		depot_unlock(d);
	}

	return rc;
}

static void
mtmagazine_init(void)
{
	int error;

	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
	if (error)
		abort();
}

/*
 * This function is only used by the thread exit destructor
 */
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp, NULL);
		_slabfree(obj, 0, NULL);
	}
}

/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 *
 * WARNING!  The destructor can be called multiple times if the larger user
 *	     program has its own destructors which run after ours which
 *	     allocate or free memory.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	/*
	 * Prevent further use of mtmagazines while we are destructing
	 * them, as well as for any destructors which are run after us
	 * prior to the thread actually being destroyed.
	 */
	tp->init = -1;

	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		tp->mags[i].loaded = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}

		mp = tp->mags[i].prev;
		tp->mags[i].prev = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}
	}

	if (tp->newmag) {
		mp = tp->newmag;
		tp->newmag = NULL;
		_slabfree(mp, 0, NULL);
	}
}

/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine; the zone magazine has
 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
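 *
 * An illustrative trace derived from the M_BURST_* constants above: each
 * time the zone magazine runs dry while bursting, zone_alloc() maps 8,
 * then 6, then 4, then 2 zones worth of memory in one _vmem_alloc() call,
 * parking the extra zones in the magazine; after that the burst flags are
 * cleared and zones are mapped one at a time.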
 */
static slzone_t
zone_alloc(int flags)
{
	slglobaldata_t slgd = &SLGlobalData;
	int burst = 1;
	int i, j;
	slzone_t z;

	zone_magazine_lock();
	slgd_unlock(slgd);

	z = magazine_alloc(&zone_magazine, &burst);
	if (z == NULL && burst == 1) {
		zone_magazine_unlock();
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
	} else if (z == NULL) {
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
		if (z) {
			for (i = 1; i < burst; i++) {
				j = magazine_free(&zone_magazine,
						  (char *) z + (ZoneSize * i));
				MASSERT(j == 0);
			}
		}
		zone_magazine_unlock();
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}
	slgd_lock(slgd);
	return z;
}

/*
 * zone_free()
 *
 * Release a zone and unlock the slgd lock.
 */
static void
zone_free(void *z)
{
	slglobaldata_t slgd = &SLGlobalData;
	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
	int i, j;

	zone_magazine_lock();
	slgd_unlock(slgd);

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free.  Re-enable
	 * BURST mode for the magazine.
	 */
	if (i == -1) {
		j = zone_magazine.rounds - zone_magazine.low_factor;
		for (i = 0; i < j; i++) {
			excess[i] = magazine_alloc(&zone_magazine, NULL);
			MASSERT(excess[i] != NULL);
		}

		zone_magazine_unlock();

		for (i = 0; i < j; i++)
			_vmem_free(excess[i], ZoneSize);

		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}

/*
 * _vmem_alloc()
 *
 *	Directly map memory in PAGE_SIZE'd chunks with the specified
 *	alignment.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	char *addr;
	char *save;
	size_t excess;

	/*
	 * Map anonymous private memory.
	 */
	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return(NULL);

	/*
	 * Check alignment.  The misaligned offset is also the excess
	 * amount.  If misaligned unmap the excess so we have a chance of
	 * mapping at the next alignment point and recursively try again.
	 *
	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
	 *   xxxxxxxxx				final excess calculation
	 *   ^ returned address
	 */
	excess = (uintptr_t)addr & (align - 1);

	if (excess) {
		excess = align - excess;
		save = addr;

		munmap(save + excess, size - excess);
		addr = _vmem_alloc(size, align, flags);
		munmap(save, excess);
	}
	return((void *)addr);
}

/*
 * _vmem_free()
 *
 *	Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}

/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		fflush(stderr);
		va_end(va);
	}
	abort();
}

__weak_reference(__malloc, malloc);
__weak_reference(__calloc, calloc);
__weak_reference(__posix_memalign, posix_memalign);
__weak_reference(__realloc, realloc);
__weak_reference(__free, free);