1 #include <sys/cdefs.h> 2 3 #define JEMALLOC_C_ 4 #include "jemalloc/internal/jemalloc_preamble.h" 5 #include "jemalloc/internal/jemalloc_internal_includes.h" 6 7 #include "jemalloc/internal/assert.h" 8 #include "jemalloc/internal/atomic.h" 9 #include "jemalloc/internal/ctl.h" 10 #include "jemalloc/internal/extent_dss.h" 11 #include "jemalloc/internal/extent_mmap.h" 12 #include "jemalloc/internal/jemalloc_internal_types.h" 13 #include "jemalloc/internal/log.h" 14 #include "jemalloc/internal/malloc_io.h" 15 #include "jemalloc/internal/mutex.h" 16 #include "jemalloc/internal/rtree.h" 17 #include "jemalloc/internal/size_classes.h" 18 #include "jemalloc/internal/spin.h" 19 #include "jemalloc/internal/sz.h" 20 #include "jemalloc/internal/ticker.h" 21 #include "jemalloc/internal/util.h" 22 23 #ifdef JEMALLOC_WEAK_NOSTD 24 __weak_alias(mallocx, __je_mallocx) 25 __weak_alias(rallocx, __je_rallocx) 26 __weak_alias(xallocx, __je_xallocx) 27 __weak_alias(sallocx, __je_sallocx) 28 __weak_alias(dallocx, __je_dallocx) 29 __weak_alias(sdallocx, __je_sdallocx) 30 __weak_alias(nallocx, __je_nallocx) 31 32 __weak_alias(mallctl, __je_mallctl) 33 __weak_alias(mallctlnametomib, __je_mallctlnametomib) 34 __weak_alias(mallctlbymib, __je_mallctlbymib) 35 36 __weak_alias(malloc_stats_print, __je_malloc_stats_print) 37 __weak_alias(malloc_usable_size, __je_malloc_usable_size) 38 39 __weak_alias(malloc_message, __je_malloc_message) 40 __weak_alias(malloc_conf, __je_malloc_conf) 41 42 __weak_alias(malloc_message_get, __je_malloc_message_get) 43 __weak_alias(malloc_conf_get, __je_malloc_conf_get) 44 45 __weak_alias(malloc_message_set, __je_malloc_message_set) 46 __weak_alias(malloc_conf_set, __je_malloc_conf_set) 47 #endif 48 49 /******************************************************************************/ 50 /* Data. */ 51 52 /* Runtime configuration options. */ 53 const char *je_malloc_conf 54 #ifndef _WIN32 55 JEMALLOC_ATTR(weak) 56 #endif 57 ; 58 bool opt_abort = 59 #ifdef JEMALLOC_DEBUG 60 true 61 #else 62 false 63 #endif 64 ; 65 bool opt_abort_conf = 66 #ifdef JEMALLOC_DEBUG 67 true 68 #else 69 false 70 #endif 71 ; 72 const char *opt_junk = 73 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 74 "true" 75 #else 76 "false" 77 #endif 78 ; 79 bool opt_junk_alloc = 80 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 81 true 82 #else 83 false 84 #endif 85 ; 86 bool opt_junk_free = 87 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 88 true 89 #else 90 false 91 #endif 92 ; 93 94 bool opt_utrace = false; 95 bool opt_xmalloc = false; 96 bool opt_zero = false; 97 unsigned opt_narenas = 0; 98 99 unsigned ncpus; 100 101 /* Protects arenas initialization. */ 102 malloc_mutex_t arenas_lock; 103 /* 104 * Arenas that are used to service external requests. Not all elements of the 105 * arenas array are necessarily used; arenas are created lazily as needed. 106 * 107 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and 108 * arenas. arenas[narenas_auto..narenas_total) are only used if the application 109 * takes some action to create them and allocate from them. 110 * 111 * Points to an arena_t. 112 */ 113 JEMALLOC_ALIGNED(CACHELINE) 114 atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; 115 static atomic_u_t narenas_total; /* Use narenas_total_*(). */ 116 static arena_t *a0; /* arenas[0]; read-only after initialization. */ 117 unsigned narenas_auto; /* Read-only after initialization. 
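 * narenas_auto is assigned in malloc_init_narenas(): it comes from
 * opt_narenas (set e.g. via MALLOC_CONF="narenas:8"; prefixed builds use a
 * prefixed variable name) or defaults to four arenas per CPU, and is
 * clamped below MALLOCX_ARENA_LIMIT.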
*/ 118 119 typedef enum { 120 malloc_init_uninitialized = 3, 121 malloc_init_a0_initialized = 2, 122 malloc_init_recursible = 1, 123 malloc_init_initialized = 0 /* Common case --> jnz. */ 124 } malloc_init_t; 125 static malloc_init_t malloc_init_state = malloc_init_uninitialized; 126 127 /* False should be the common case. Set to true to trigger initialization. */ 128 bool malloc_slow = true; 129 130 /* When malloc_slow is true, set the corresponding bits for sanity check. */ 131 enum { 132 flag_opt_junk_alloc = (1U), 133 flag_opt_junk_free = (1U << 1), 134 flag_opt_zero = (1U << 2), 135 flag_opt_utrace = (1U << 3), 136 flag_opt_xmalloc = (1U << 4) 137 }; 138 static uint8_t malloc_slow_flags; 139 140 #ifdef JEMALLOC_THREADED_INIT 141 /* Used to let the initializing thread recursively allocate. */ 142 # define NO_INITIALIZER ((unsigned long)0) 143 # define INITIALIZER pthread_self() 144 # define IS_INITIALIZER (malloc_initializer == pthread_self()) 145 static pthread_t malloc_initializer = NO_INITIALIZER; 146 #else 147 # define NO_INITIALIZER false 148 # define INITIALIZER true 149 # define IS_INITIALIZER malloc_initializer 150 static bool malloc_initializer = NO_INITIALIZER; 151 #endif 152 153 /* Used to avoid initialization races. */ 154 #ifdef _WIN32 155 #if _WIN32_WINNT >= 0x0600 156 static malloc_mutex_t init_lock = SRWLOCK_INIT; 157 #else 158 static malloc_mutex_t init_lock; 159 static bool init_lock_initialized = false; 160 161 JEMALLOC_ATTR(constructor) 162 static void WINAPI 163 _init_init_lock(void) { 164 /* 165 * If another constructor in the same binary is using mallctl to e.g. 166 * set up extent hooks, it may end up running before this one, and 167 * malloc_init_hard will crash trying to lock the uninitialized lock. So 168 * we force an initialization of the lock in malloc_init_hard as well. 169 * We don't try to care about atomicity of the accessed to the 170 * init_lock_initialized boolean, since it really only matters early in 171 * the process creation, before any separate thread normally starts 172 * doing anything. 173 */ 174 if (!init_lock_initialized) { 175 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, 176 malloc_mutex_rank_exclusive); 177 } 178 init_lock_initialized = true; 179 } 180 181 #ifdef _MSC_VER 182 # pragma section(".CRT$XCU", read) 183 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 184 static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 185 #endif 186 #endif 187 #else 188 #ifndef __lint__ 189 // Broken lint 190 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 191 #else 192 static malloc_mutex_t init_lock; 193 #endif 194 #endif 195 196 typedef struct { 197 void *p; /* Input pointer (as in realloc(p, s)). */ 198 size_t s; /* Request size. */ 199 void *r; /* Result pointer. */ 200 } malloc_utrace_t; 201 202 #ifdef JEMALLOC_UTRACE 203 # define UTRACE(a, b, c) do { \ 204 if (unlikely(opt_utrace)) { \ 205 int utrace_serrno = errno; \ 206 malloc_utrace_t ut; \ 207 ut.p = (a); \ 208 ut.s = (b); \ 209 ut.r = (c); \ 210 utrace(&ut, sizeof(ut)); \ 211 errno = utrace_serrno; \ 212 } \ 213 } while (0) 214 #else 215 # define UTRACE(a, b, c) 216 #endif 217 218 /* Whether encountered any invalid config options. */ 219 static bool had_conf_error = false; 220 221 /******************************************************************************/ 222 /* 223 * Function prototypes for static functions that are referenced prior to 224 * definition. 
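 * malloc_init_hard_a0() bootstraps only arena 0 (and parses the conf
 * options) so that a0malloc()/a0dalloc() work without touching TLS;
 * malloc_init_hard() drives the full initialization sequence.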
225 */ 226 227 static bool malloc_init_hard_a0(void); 228 static bool malloc_init_hard(void); 229 230 /******************************************************************************/ 231 /* 232 * Begin miscellaneous support functions. 233 */ 234 235 bool 236 malloc_initialized(void) { 237 return (malloc_init_state == malloc_init_initialized); 238 } 239 240 JEMALLOC_ALWAYS_INLINE bool 241 malloc_init_a0(void) { 242 if (unlikely(malloc_init_state == malloc_init_uninitialized)) { 243 return malloc_init_hard_a0(); 244 } 245 return false; 246 } 247 248 JEMALLOC_ALWAYS_INLINE bool 249 malloc_init(void) { 250 if (unlikely(!malloc_initialized()) && malloc_init_hard()) { 251 return true; 252 } 253 return false; 254 } 255 256 /* 257 * The a0*() functions are used instead of i{d,}alloc() in situations that 258 * cannot tolerate TLS variable access. 259 */ 260 261 static void * 262 a0ialloc(size_t size, bool zero, bool is_internal) { 263 if (unlikely(malloc_init_a0())) { 264 return NULL; 265 } 266 267 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, 268 is_internal, arena_get(TSDN_NULL, 0, true), true); 269 } 270 271 static void 272 a0idalloc(void *ptr, bool is_internal) { 273 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); 274 } 275 276 void * 277 a0malloc(size_t size) { 278 return a0ialloc(size, false, true); 279 } 280 281 void 282 a0dalloc(void *ptr) { 283 a0idalloc(ptr, true); 284 } 285 286 /* 287 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive 288 * situations that cannot tolerate TLS variable access (TLS allocation and very 289 * early internal data structure initialization). 290 */ 291 292 void * 293 bootstrap_malloc(size_t size) { 294 if (unlikely(size == 0)) { 295 size = 1; 296 } 297 298 return a0ialloc(size, false, false); 299 } 300 301 void * 302 bootstrap_calloc(size_t num, size_t size) { 303 size_t num_size; 304 305 num_size = num * size; 306 if (unlikely(num_size == 0)) { 307 assert(num == 0 || size == 0); 308 num_size = 1; 309 } 310 311 return a0ialloc(num_size, true, false); 312 } 313 314 void 315 bootstrap_free(void *ptr) { 316 if (unlikely(ptr == NULL)) { 317 return; 318 } 319 320 a0idalloc(ptr, false); 321 } 322 323 void 324 arena_set(unsigned ind, arena_t *arena) { 325 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); 326 } 327 328 static void 329 narenas_total_set(unsigned narenas) { 330 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); 331 } 332 333 static void 334 narenas_total_inc(void) { 335 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); 336 } 337 338 unsigned 339 narenas_total_get(void) { 340 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); 341 } 342 343 /* Create a new arena and insert it into the arenas array at index ind. */ 344 static arena_t * 345 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 346 arena_t *arena; 347 348 assert(ind <= narenas_total_get()); 349 if (ind >= MALLOCX_ARENA_LIMIT) { 350 return NULL; 351 } 352 if (ind == narenas_total_get()) { 353 narenas_total_inc(); 354 } 355 356 /* 357 * Another thread may have already initialized arenas[ind] if it's an 358 * auto arena. 359 */ 360 arena = arena_get(tsdn, ind, false); 361 if (arena != NULL) { 362 assert(ind < narenas_auto); 363 return arena; 364 } 365 366 /* Actually initialize the arena. 
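 * arenas_lock must be held by the caller; background thread creation for a
 * newly created arena is deferred to the callers (arena_init() and
 * arena_choose_hard()) until after the lock is dropped.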
*/ 367 arena = arena_new(tsdn, ind, extent_hooks); 368 369 return arena; 370 } 371 372 static void 373 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { 374 if (ind == 0) { 375 return; 376 } 377 if (have_background_thread) { 378 bool err; 379 malloc_mutex_lock(tsdn, &background_thread_lock); 380 err = background_thread_create(tsdn_tsd(tsdn), ind); 381 malloc_mutex_unlock(tsdn, &background_thread_lock); 382 if (err) { 383 malloc_printf("<jemalloc>: error in background thread " 384 "creation for arena %u. Abort.\n", ind); 385 abort(); 386 } 387 } 388 } 389 390 arena_t * 391 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 392 arena_t *arena; 393 394 malloc_mutex_lock(tsdn, &arenas_lock); 395 arena = arena_init_locked(tsdn, ind, extent_hooks); 396 malloc_mutex_unlock(tsdn, &arenas_lock); 397 398 arena_new_create_background_thread(tsdn, ind); 399 400 return arena; 401 } 402 403 static void 404 arena_bind(tsd_t *tsd, unsigned ind, bool internal) { 405 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); 406 arena_nthreads_inc(arena, internal); 407 408 if (internal) { 409 tsd_iarena_set(tsd, arena); 410 } else { 411 tsd_arena_set(tsd, arena); 412 } 413 } 414 415 void 416 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { 417 arena_t *oldarena, *newarena; 418 419 oldarena = arena_get(tsd_tsdn(tsd), oldind, false); 420 newarena = arena_get(tsd_tsdn(tsd), newind, false); 421 arena_nthreads_dec(oldarena, false); 422 arena_nthreads_inc(newarena, false); 423 tsd_arena_set(tsd, newarena); 424 } 425 426 static void 427 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { 428 arena_t *arena; 429 430 arena = arena_get(tsd_tsdn(tsd), ind, false); 431 arena_nthreads_dec(arena, internal); 432 433 if (internal) { 434 tsd_iarena_set(tsd, NULL); 435 } else { 436 tsd_arena_set(tsd, NULL); 437 } 438 } 439 440 arena_tdata_t * 441 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { 442 arena_tdata_t *tdata, *arenas_tdata_old; 443 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); 444 unsigned narenas_tdata_old, i; 445 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); 446 unsigned narenas_actual = narenas_total_get(); 447 448 /* 449 * Dissociate old tdata array (and set up for deallocation upon return) 450 * if it's too small. 451 */ 452 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { 453 arenas_tdata_old = arenas_tdata; 454 narenas_tdata_old = narenas_tdata; 455 arenas_tdata = NULL; 456 narenas_tdata = 0; 457 tsd_arenas_tdata_set(tsd, arenas_tdata); 458 tsd_narenas_tdata_set(tsd, narenas_tdata); 459 } else { 460 arenas_tdata_old = NULL; 461 narenas_tdata_old = 0; 462 } 463 464 /* Allocate tdata array if it's missing. */ 465 if (arenas_tdata == NULL) { 466 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); 467 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; 468 469 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { 470 *arenas_tdata_bypassp = true; 471 arenas_tdata = (arena_tdata_t *)a0malloc( 472 sizeof(arena_tdata_t) * narenas_tdata); 473 *arenas_tdata_bypassp = false; 474 } 475 if (arenas_tdata == NULL) { 476 tdata = NULL; 477 goto label_return; 478 } 479 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); 480 tsd_arenas_tdata_set(tsd, arenas_tdata); 481 tsd_narenas_tdata_set(tsd, narenas_tdata); 482 } 483 484 /* 485 * Copy to tdata array. 
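 * Decay tickers are copied for slots that already had tdata; slots beyond
 * the old array are initialized from scratch.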
It's possible that the actual number of arenas 486 * has increased since narenas_total_get() was called above, but that 487 * causes no correctness issues unless two threads concurrently execute 488 * the arenas.create mallctl, which we trust mallctl synchronization to 489 * prevent. 490 */ 491 492 /* Copy/initialize tickers. */ 493 for (i = 0; i < narenas_actual; i++) { 494 if (i < narenas_tdata_old) { 495 ticker_copy(&arenas_tdata[i].decay_ticker, 496 &arenas_tdata_old[i].decay_ticker); 497 } else { 498 ticker_init(&arenas_tdata[i].decay_ticker, 499 DECAY_NTICKS_PER_UPDATE); 500 } 501 } 502 if (narenas_tdata > narenas_actual) { 503 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) 504 * (narenas_tdata - narenas_actual)); 505 } 506 507 /* Read the refreshed tdata array. */ 508 tdata = &arenas_tdata[ind]; 509 label_return: 510 if (arenas_tdata_old != NULL) { 511 a0dalloc(arenas_tdata_old); 512 } 513 return tdata; 514 } 515 516 /* Slow path, called only by arena_choose(). */ 517 arena_t * 518 arena_choose_hard(tsd_t *tsd, bool internal) { 519 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); 520 521 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { 522 unsigned choose = percpu_arena_choose(); 523 ret = arena_get(tsd_tsdn(tsd), choose, true); 524 assert(ret != NULL); 525 arena_bind(tsd, arena_ind_get(ret), false); 526 arena_bind(tsd, arena_ind_get(ret), true); 527 528 return ret; 529 } 530 531 if (narenas_auto > 1) { 532 unsigned i, j, choose[2], first_null; 533 bool is_new_arena[2]; 534 535 /* 536 * Determine binding for both non-internal and internal 537 * allocation. 538 * 539 * choose[0]: For application allocation. 540 * choose[1]: For internal metadata allocation. 541 */ 542 543 for (j = 0; j < 2; j++) { 544 choose[j] = 0; 545 is_new_arena[j] = false; 546 } 547 548 first_null = narenas_auto; 549 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); 550 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); 551 for (i = 1; i < narenas_auto; i++) { 552 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { 553 /* 554 * Choose the first arena that has the lowest 555 * number of threads assigned to it. 556 */ 557 for (j = 0; j < 2; j++) { 558 if (arena_nthreads_get(arena_get( 559 tsd_tsdn(tsd), i, false), !!j) < 560 arena_nthreads_get(arena_get( 561 tsd_tsdn(tsd), choose[j], false), 562 !!j)) { 563 choose[j] = i; 564 } 565 } 566 } else if (first_null == narenas_auto) { 567 /* 568 * Record the index of the first uninitialized 569 * arena, in case all extant arenas are in use. 570 * 571 * NB: It is possible for there to be 572 * discontinuities in terms of initialized 573 * versus uninitialized arenas, due to the 574 * "thread.arena" mallctl. 575 */ 576 first_null = i; 577 } 578 } 579 580 for (j = 0; j < 2; j++) { 581 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), 582 choose[j], false), !!j) == 0 || first_null == 583 narenas_auto) { 584 /* 585 * Use an unloaded arena, or the least loaded 586 * arena if all arenas are already initialized. 587 */ 588 if (!!j == internal) { 589 ret = arena_get(tsd_tsdn(tsd), 590 choose[j], false); 591 } 592 } else { 593 arena_t *arena; 594 595 /* Initialize a new arena. 
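 * first_null is the lowest automatic index that is still unused; the new
 * arena is created there while arenas_lock is still held.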
*/ 596 choose[j] = first_null; 597 arena = arena_init_locked(tsd_tsdn(tsd), 598 choose[j], (extent_hooks_t *) 599 __UNCONST(&extent_hooks_default)); 600 if (arena == NULL) { 601 malloc_mutex_unlock(tsd_tsdn(tsd), 602 &arenas_lock); 603 return NULL; 604 } 605 is_new_arena[j] = true; 606 if (!!j == internal) { 607 ret = arena; 608 } 609 } 610 arena_bind(tsd, choose[j], !!j); 611 } 612 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); 613 614 for (j = 0; j < 2; j++) { 615 if (is_new_arena[j]) { 616 assert(choose[j] > 0); 617 arena_new_create_background_thread( 618 tsd_tsdn(tsd), choose[j]); 619 } 620 } 621 622 } else { 623 ret = arena_get(tsd_tsdn(tsd), 0, false); 624 arena_bind(tsd, 0, false); 625 arena_bind(tsd, 0, true); 626 } 627 628 return ret; 629 } 630 631 void 632 iarena_cleanup(tsd_t *tsd) { 633 arena_t *iarena; 634 635 iarena = tsd_iarena_get(tsd); 636 if (iarena != NULL) { 637 arena_unbind(tsd, arena_ind_get(iarena), true); 638 } 639 } 640 641 void 642 arena_cleanup(tsd_t *tsd) { 643 arena_t *arena; 644 645 arena = tsd_arena_get(tsd); 646 if (arena != NULL) { 647 arena_unbind(tsd, arena_ind_get(arena), false); 648 } 649 } 650 651 void 652 arenas_tdata_cleanup(tsd_t *tsd) { 653 arena_tdata_t *arenas_tdata; 654 655 /* Prevent tsd->arenas_tdata from being (re)created. */ 656 *tsd_arenas_tdata_bypassp_get(tsd) = true; 657 658 arenas_tdata = tsd_arenas_tdata_get(tsd); 659 if (arenas_tdata != NULL) { 660 tsd_arenas_tdata_set(tsd, NULL); 661 a0dalloc(arenas_tdata); 662 } 663 } 664 665 static void 666 stats_print_atexit(void) { 667 if (config_stats) { 668 tsdn_t *tsdn; 669 unsigned narenas, i; 670 671 tsdn = tsdn_fetch(); 672 673 /* 674 * Merge stats from extant threads. This is racy, since 675 * individual threads do not lock when recording tcache stats 676 * events. As a consequence, the final stats may be slightly 677 * out of date by the time they are reported, if other threads 678 * continue to allocate. 679 */ 680 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 681 arena_t *arena = arena_get(tsdn, i, false); 682 if (arena != NULL) { 683 tcache_t *tcache; 684 685 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); 686 ql_foreach(tcache, &arena->tcache_ql, link) { 687 tcache_stats_merge(tsdn, tcache, arena); 688 } 689 malloc_mutex_unlock(tsdn, 690 &arena->tcache_ql_mtx); 691 } 692 } 693 } 694 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); 695 } 696 697 /* 698 * Ensure that we don't hold any locks upon entry to or exit from allocator 699 * code (in a "broad" sense that doesn't count a reentrant allocation as an 700 * entrance or exit). 701 */ 702 JEMALLOC_ALWAYS_INLINE void 703 check_entry_exit_locking(tsdn_t *tsdn) { 704 if (!config_debug) { 705 return; 706 } 707 if (tsdn_null(tsdn)) { 708 return; 709 } 710 tsd_t *tsd = tsdn_tsd(tsdn); 711 /* 712 * It's possible we hold locks at entry/exit if we're in a nested 713 * allocation. 714 */ 715 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); 716 if (reentrancy_level != 0) { 717 return; 718 } 719 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); 720 } 721 722 /* 723 * End miscellaneous support functions. 724 */ 725 /******************************************************************************/ 726 /* 727 * Begin initialization functions. 
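 * The boot sequence is roughly: malloc_init() calls malloc_init_hard(),
 * which runs malloc_init_hard_a0_locked() (conf parsing, base/extent/ctl/
 * tcache boot, arena 0), then malloc_tsd_boot0(),
 * malloc_init_hard_recursible() (ncpus, pthread_atfork),
 * malloc_init_narenas(), prof_boot2(), malloc_init_percpu() and finally
 * malloc_init_hard_finish().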
728 */ 729 730 static char * 731 jemalloc_secure_getenv(const char *name) { 732 #ifdef JEMALLOC_HAVE_SECURE_GETENV 733 return secure_getenv(name); 734 #else 735 # ifdef JEMALLOC_HAVE_ISSETUGID 736 if (issetugid() != 0) { 737 return NULL; 738 } 739 # endif 740 return getenv(name); 741 #endif 742 } 743 744 static unsigned 745 malloc_ncpus(void) { 746 long result; 747 748 #ifdef _WIN32 749 SYSTEM_INFO si; 750 GetSystemInfo(&si); 751 result = si.dwNumberOfProcessors; 752 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) 753 /* 754 * glibc >= 2.6 has the CPU_COUNT macro. 755 * 756 * glibc's sysconf() uses isspace(). glibc allocates for the first time 757 * *before* setting up the isspace tables. Therefore we need a 758 * different method to get the number of CPUs. 759 */ 760 { 761 cpu_set_t set; 762 763 pthread_getaffinity_np(pthread_self(), sizeof(set), &set); 764 result = CPU_COUNT(&set); 765 } 766 #else 767 result = sysconf(_SC_NPROCESSORS_ONLN); 768 #endif 769 return ((result == -1) ? 1 : (unsigned)result); 770 } 771 772 static void 773 init_opt_stats_print_opts(const char *v, size_t vlen) { 774 size_t opts_len = strlen(opt_stats_print_opts); 775 assert(opts_len <= stats_print_tot_num_options); 776 777 for (size_t i = 0; i < vlen; i++) { 778 switch (v[i]) { 779 #define OPTION(o, v, d, s) case o: break; 780 STATS_PRINT_OPTIONS 781 #undef OPTION 782 default: continue; 783 } 784 785 if (strchr(opt_stats_print_opts, v[i]) != NULL) { 786 /* Ignore repeated. */ 787 continue; 788 } 789 790 opt_stats_print_opts[opts_len++] = v[i]; 791 opt_stats_print_opts[opts_len] = '\0'; 792 assert(opts_len <= stats_print_tot_num_options); 793 } 794 assert(opts_len == strlen(opt_stats_print_opts)); 795 } 796 797 static bool 798 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 799 char const **v_p, size_t *vlen_p) { 800 bool accept; 801 const char *opts = *opts_p; 802 803 *k_p = opts; 804 805 for (accept = false; !accept;) { 806 switch (*opts) { 807 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 808 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 809 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 810 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 811 case 'Y': case 'Z': 812 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 813 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 814 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 815 case 's': case 't': case 'u': case 'v': case 'w': case 'x': 816 case 'y': case 'z': 817 case '0': case '1': case '2': case '3': case '4': case '5': 818 case '6': case '7': case '8': case '9': 819 case '_': 820 opts++; 821 break; 822 case ':': 823 opts++; 824 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 825 *v_p = opts; 826 accept = true; 827 break; 828 case '\0': 829 if (opts != *opts_p) { 830 malloc_write("<jemalloc>: Conf string ends " 831 "with key\n"); 832 } 833 return true; 834 default: 835 malloc_write("<jemalloc>: Malformed conf string\n"); 836 return true; 837 } 838 } 839 840 for (accept = false; !accept;) { 841 switch (*opts) { 842 case ',': 843 opts++; 844 /* 845 * Look ahead one character here, because the next time 846 * this function is called, it will assume that end of 847 * input has been cleanly reached if no input remains, 848 * but we have optimistically already consumed the 849 * comma if one exists. 
850 */ 851 if (*opts == '\0') { 852 malloc_write("<jemalloc>: Conf string ends " 853 "with comma\n"); 854 } 855 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 856 accept = true; 857 break; 858 case '\0': 859 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 860 accept = true; 861 break; 862 default: 863 opts++; 864 break; 865 } 866 } 867 868 *opts_p = opts; 869 return false; 870 } 871 872 static JEMALLOC_NORETURN void 873 malloc_abort_invalid_conf(void) { 874 assert(opt_abort_conf); 875 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " 876 "value (see above).\n"); 877 abort(); 878 } 879 880 static void 881 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 882 size_t vlen) { 883 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 884 (int)vlen, v); 885 /* If abort_conf is set, error out after processing all options. */ 886 had_conf_error = true; 887 } 888 889 static void 890 malloc_slow_flag_init(void) { 891 /* 892 * Combine the runtime options into malloc_slow for fast path. Called 893 * after processing all the options. 894 */ 895 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) 896 | (opt_junk_free ? flag_opt_junk_free : 0) 897 | (opt_zero ? flag_opt_zero : 0) 898 | (opt_utrace ? flag_opt_utrace : 0) 899 | (opt_xmalloc ? flag_opt_xmalloc : 0); 900 901 malloc_slow = (malloc_slow_flags != 0); 902 } 903 904 static void 905 malloc_conf_init(void) { 906 unsigned i; 907 char buf[PATH_MAX + 1]; 908 const char *opts, *k, *v; 909 size_t klen, vlen; 910 911 for (i = 0; i < 4; i++) { 912 /* Get runtime configuration. */ 913 switch (i) { 914 case 0: 915 opts = config_malloc_conf; 916 break; 917 case 1: 918 if (je_malloc_conf != NULL) { 919 /* 920 * Use options that were compiled into the 921 * program. 922 */ 923 opts = je_malloc_conf; 924 } else { 925 /* No configuration specified. */ 926 buf[0] = '\0'; 927 opts = buf; 928 } 929 break; 930 case 2: { 931 ssize_t linklen = 0; 932 #ifndef _WIN32 933 int saved_errno = errno; 934 const char *linkname = 935 # ifdef JEMALLOC_PREFIX 936 "/etc/"JEMALLOC_PREFIX"malloc.conf" 937 # else 938 "/etc/malloc.conf" 939 # endif 940 ; 941 942 /* 943 * Try to use the contents of the "/etc/malloc.conf" 944 * symbolic link's name. 945 */ 946 linklen = readlink(linkname, buf, sizeof(buf) - 1); 947 if (linklen == -1) { 948 /* No configuration specified. */ 949 linklen = 0; 950 /* Restore errno. */ 951 set_errno(saved_errno); 952 } 953 #endif 954 buf[linklen] = '\0'; 955 opts = buf; 956 break; 957 } case 3: { 958 const char *envname = 959 #ifdef JEMALLOC_PREFIX 960 JEMALLOC_CPREFIX"MALLOC_CONF" 961 #else 962 "MALLOC_CONF" 963 #endif 964 ; 965 966 if ((opts = jemalloc_secure_getenv(envname)) != NULL) { 967 /* 968 * Do nothing; opts is already initialized to 969 * the value of the MALLOC_CONF environment 970 * variable. 971 */ 972 } else { 973 /* No configuration specified. 
*/ 974 buf[0] = '\0'; 975 opts = buf; 976 } 977 break; 978 } default: 979 not_reached(); 980 buf[0] = '\0'; 981 opts = buf; 982 } 983 984 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, 985 &vlen)) { 986 #define CONF_MATCH(n) \ 987 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) 988 #define CONF_MATCH_VALUE(n) \ 989 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) 990 #define CONF_HANDLE_BOOL(o, n) \ 991 if (CONF_MATCH(n)) { \ 992 if (CONF_MATCH_VALUE("true")) { \ 993 o = true; \ 994 } else if (CONF_MATCH_VALUE("false")) { \ 995 o = false; \ 996 } else { \ 997 malloc_conf_error( \ 998 "Invalid conf value", \ 999 k, klen, v, vlen); \ 1000 } \ 1001 continue; \ 1002 } 1003 #define CONF_MIN_no(um, min) false 1004 #define CONF_MIN_yes(um, min) ((um) < (min)) 1005 #define CONF_MAX_no(um, max) false 1006 #define CONF_MAX_yes(um, max) ((um) > (max)) 1007 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ 1008 if (CONF_MATCH(n)) { \ 1009 uintmax_t um; \ 1010 const char *end; \ 1011 \ 1012 set_errno(0); \ 1013 um = malloc_strtoumax(v, &end, 0); \ 1014 if (get_errno() != 0 || (uintptr_t)end -\ 1015 (uintptr_t)v != vlen) { \ 1016 malloc_conf_error( \ 1017 "Invalid conf value", \ 1018 k, klen, v, vlen); \ 1019 } else if (clip) { \ 1020 if (CONF_MIN_##check_min(um, \ 1021 (t)(min))) { \ 1022 o = (t)(min); \ 1023 } else if ( \ 1024 CONF_MAX_##check_max(um, \ 1025 (t)(max))) { \ 1026 o = (t)(max); \ 1027 } else { \ 1028 o = (t)um; \ 1029 } \ 1030 } else { \ 1031 if (CONF_MIN_##check_min(um, \ 1032 (t)(min)) || \ 1033 CONF_MAX_##check_max(um, \ 1034 (t)(max))) { \ 1035 malloc_conf_error( \ 1036 "Out-of-range " \ 1037 "conf value", \ 1038 k, klen, v, vlen); \ 1039 } else { \ 1040 o = (t)um; \ 1041 } \ 1042 } \ 1043 continue; \ 1044 } 1045 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ 1046 clip) \ 1047 CONF_HANDLE_T_U(unsigned, o, n, min, max, \ 1048 check_min, check_max, clip) 1049 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ 1050 CONF_HANDLE_T_U(size_t, o, n, min, max, \ 1051 check_min, check_max, clip) 1052 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 1053 if (CONF_MATCH(n)) { \ 1054 long l; \ 1055 char *end; \ 1056 \ 1057 set_errno(0); \ 1058 l = strtol(v, &end, 0); \ 1059 if (get_errno() != 0 || (uintptr_t)end -\ 1060 (uintptr_t)v != vlen) { \ 1061 malloc_conf_error( \ 1062 "Invalid conf value", \ 1063 k, klen, v, vlen); \ 1064 } else if (l < (ssize_t)(min) || l > \ 1065 (ssize_t)(max)) { \ 1066 malloc_conf_error( \ 1067 "Out-of-range conf value", \ 1068 k, klen, v, vlen); \ 1069 } else { \ 1070 o = l; \ 1071 } \ 1072 continue; \ 1073 } 1074 #define CONF_HANDLE_CHAR_P(o, n, d) \ 1075 if (CONF_MATCH(n)) { \ 1076 size_t cpylen = (vlen <= \ 1077 sizeof(o)-1) ? 
vlen : \ 1078 sizeof(o)-1; \ 1079 strncpy(o, v, cpylen); \ 1080 o[cpylen] = '\0'; \ 1081 continue; \ 1082 } 1083 1084 CONF_HANDLE_BOOL(opt_abort, "abort") 1085 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") 1086 if (strncmp("metadata_thp", k, klen) == 0) { 1087 int ii; 1088 bool match = false; 1089 for (ii = 0; ii < metadata_thp_mode_limit; ii++) { 1090 if (strncmp(metadata_thp_mode_names[ii], 1091 v, vlen) == 0) { 1092 opt_metadata_thp = ii; 1093 match = true; 1094 break; 1095 } 1096 } 1097 if (!match) { 1098 malloc_conf_error("Invalid conf value", 1099 k, klen, v, vlen); 1100 } 1101 continue; 1102 } 1103 CONF_HANDLE_BOOL(opt_retain, "retain") 1104 if (strncmp("dss", k, klen) == 0) { 1105 int ii; 1106 bool match = false; 1107 for (ii = 0; ii < dss_prec_limit; ii++) { 1108 if (strncmp(dss_prec_names[ii], v, vlen) 1109 == 0) { 1110 if (extent_dss_prec_set(ii)) { 1111 malloc_conf_error( 1112 "Error setting dss", 1113 k, klen, v, vlen); 1114 } else { 1115 opt_dss = 1116 dss_prec_names[ii]; 1117 match = true; 1118 break; 1119 } 1120 } 1121 } 1122 if (!match) { 1123 malloc_conf_error("Invalid conf value", 1124 k, klen, v, vlen); 1125 } 1126 continue; 1127 } 1128 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, 1129 UINT_MAX, yes, no, false) 1130 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, 1131 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < 1132 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : 1133 SSIZE_MAX); 1134 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, 1135 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < 1136 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : 1137 SSIZE_MAX); 1138 CONF_HANDLE_BOOL(opt_stats_print, "stats_print") 1139 if (CONF_MATCH("stats_print_opts")) { 1140 init_opt_stats_print_opts(v, vlen); 1141 continue; 1142 } 1143 if (config_fill) { 1144 if (CONF_MATCH("junk")) { 1145 if (CONF_MATCH_VALUE("true")) { 1146 opt_junk = "true"; 1147 opt_junk_alloc = opt_junk_free = 1148 true; 1149 } else if (CONF_MATCH_VALUE("false")) { 1150 opt_junk = "false"; 1151 opt_junk_alloc = opt_junk_free = 1152 false; 1153 } else if (CONF_MATCH_VALUE("alloc")) { 1154 opt_junk = "alloc"; 1155 opt_junk_alloc = true; 1156 opt_junk_free = false; 1157 } else if (CONF_MATCH_VALUE("free")) { 1158 opt_junk = "free"; 1159 opt_junk_alloc = false; 1160 opt_junk_free = true; 1161 } else { 1162 malloc_conf_error( 1163 "Invalid conf value", k, 1164 klen, v, vlen); 1165 } 1166 continue; 1167 } 1168 CONF_HANDLE_BOOL(opt_zero, "zero") 1169 } 1170 if (config_utrace) { 1171 CONF_HANDLE_BOOL(opt_utrace, "utrace") 1172 } 1173 if (config_xmalloc) { 1174 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") 1175 } 1176 CONF_HANDLE_BOOL(opt_tcache, "tcache") 1177 CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, 1178 "lg_extent_max_active_fit", 0, 1179 (sizeof(size_t) << 3), no, yes, false) 1180 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", 1181 -1, (sizeof(size_t) << 3) - 1) 1182 if (strncmp("percpu_arena", k, klen) == 0) { 1183 bool match = false; 1184 for (int ii = percpu_arena_mode_names_base; ii < 1185 percpu_arena_mode_names_limit; ii++) { 1186 if (strncmp(percpu_arena_mode_names[ii], 1187 v, vlen) == 0) { 1188 if (!have_percpu_arena) { 1189 malloc_conf_error( 1190 "No getcpu support", 1191 k, klen, v, vlen); 1192 } 1193 opt_percpu_arena = ii; 1194 match = true; 1195 break; 1196 } 1197 } 1198 if (!match) { 1199 malloc_conf_error("Invalid conf value", 1200 k, klen, v, vlen); 1201 } 1202 continue; 1203 } 1204 CONF_HANDLE_BOOL(opt_background_thread, 1205 "background_thread"); 1206 CONF_HANDLE_SIZE_T(opt_max_background_threads, 1207 
"max_background_threads", 1, 1208 opt_max_background_threads, yes, yes, 1209 true); 1210 if (config_prof) { 1211 CONF_HANDLE_BOOL(opt_prof, "prof") 1212 CONF_HANDLE_CHAR_P(opt_prof_prefix, 1213 "prof_prefix", "jeprof") 1214 CONF_HANDLE_BOOL(opt_prof_active, "prof_active") 1215 CONF_HANDLE_BOOL(opt_prof_thread_active_init, 1216 "prof_thread_active_init") 1217 CONF_HANDLE_SIZE_T(opt_lg_prof_sample, 1218 "lg_prof_sample", 0, (sizeof(uint64_t) << 3) 1219 - 1, no, yes, true) 1220 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") 1221 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 1222 "lg_prof_interval", -1, 1223 (sizeof(uint64_t) << 3) - 1) 1224 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") 1225 CONF_HANDLE_BOOL(opt_prof_final, "prof_final") 1226 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") 1227 } 1228 if (config_log) { 1229 if (CONF_MATCH("log")) { 1230 size_t cpylen = ( 1231 vlen <= sizeof(log_var_names) ? 1232 vlen : sizeof(log_var_names) - 1); 1233 strncpy(log_var_names, v, cpylen); 1234 log_var_names[cpylen] = '\0'; 1235 continue; 1236 } 1237 } 1238 if (CONF_MATCH("thp")) { 1239 bool match = false; 1240 for (int ii = 0; ii < thp_mode_names_limit; ii++) { 1241 if (strncmp(thp_mode_names[ii],v, vlen) 1242 == 0) { 1243 if (!have_madvise_huge) { 1244 malloc_conf_error( 1245 "No THP support", 1246 k, klen, v, vlen); 1247 } 1248 opt_thp = ii; 1249 match = true; 1250 break; 1251 } 1252 } 1253 if (!match) { 1254 malloc_conf_error("Invalid conf value", 1255 k, klen, v, vlen); 1256 } 1257 continue; 1258 } 1259 malloc_conf_error("Invalid conf pair", k, klen, v, 1260 vlen); 1261 #undef CONF_MATCH 1262 #undef CONF_MATCH_VALUE 1263 #undef CONF_HANDLE_BOOL 1264 #undef CONF_MIN_no 1265 #undef CONF_MIN_yes 1266 #undef CONF_MAX_no 1267 #undef CONF_MAX_yes 1268 #undef CONF_HANDLE_T_U 1269 #undef CONF_HANDLE_UNSIGNED 1270 #undef CONF_HANDLE_SIZE_T 1271 #undef CONF_HANDLE_SSIZE_T 1272 #undef CONF_HANDLE_CHAR_P 1273 } 1274 if (opt_abort_conf && had_conf_error) { 1275 malloc_abort_invalid_conf(); 1276 } 1277 } 1278 atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); 1279 } 1280 1281 static bool 1282 malloc_init_hard_needed(void) { 1283 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == 1284 malloc_init_recursible)) { 1285 /* 1286 * Another thread initialized the allocator before this one 1287 * acquired init_lock, or this thread is the initializing 1288 * thread, and it is recursively allocating. 1289 */ 1290 return false; 1291 } 1292 #ifdef JEMALLOC_THREADED_INIT 1293 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1294 /* Busy-wait until the initializing thread completes. */ 1295 spin_t spinner = SPIN_INITIALIZER; 1296 do { 1297 malloc_mutex_unlock(TSDN_NULL, &init_lock); 1298 spin_adaptive(&spinner); 1299 malloc_mutex_lock(TSDN_NULL, &init_lock); 1300 } while (!malloc_initialized()); 1301 return false; 1302 } 1303 #endif 1304 return true; 1305 } 1306 1307 static bool 1308 malloc_init_hard_a0_locked(void) { 1309 malloc_initializer = INITIALIZER; 1310 1311 if (config_prof) { 1312 prof_boot0(); 1313 } 1314 malloc_conf_init(); 1315 if (opt_stats_print) { 1316 /* Print statistics at exit. 
*/ 1317 if (atexit(stats_print_atexit) != 0) { 1318 malloc_write("<jemalloc>: Error in atexit()\n"); 1319 if (opt_abort) { 1320 abort(); 1321 } 1322 } 1323 } 1324 if (pages_boot()) { 1325 return true; 1326 } 1327 if (base_boot(TSDN_NULL)) { 1328 return true; 1329 } 1330 if (extent_boot()) { 1331 return true; 1332 } 1333 if (ctl_boot()) { 1334 return true; 1335 } 1336 if (config_prof) { 1337 prof_boot1(); 1338 } 1339 arena_boot(); 1340 if (tcache_boot(TSDN_NULL)) { 1341 return true; 1342 } 1343 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, 1344 malloc_mutex_rank_exclusive)) { 1345 return true; 1346 } 1347 /* 1348 * Create enough scaffolding to allow recursive allocation in 1349 * malloc_ncpus(). 1350 */ 1351 narenas_auto = 1; 1352 memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1353 /* 1354 * Initialize one arena here. The rest are lazily created in 1355 * arena_choose_hard(). 1356 */ 1357 if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)__UNCONST(&extent_hooks_default)) 1358 == NULL) { 1359 return true; 1360 } 1361 a0 = arena_get(TSDN_NULL, 0, false); 1362 malloc_init_state = malloc_init_a0_initialized; 1363 1364 return false; 1365 } 1366 1367 static bool 1368 malloc_init_hard_a0(void) { 1369 bool ret; 1370 1371 malloc_mutex_lock(TSDN_NULL, &init_lock); 1372 ret = malloc_init_hard_a0_locked(); 1373 malloc_mutex_unlock(TSDN_NULL, &init_lock); 1374 return ret; 1375 } 1376 1377 /* Initialize data structures which may trigger recursive allocation. */ 1378 static bool 1379 malloc_init_hard_recursible(void) { 1380 malloc_init_state = malloc_init_recursible; 1381 1382 ncpus = malloc_ncpus(); 1383 1384 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ 1385 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ 1386 !defined(__native_client__)) 1387 /* LinuxThreads' pthread_atfork() allocates. */ 1388 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 1389 jemalloc_postfork_child) != 0) { 1390 malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 1391 if (opt_abort) { 1392 abort(); 1393 } 1394 return true; 1395 } 1396 #endif 1397 1398 if (background_thread_boot0()) { 1399 return true; 1400 } 1401 1402 return false; 1403 } 1404 1405 static unsigned 1406 malloc_narenas_default(void) { 1407 assert(ncpus > 0); 1408 /* 1409 * For SMP systems, create more than one arena per CPU by 1410 * default. 1411 */ 1412 if (ncpus > 1) { 1413 return ncpus << 2; 1414 } else { 1415 return 1; 1416 } 1417 } 1418 1419 static percpu_arena_mode_t 1420 percpu_arena_as_initialized(percpu_arena_mode_t mode) { 1421 assert(!malloc_initialized()); 1422 assert(mode <= percpu_arena_disabled); 1423 1424 if (mode != percpu_arena_disabled) { 1425 mode += percpu_arena_mode_enabled_base; 1426 } 1427 1428 return mode; 1429 } 1430 1431 static bool 1432 malloc_init_narenas(void) { 1433 assert(ncpus > 0); 1434 1435 if (opt_percpu_arena != percpu_arena_disabled) { 1436 if (!have_percpu_arena || malloc_getcpu() < 0) { 1437 opt_percpu_arena = percpu_arena_disabled; 1438 malloc_printf("<jemalloc>: perCPU arena getcpu() not " 1439 "available. Setting narenas to %u.\n", opt_narenas ? 1440 opt_narenas : malloc_narenas_default()); 1441 if (opt_abort) { 1442 abort(); 1443 } 1444 } else { 1445 if (ncpus >= MALLOCX_ARENA_LIMIT) { 1446 malloc_printf("<jemalloc>: narenas w/ percpu" 1447 "arena beyond limit (%d)\n", ncpus); 1448 if (opt_abort) { 1449 abort(); 1450 } 1451 return true; 1452 } 1453 /* NB: opt_percpu_arena isn't fully initialized yet. 
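 * (percpu_arena_as_initialized() still has to add
 * percpu_arena_mode_enabled_base to the mode; opt_percpu_arena itself is
 * only rewritten later, in malloc_init_percpu().)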
*/ 1454 if (percpu_arena_as_initialized(opt_percpu_arena) == 1455 per_phycpu_arena && ncpus % 2 != 0) { 1456 malloc_printf("<jemalloc>: invalid " 1457 "configuration -- per physical CPU arena " 1458 "with odd number (%u) of CPUs (no hyper " 1459 "threading?).\n", ncpus); 1460 if (opt_abort) 1461 abort(); 1462 } 1463 unsigned n = percpu_arena_ind_limit( 1464 percpu_arena_as_initialized(opt_percpu_arena)); 1465 if (opt_narenas < n) { 1466 /* 1467 * If narenas is specified with percpu_arena 1468 * enabled, actual narenas is set as the greater 1469 * of the two. percpu_arena_choose will be free 1470 * to use any of the arenas based on CPU 1471 * id. This is conservative (at a small cost) 1472 * but ensures correctness. 1473 * 1474 * If for some reason the ncpus determined at 1475 * boot is not the actual number (e.g. because 1476 * of affinity setting from numactl), reserving 1477 * narenas this way provides a workaround for 1478 * percpu_arena. 1479 */ 1480 opt_narenas = n; 1481 } 1482 } 1483 } 1484 if (opt_narenas == 0) { 1485 opt_narenas = malloc_narenas_default(); 1486 } 1487 assert(opt_narenas > 0); 1488 1489 narenas_auto = opt_narenas; 1490 /* 1491 * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 1492 */ 1493 if (narenas_auto >= MALLOCX_ARENA_LIMIT) { 1494 narenas_auto = MALLOCX_ARENA_LIMIT - 1; 1495 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 1496 narenas_auto); 1497 } 1498 narenas_total_set(narenas_auto); 1499 1500 return false; 1501 } 1502 1503 static void 1504 malloc_init_percpu(void) { 1505 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); 1506 } 1507 1508 static bool 1509 malloc_init_hard_finish(void) { 1510 if (malloc_mutex_boot()) { 1511 return true; 1512 } 1513 1514 malloc_init_state = malloc_init_initialized; 1515 malloc_slow_flag_init(); 1516 1517 return false; 1518 } 1519 1520 static void 1521 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { 1522 malloc_mutex_assert_owner(tsdn, &init_lock); 1523 malloc_mutex_unlock(tsdn, &init_lock); 1524 if (reentrancy_set) { 1525 assert(!tsdn_null(tsdn)); 1526 tsd_t *tsd = tsdn_tsd(tsdn); 1527 assert(tsd_reentrancy_level_get(tsd) > 0); 1528 post_reentrancy(tsd); 1529 } 1530 } 1531 1532 static bool 1533 malloc_init_hard(void) { 1534 tsd_t *tsd; 1535 1536 #if defined(_WIN32) && _WIN32_WINNT < 0x0600 1537 _init_init_lock(); 1538 #endif 1539 malloc_mutex_lock(TSDN_NULL, &init_lock); 1540 1541 #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ 1542 malloc_init_hard_cleanup(tsdn, reentrancy); \ 1543 return ret; 1544 1545 if (!malloc_init_hard_needed()) { 1546 UNLOCK_RETURN(TSDN_NULL, false, false) 1547 } 1548 1549 if (malloc_init_state != malloc_init_a0_initialized && 1550 malloc_init_hard_a0_locked()) { 1551 UNLOCK_RETURN(TSDN_NULL, true, false) 1552 } 1553 1554 malloc_mutex_unlock(TSDN_NULL, &init_lock); 1555 /* Recursive allocation relies on functional tsd. */ 1556 tsd = malloc_tsd_boot0(); 1557 if (tsd == NULL) { 1558 return true; 1559 } 1560 if (malloc_init_hard_recursible()) { 1561 return true; 1562 } 1563 1564 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 1565 /* Set reentrancy level to 1 during init. */ 1566 pre_reentrancy(tsd, NULL); 1567 /* Initialize narenas before prof_boot2 (for allocation). 
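 * Any failure from here on unwinds through UNLOCK_RETURN(), which drops
 * init_lock and the reentrancy level that was just raised.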
*/ 1568 if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { 1569 UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1570 } 1571 if (config_prof && prof_boot2(tsd)) { 1572 UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1573 } 1574 1575 malloc_init_percpu(); 1576 1577 if (malloc_init_hard_finish()) { 1578 UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1579 } 1580 post_reentrancy(tsd); 1581 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1582 1583 witness_assert_lockless(witness_tsd_tsdn( 1584 tsd_witness_tsdp_get_unsafe(tsd))); 1585 malloc_tsd_boot1(); 1586 /* Update TSD after tsd_boot1. */ 1587 tsd = tsd_fetch(); 1588 if (opt_background_thread) { 1589 assert(have_background_thread); 1590 /* 1591 * Need to finish init & unlock first before creating background 1592 * threads (pthread_create depends on malloc). ctl_init (which 1593 * sets isthreaded) needs to be called without holding any lock. 1594 */ 1595 background_thread_ctl_init(tsd_tsdn(tsd)); 1596 1597 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); 1598 bool err = background_thread_create(tsd, 0); 1599 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); 1600 if (err) { 1601 return true; 1602 } 1603 } 1604 #undef UNLOCK_RETURN 1605 return false; 1606 } 1607 1608 /* 1609 * End initialization functions. 1610 */ 1611 /******************************************************************************/ 1612 /* 1613 * Begin allocation-path internal functions and data structures. 1614 */ 1615 1616 /* 1617 * Settings determined by the documented behavior of the allocation functions. 1618 */ 1619 typedef struct static_opts_s static_opts_t; 1620 struct static_opts_s { 1621 /* Whether or not allocation size may overflow. */ 1622 bool may_overflow; 1623 /* Whether or not allocations of size 0 should be treated as size 1. */ 1624 bool bump_empty_alloc; 1625 /* 1626 * Whether to assert that allocations are not of size 0 (after any 1627 * bumping). 1628 */ 1629 bool assert_nonempty_alloc; 1630 1631 /* 1632 * Whether or not to modify the 'result' argument to malloc in case of 1633 * error. 1634 */ 1635 bool null_out_result_on_error; 1636 /* Whether to set errno when we encounter an error condition. */ 1637 bool set_errno_on_error; 1638 1639 /* 1640 * The minimum valid alignment for functions requesting aligned storage. 1641 */ 1642 size_t min_alignment; 1643 1644 /* The error string to use if we oom. */ 1645 const char *oom_string; 1646 /* The error string to use if the passed-in alignment is invalid. */ 1647 const char *invalid_alignment_string; 1648 1649 /* 1650 * False if we're configured to skip some time-consuming operations. 1651 * 1652 * This isn't really a malloc "behavior", but it acts as a useful 1653 * summary of several other static (or at least, static after program 1654 * initialization) options. 1655 */ 1656 bool slow; 1657 }; 1658 1659 JEMALLOC_ALWAYS_INLINE void 1660 static_opts_init(static_opts_t *static_opts) { 1661 static_opts->may_overflow = false; 1662 static_opts->bump_empty_alloc = false; 1663 static_opts->assert_nonempty_alloc = false; 1664 static_opts->null_out_result_on_error = false; 1665 static_opts->set_errno_on_error = false; 1666 static_opts->min_alignment = 0; 1667 static_opts->oom_string = ""; 1668 static_opts->invalid_alignment_string = ""; 1669 static_opts->slow = false; 1670 } 1671 1672 /* 1673 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we 1674 * should have one constant here per magic value there. Note however that the 1675 * representations need not be related. 
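 * (For instance, the public MALLOCX_TCACHE_NONE and MALLOCX_ARENA(a) flags
 * map onto TCACHE_IND_NONE and an explicit arena index respectively; only
 * the roles correspond, not the numeric encodings.)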
1676 */ 1677 #define TCACHE_IND_NONE ((unsigned)-1) 1678 #define TCACHE_IND_AUTOMATIC ((unsigned)-2) 1679 #define ARENA_IND_AUTOMATIC ((unsigned)-1) 1680 1681 typedef struct dynamic_opts_s dynamic_opts_t; 1682 struct dynamic_opts_s { 1683 void **result; 1684 size_t num_items; 1685 size_t item_size; 1686 size_t alignment; 1687 bool zero; 1688 unsigned tcache_ind; 1689 unsigned arena_ind; 1690 }; 1691 1692 JEMALLOC_ALWAYS_INLINE void 1693 dynamic_opts_init(dynamic_opts_t *dynamic_opts) { 1694 dynamic_opts->result = NULL; 1695 dynamic_opts->num_items = 0; 1696 dynamic_opts->item_size = 0; 1697 dynamic_opts->alignment = 0; 1698 dynamic_opts->zero = false; 1699 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; 1700 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; 1701 } 1702 1703 /* ind is ignored if dopts->alignment > 0. */ 1704 JEMALLOC_ALWAYS_INLINE void * 1705 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1706 size_t size, size_t usize, szind_t ind) { 1707 tcache_t *tcache; 1708 arena_t *arena; 1709 1710 /* Fill in the tcache. */ 1711 if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { 1712 if (likely(!sopts->slow)) { 1713 /* Getting tcache ptr unconditionally. */ 1714 tcache = tsd_tcachep_get(tsd); 1715 assert(tcache == tcache_get(tsd)); 1716 } else { 1717 tcache = tcache_get(tsd); 1718 } 1719 } else if (dopts->tcache_ind == TCACHE_IND_NONE) { 1720 tcache = NULL; 1721 } else { 1722 tcache = tcaches_get(tsd, dopts->tcache_ind); 1723 } 1724 1725 /* Fill in the arena. */ 1726 if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { 1727 /* 1728 * In case of automatic arena management, we defer arena 1729 * computation until as late as we can, hoping to fill the 1730 * allocation out of the tcache. 1731 */ 1732 arena = NULL; 1733 } else { 1734 arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); 1735 } 1736 1737 if (unlikely(dopts->alignment != 0)) { 1738 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, 1739 dopts->zero, tcache, arena); 1740 } 1741 1742 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, 1743 arena, sopts->slow); 1744 } 1745 1746 JEMALLOC_ALWAYS_INLINE void * 1747 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1748 size_t usize, szind_t ind) { 1749 void *ret; 1750 1751 /* 1752 * For small allocations, sampling bumps the usize. If so, we allocate 1753 * from the ind_large bucket. 1754 */ 1755 szind_t ind_large; 1756 size_t bumped_usize = usize; 1757 1758 if (usize <= SMALL_MAXCLASS) { 1759 assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : 1760 sz_sa2u(LARGE_MINCLASS, dopts->alignment)) 1761 == LARGE_MINCLASS); 1762 ind_large = sz_size2index(LARGE_MINCLASS); 1763 bumped_usize = sz_s2u(LARGE_MINCLASS); 1764 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, 1765 bumped_usize, ind_large); 1766 if (unlikely(ret == NULL)) { 1767 return NULL; 1768 } 1769 arena_prof_promote(tsd_tsdn(tsd), ret, usize); 1770 } else { 1771 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); 1772 } 1773 1774 return ret; 1775 } 1776 1777 /* 1778 * Returns true if the allocation will overflow, and false otherwise. Sets 1779 * *size to the product either way. 1780 */ 1781 JEMALLOC_ALWAYS_INLINE bool 1782 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, 1783 size_t *size) { 1784 /* 1785 * This function is just num_items * item_size, except that we may have 1786 * to check for overflow. 
1787 */ 1788 1789 if (!may_overflow) { 1790 assert(dopts->num_items == 1); 1791 *size = dopts->item_size; 1792 return false; 1793 } 1794 1795 /* A size_t with its high-half bits all set to 1. */ 1796 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); 1797 1798 *size = dopts->item_size * dopts->num_items; 1799 1800 if (unlikely(*size == 0)) { 1801 return (dopts->num_items != 0 && dopts->item_size != 0); 1802 } 1803 1804 /* 1805 * We got a non-zero size, but we don't know if we overflowed to get 1806 * there. To avoid having to do a divide, we'll be clever and note that 1807 * if both A and B can be represented in N/2 bits, then their product 1808 * can be represented in N bits (without the possibility of overflow). 1809 */ 1810 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { 1811 return false; 1812 } 1813 if (likely(*size / dopts->item_size == dopts->num_items)) { 1814 return false; 1815 } 1816 return true; 1817 } 1818 1819 JEMALLOC_ALWAYS_INLINE int 1820 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { 1821 /* Where the actual allocated memory will live. */ 1822 void *allocation = NULL; 1823 /* Filled in by compute_size_with_overflow below. */ 1824 size_t size = 0; 1825 /* 1826 * For unaligned allocations, we need only ind. For aligned 1827 * allocations, or in case of stats or profiling we need usize. 1828 * 1829 * These are actually dead stores, in that their values are reset before 1830 * any branch on their value is taken. Sometimes though, it's 1831 * convenient to pass them as arguments before this point. To avoid 1832 * undefined behavior then, we initialize them with dummy stores. 1833 */ 1834 szind_t ind = 0; 1835 size_t usize = 0; 1836 1837 /* Reentrancy is only checked on slow path. */ 1838 int8_t reentrancy_level; 1839 1840 /* Compute the amount of memory the user wants. */ 1841 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, 1842 &size))) { 1843 goto label_oom; 1844 } 1845 1846 /* Validate the user input. */ 1847 if (sopts->bump_empty_alloc) { 1848 if (unlikely(size == 0)) { 1849 size = 1; 1850 } 1851 } 1852 1853 if (sopts->assert_nonempty_alloc) { 1854 assert (size != 0); 1855 } 1856 1857 if (unlikely(dopts->alignment < sopts->min_alignment 1858 || (dopts->alignment & (dopts->alignment - 1)) != 0)) { 1859 goto label_invalid_alignment; 1860 } 1861 1862 /* This is the beginning of the "core" algorithm. */ 1863 1864 if (dopts->alignment == 0) { 1865 ind = sz_size2index(size); 1866 if (unlikely(ind >= NSIZES)) { 1867 goto label_oom; 1868 } 1869 if (config_stats || (config_prof && opt_prof)) { 1870 usize = sz_index2size(ind); 1871 assert(usize > 0 && usize <= LARGE_MAXCLASS); 1872 } 1873 } else { 1874 usize = sz_sa2u(size, dopts->alignment); 1875 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 1876 goto label_oom; 1877 } 1878 } 1879 1880 check_entry_exit_locking(tsd_tsdn(tsd)); 1881 1882 /* 1883 * If we need to handle reentrancy, we can do it out of a 1884 * known-initialized arena (i.e. arena 0). 1885 */ 1886 reentrancy_level = tsd_reentrancy_level_get(tsd); 1887 if (sopts->slow && unlikely(reentrancy_level > 0)) { 1888 /* 1889 * We should never specify particular arenas or tcaches from 1890 * within our internal allocations. 1891 */ 1892 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || 1893 dopts->tcache_ind == TCACHE_IND_NONE); 1894 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); 1895 dopts->tcache_ind = TCACHE_IND_NONE; 1896 /* We know that arena 0 has already been initialized. 
*/ 1897 dopts->arena_ind = 0; 1898 } 1899 1900 /* If profiling is on, get our profiling context. */ 1901 if (config_prof && opt_prof) { 1902 /* 1903 * Note that if we're going down this path, usize must have been 1904 * initialized in the previous if statement. 1905 */ 1906 prof_tctx_t *tctx = prof_alloc_prep( 1907 tsd, usize, prof_active_get_unlocked(), true); 1908 1909 alloc_ctx_t alloc_ctx; 1910 if (likely((uintptr_t)tctx == (uintptr_t)1U)) { 1911 alloc_ctx.slab = (usize <= SMALL_MAXCLASS); 1912 allocation = imalloc_no_sample( 1913 sopts, dopts, tsd, usize, usize, ind); 1914 } else if ((uintptr_t)tctx > (uintptr_t)1U) { 1915 /* 1916 * Note that ind might still be 0 here. This is fine; 1917 * imalloc_sample ignores ind if dopts->alignment > 0. 1918 */ 1919 allocation = imalloc_sample( 1920 sopts, dopts, tsd, usize, ind); 1921 alloc_ctx.slab = false; 1922 } else { 1923 allocation = NULL; 1924 } 1925 1926 if (unlikely(allocation == NULL)) { 1927 prof_alloc_rollback(tsd, tctx, true); 1928 goto label_oom; 1929 } 1930 prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); 1931 } else { 1932 /* 1933 * If dopts->alignment > 0, then ind is still 0, but usize was 1934 * computed in the previous if statement. Down the positive 1935 * alignment path, imalloc_no_sample ignores ind and size 1936 * (relying only on usize). 1937 */ 1938 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, 1939 ind); 1940 if (unlikely(allocation == NULL)) { 1941 goto label_oom; 1942 } 1943 } 1944 1945 /* 1946 * Allocation has been done at this point. We still have some 1947 * post-allocation work to do though. 1948 */ 1949 assert(dopts->alignment == 0 1950 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); 1951 1952 if (config_stats) { 1953 assert(usize == isalloc(tsd_tsdn(tsd), allocation)); 1954 *tsd_thread_allocatedp_get(tsd) += usize; 1955 } 1956 1957 if (sopts->slow) { 1958 UTRACE(0, size, allocation); 1959 } 1960 1961 /* Success! */ 1962 check_entry_exit_locking(tsd_tsdn(tsd)); 1963 *dopts->result = allocation; 1964 return 0; 1965 1966 label_oom: 1967 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { 1968 malloc_write(sopts->oom_string); 1969 abort(); 1970 } 1971 1972 if (sopts->slow) { 1973 UTRACE(NULL, size, NULL); 1974 } 1975 1976 check_entry_exit_locking(tsd_tsdn(tsd)); 1977 1978 if (sopts->set_errno_on_error) { 1979 set_errno(ENOMEM); 1980 } 1981 1982 if (sopts->null_out_result_on_error) { 1983 *dopts->result = NULL; 1984 } 1985 1986 return ENOMEM; 1987 1988 /* 1989 * This label is only jumped to by one goto; we move it out of line 1990 * anyways to avoid obscuring the non-error paths, and for symmetry with 1991 * the oom case. 1992 */ 1993 label_invalid_alignment: 1994 if (config_xmalloc && unlikely(opt_xmalloc)) { 1995 malloc_write(sopts->invalid_alignment_string); 1996 abort(); 1997 } 1998 1999 if (sopts->set_errno_on_error) { 2000 set_errno(EINVAL); 2001 } 2002 2003 if (sopts->slow) { 2004 UTRACE(NULL, size, NULL); 2005 } 2006 2007 check_entry_exit_locking(tsd_tsdn(tsd)); 2008 2009 if (sopts->null_out_result_on_error) { 2010 *dopts->result = NULL; 2011 } 2012 2013 return EINVAL; 2014 } 2015 2016 /* Returns the errno-style error code of the allocation. 
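 * It is 0 on success and ENOMEM or EINVAL on failure. Callers such as
 * je_malloc() ignore the return value, while je_posix_memalign() passes it
 * straight through to its caller.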
*/ 2017 JEMALLOC_ALWAYS_INLINE int 2018 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { 2019 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { 2020 if (config_xmalloc && unlikely(opt_xmalloc)) { 2021 malloc_write(sopts->oom_string); 2022 abort(); 2023 } 2024 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); 2025 set_errno(ENOMEM); 2026 *dopts->result = NULL; 2027 2028 return ENOMEM; 2029 } 2030 2031 /* We always need the tsd. Let's grab it right away. */ 2032 tsd_t *tsd = tsd_fetch(); 2033 assert(tsd); 2034 if (likely(tsd_fast(tsd))) { 2035 /* Fast and common path. */ 2036 tsd_assert_fast(tsd); 2037 sopts->slow = false; 2038 return imalloc_body(sopts, dopts, tsd); 2039 } else { 2040 sopts->slow = true; 2041 return imalloc_body(sopts, dopts, tsd); 2042 } 2043 } 2044 /******************************************************************************/ 2045 /* 2046 * Begin malloc(3)-compatible functions. 2047 */ 2048 2049 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2050 void JEMALLOC_NOTHROW * 2051 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2052 je_malloc(size_t size) { 2053 void *ret; 2054 static_opts_t sopts; 2055 dynamic_opts_t dopts; 2056 2057 LOG("core.malloc.entry", "size: %zu", size); 2058 2059 static_opts_init(&sopts); 2060 dynamic_opts_init(&dopts); 2061 2062 sopts.bump_empty_alloc = true; 2063 sopts.null_out_result_on_error = true; 2064 sopts.set_errno_on_error = true; 2065 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; 2066 2067 dopts.result = &ret; 2068 dopts.num_items = 1; 2069 dopts.item_size = size; 2070 2071 imalloc(&sopts, &dopts); 2072 2073 LOG("core.malloc.exit", "result: %p", ret); 2074 2075 return ret; 2076 } 2077 2078 JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2079 JEMALLOC_ATTR(nonnull(1)) 2080 je_posix_memalign(void **memptr, size_t alignment, size_t size) { 2081 int ret; 2082 static_opts_t sopts; 2083 dynamic_opts_t dopts; 2084 2085 LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " 2086 "size: %zu", memptr, alignment, size); 2087 2088 static_opts_init(&sopts); 2089 dynamic_opts_init(&dopts); 2090 2091 sopts.bump_empty_alloc = true; 2092 sopts.min_alignment = sizeof(void *); 2093 sopts.oom_string = 2094 "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2095 sopts.invalid_alignment_string = 2096 "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2097 2098 dopts.result = memptr; 2099 dopts.num_items = 1; 2100 dopts.item_size = size; 2101 dopts.alignment = alignment; 2102 2103 ret = imalloc(&sopts, &dopts); 2104 2105 LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, 2106 *memptr); 2107 2108 return ret; 2109 } 2110 2111 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2112 void JEMALLOC_NOTHROW * 2113 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) 2114 je_aligned_alloc(size_t alignment, size_t size) { 2115 void *ret; 2116 2117 static_opts_t sopts; 2118 dynamic_opts_t dopts; 2119 2120 LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", 2121 alignment, size); 2122 2123 static_opts_init(&sopts); 2124 dynamic_opts_init(&dopts); 2125 2126 sopts.bump_empty_alloc = true; 2127 sopts.null_out_result_on_error = true; 2128 sopts.set_errno_on_error = true; 2129 sopts.min_alignment = 1; 2130 sopts.oom_string = 2131 "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2132 sopts.invalid_alignment_string = 2133 "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2134 2135 dopts.result = &ret; 2136 dopts.num_items = 1; 2137 
dopts.item_size = size; 2138 dopts.alignment = alignment; 2139 2140 imalloc(&sopts, &dopts); 2141 2142 LOG("core.aligned_alloc.exit", "result: %p", ret); 2143 2144 return ret; 2145 } 2146 2147 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2148 void JEMALLOC_NOTHROW * 2149 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 2150 je_calloc(size_t num, size_t size) { 2151 void *ret; 2152 static_opts_t sopts; 2153 dynamic_opts_t dopts; 2154 2155 LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); 2156 2157 static_opts_init(&sopts); 2158 dynamic_opts_init(&dopts); 2159 2160 sopts.may_overflow = true; 2161 sopts.bump_empty_alloc = true; 2162 sopts.null_out_result_on_error = true; 2163 sopts.set_errno_on_error = true; 2164 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; 2165 2166 dopts.result = &ret; 2167 dopts.num_items = num; 2168 dopts.item_size = size; 2169 dopts.zero = true; 2170 2171 imalloc(&sopts, &dopts); 2172 2173 LOG("core.calloc.exit", "result: %p", ret); 2174 2175 return ret; 2176 } 2177 2178 static void * 2179 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2180 prof_tctx_t *tctx) { 2181 void *p; 2182 2183 if (tctx == NULL) { 2184 return NULL; 2185 } 2186 if (usize <= SMALL_MAXCLASS) { 2187 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); 2188 if (p == NULL) { 2189 return NULL; 2190 } 2191 arena_prof_promote(tsd_tsdn(tsd), p, usize); 2192 } else { 2193 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2194 } 2195 2196 return p; 2197 } 2198 2199 JEMALLOC_ALWAYS_INLINE void * 2200 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2201 alloc_ctx_t *alloc_ctx) { 2202 void *p; 2203 bool prof_activex; 2204 prof_tctx_t *old_tctx, *tctx; 2205 2206 prof_activex = prof_active_get_unlocked(); 2207 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 2208 tctx = prof_alloc_prep(tsd, usize, prof_activex, true); 2209 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2210 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); 2211 } else { 2212 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2213 } 2214 if (unlikely(p == NULL)) { 2215 prof_alloc_rollback(tsd, tctx, true); 2216 return NULL; 2217 } 2218 prof_realloc(tsd, p, usize, tctx, prof_activex, true, old_ptr, 2219 old_usize, old_tctx); 2220 2221 return p; 2222 } 2223 2224 JEMALLOC_ALWAYS_INLINE void 2225 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { 2226 if (!slow_path) { 2227 tsd_assert_fast(tsd); 2228 } 2229 check_entry_exit_locking(tsd_tsdn(tsd)); 2230 if (tsd_reentrancy_level_get(tsd) != 0) { 2231 assert(slow_path); 2232 } 2233 2234 assert(ptr != NULL); 2235 assert(malloc_initialized() || IS_INITIALIZER); 2236 2237 alloc_ctx_t alloc_ctx; 2238 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2239 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2240 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2241 assert(alloc_ctx.szind != NSIZES); 2242 2243 size_t usize; 2244 if (config_prof && opt_prof) { 2245 usize = sz_index2size(alloc_ctx.szind); 2246 prof_free(tsd, ptr, usize, &alloc_ctx); 2247 } else if (config_stats) { 2248 usize = sz_index2size(alloc_ctx.szind); 2249 } 2250 if (config_stats) { 2251 *tsd_thread_deallocatedp_get(tsd) += usize; 2252 } 2253 2254 if (likely(!slow_path)) { 2255 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2256 false); 2257 } else { 2258 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2259 true); 2260 } 2261 } 2262 2263 
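/*
 * Sized counterpart of ifree(), used by je_sdallocx() below: the caller
 * supplies usize, so the rtree size-class lookup can be skipped on the
 * common (non-profiled, cache-oblivious) path.
 */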
JEMALLOC_ALWAYS_INLINE void 2264 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { 2265 if (!slow_path) { 2266 tsd_assert_fast(tsd); 2267 } 2268 check_entry_exit_locking(tsd_tsdn(tsd)); 2269 if (tsd_reentrancy_level_get(tsd) != 0) { 2270 assert(slow_path); 2271 } 2272 2273 assert(ptr != NULL); 2274 assert(malloc_initialized() || IS_INITIALIZER); 2275 2276 alloc_ctx_t alloc_ctx, *ctx; 2277 if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { 2278 /* 2279 * When cache_oblivious is disabled and ptr is not page aligned, 2280 * the allocation was not sampled -- usize can be used to 2281 * determine szind directly. 2282 */ 2283 alloc_ctx.szind = sz_size2index(usize); 2284 alloc_ctx.slab = true; 2285 ctx = &alloc_ctx; 2286 if (config_debug) { 2287 alloc_ctx_t dbg_ctx; 2288 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2289 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, 2290 rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, 2291 &dbg_ctx.slab); 2292 assert(dbg_ctx.szind == alloc_ctx.szind); 2293 assert(dbg_ctx.slab == alloc_ctx.slab); 2294 } 2295 } else if (config_prof && opt_prof) { 2296 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2297 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2298 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2299 assert(alloc_ctx.szind == sz_size2index(usize)); 2300 ctx = &alloc_ctx; 2301 } else { 2302 ctx = NULL; 2303 } 2304 2305 if (config_prof && opt_prof) { 2306 prof_free(tsd, ptr, usize, ctx); 2307 } 2308 if (config_stats) { 2309 *tsd_thread_deallocatedp_get(tsd) += usize; 2310 } 2311 2312 if (likely(!slow_path)) { 2313 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); 2314 } else { 2315 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); 2316 } 2317 } 2318 2319 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2320 void JEMALLOC_NOTHROW * 2321 JEMALLOC_ALLOC_SIZE(2) 2322 je_realloc(void *ptr, size_t size) { 2323 void *ret; 2324 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); 2325 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 2326 size_t old_usize = 0; 2327 2328 LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); 2329 2330 if (unlikely(size == 0)) { 2331 #if 0 2332 // http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_400 2333 if (ptr != NULL) { 2334 /* realloc(ptr, 0) is equivalent to free(ptr). */ 2335 UTRACE(ptr, 0, 0); 2336 tcache_t *tcache; 2337 tsd_t *tsd = tsd_fetch(); 2338 if (tsd_reentrancy_level_get(tsd) == 0) { 2339 tcache = tcache_get(tsd); 2340 } else { 2341 tcache = NULL; 2342 } 2343 ifree(tsd, ptr, tcache, true); 2344 2345 LOG("core.realloc.exit", "result: %p", NULL); 2346 return NULL; 2347 } 2348 #endif 2349 size = 1; 2350 } 2351 2352 if (likely(ptr != NULL)) { 2353 assert(malloc_initialized() || IS_INITIALIZER); 2354 tsd_t *tsd = tsd_fetch(); 2355 2356 check_entry_exit_locking(tsd_tsdn(tsd)); 2357 2358 alloc_ctx_t alloc_ctx; 2359 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2360 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2361 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2362 assert(alloc_ctx.szind != NSIZES); 2363 old_usize = sz_index2size(alloc_ctx.szind); 2364 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2365 if (config_prof && opt_prof) { 2366 usize = sz_s2u(size); 2367 ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? 
2368 NULL : irealloc_prof(tsd, ptr, old_usize, usize, 2369 &alloc_ctx); 2370 } else { 2371 if (config_stats) { 2372 usize = sz_s2u(size); 2373 } 2374 ret = iralloc(tsd, ptr, old_usize, size, 0, false); 2375 } 2376 tsdn = tsd_tsdn(tsd); 2377 } else { 2378 /* realloc(NULL, size) is equivalent to malloc(size). */ 2379 void *ret1 = je_malloc(size); 2380 LOG("core.realloc.exit", "result: %p", ret1); 2381 return ret1; 2382 } 2383 2384 if (unlikely(ret == NULL)) { 2385 if (config_xmalloc && unlikely(opt_xmalloc)) { 2386 malloc_write("<jemalloc>: Error in realloc(): " 2387 "out of memory\n"); 2388 abort(); 2389 } 2390 set_errno(ENOMEM); 2391 } 2392 if (config_stats && likely(ret != NULL)) { 2393 tsd_t *tsd; 2394 2395 assert(usize == isalloc(tsdn, ret)); 2396 tsd = tsdn_tsd(tsdn); 2397 *tsd_thread_allocatedp_get(tsd) += usize; 2398 *tsd_thread_deallocatedp_get(tsd) += old_usize; 2399 } 2400 UTRACE(ptr, size, ret); 2401 check_entry_exit_locking(tsdn); 2402 2403 LOG("core.realloc.exit", "result: %p", ret); 2404 return ret; 2405 } 2406 2407 JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2408 je_free(void *ptr) { 2409 LOG("core.free.entry", "ptr: %p", ptr); 2410 2411 UTRACE(ptr, 0, 0); 2412 if (likely(ptr != NULL)) { 2413 /* 2414 * We avoid setting up tsd fully (e.g. tcache, arena binding) 2415 * based on only free() calls -- other activities trigger the 2416 * minimal to full transition. This is because free() may 2417 * happen during thread shutdown after tls deallocation: if a 2418 * thread never had any malloc activities until then, a 2419 * fully-setup tsd won't be destructed properly. 2420 */ 2421 tsd_t *tsd = tsd_fetch_min(); 2422 check_entry_exit_locking(tsd_tsdn(tsd)); 2423 2424 tcache_t *tcache; 2425 if (likely(tsd_fast(tsd))) { 2426 tsd_assert_fast(tsd); 2427 /* Unconditionally get tcache ptr on fast path. */ 2428 tcache = tsd_tcachep_get(tsd); 2429 ifree(tsd, ptr, tcache, false); 2430 } else { 2431 if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2432 tcache = tcache_get(tsd); 2433 } else { 2434 tcache = NULL; 2435 } 2436 ifree(tsd, ptr, tcache, true); 2437 } 2438 check_entry_exit_locking(tsd_tsdn(tsd)); 2439 } 2440 LOG("core.free.exit", ""); 2441 } 2442 2443 /* 2444 * End malloc(3)-compatible functions. 2445 */ 2446 /******************************************************************************/ 2447 /* 2448 * Begin non-standard override functions. 
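 * (memalign(3) and valloc(3) replacements, plus glibc malloc hook and
 * __libc_*() interposition when JEMALLOC_IS_MALLOC and the corresponding
 * override macros are defined.)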
2449 */ 2450 2451 #ifdef JEMALLOC_OVERRIDE_MEMALIGN 2452 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2453 void JEMALLOC_NOTHROW * 2454 JEMALLOC_ATTR(malloc) 2455 je_memalign(size_t alignment, size_t size) { 2456 void *ret; 2457 static_opts_t sopts; 2458 dynamic_opts_t dopts; 2459 2460 LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, 2461 size); 2462 2463 static_opts_init(&sopts); 2464 dynamic_opts_init(&dopts); 2465 2466 sopts.bump_empty_alloc = true; 2467 sopts.min_alignment = 1; 2468 sopts.oom_string = 2469 "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2470 sopts.invalid_alignment_string = 2471 "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2472 sopts.null_out_result_on_error = true; 2473 2474 dopts.result = &ret; 2475 dopts.num_items = 1; 2476 dopts.item_size = size; 2477 dopts.alignment = alignment; 2478 2479 imalloc(&sopts, &dopts); 2480 2481 LOG("core.memalign.exit", "result: %p", ret); 2482 return ret; 2483 } 2484 #endif 2485 2486 #ifdef JEMALLOC_OVERRIDE_VALLOC 2487 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2488 void JEMALLOC_NOTHROW * 2489 JEMALLOC_ATTR(malloc) 2490 je_valloc(size_t size) { 2491 void *ret; 2492 2493 static_opts_t sopts; 2494 dynamic_opts_t dopts; 2495 2496 LOG("core.valloc.entry", "size: %zu\n", size); 2497 2498 static_opts_init(&sopts); 2499 dynamic_opts_init(&dopts); 2500 2501 sopts.bump_empty_alloc = true; 2502 sopts.null_out_result_on_error = true; 2503 sopts.min_alignment = PAGE; 2504 sopts.oom_string = 2505 "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2506 sopts.invalid_alignment_string = 2507 "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2508 2509 dopts.result = &ret; 2510 dopts.num_items = 1; 2511 dopts.item_size = size; 2512 dopts.alignment = PAGE; 2513 2514 imalloc(&sopts, &dopts); 2515 2516 LOG("core.valloc.exit", "result: %p\n", ret); 2517 return ret; 2518 } 2519 #endif 2520 2521 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) 2522 /* 2523 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 2524 * to inconsistently reference libc's malloc(3)-compatible functions 2525 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 2526 * 2527 * These definitions interpose hooks in glibc. The functions are actually 2528 * passed an extra argument for the caller return address, which will be 2529 * ignored. 2530 */ 2531 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; 2532 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; 2533 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; 2534 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK 2535 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = 2536 je_memalign; 2537 # endif 2538 2539 # ifdef CPU_COUNT 2540 /* 2541 * To enable static linking with glibc, the libc specific malloc interface must 2542 * be implemented also, so none of glibc's malloc.o functions are added to the 2543 * link. 2544 */ 2545 # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) 2546 /* To force macro expansion of je_ prefix before stringification. 
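 * The # operator in ALIAS() does not macro-expand its argument, so an
 * extra level of indirection is required when je_* is itself a macro.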
*/ 2547 # define PREALIAS(je_fn) ALIAS(je_fn) 2548 # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC 2549 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); 2550 # endif 2551 # ifdef JEMALLOC_OVERRIDE___LIBC_FREE 2552 void __libc_free(void* ptr) PREALIAS(je_free); 2553 # endif 2554 # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC 2555 void *__libc_malloc(size_t size) PREALIAS(je_malloc); 2556 # endif 2557 # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN 2558 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); 2559 # endif 2560 # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC 2561 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); 2562 # endif 2563 # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC 2564 void *__libc_valloc(size_t size) PREALIAS(je_valloc); 2565 # endif 2566 # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN 2567 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); 2568 # endif 2569 # undef PREALIAS 2570 # undef ALIAS 2571 # endif 2572 #endif 2573 2574 /* 2575 * End non-standard override functions. 2576 */ 2577 /******************************************************************************/ 2578 /* 2579 * Begin non-standard functions. 2580 */ 2581 2582 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2583 void JEMALLOC_NOTHROW * 2584 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2585 je_mallocx(size_t size, int flags) { 2586 void *ret; 2587 static_opts_t sopts; 2588 dynamic_opts_t dopts; 2589 2590 LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); 2591 2592 static_opts_init(&sopts); 2593 dynamic_opts_init(&dopts); 2594 2595 sopts.assert_nonempty_alloc = true; 2596 sopts.null_out_result_on_error = true; 2597 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; 2598 2599 dopts.result = &ret; 2600 dopts.num_items = 1; 2601 dopts.item_size = size; 2602 if (unlikely(flags != 0)) { 2603 if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { 2604 dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); 2605 } 2606 2607 dopts.zero = MALLOCX_ZERO_GET(flags); 2608 2609 if ((flags & MALLOCX_TCACHE_MASK) != 0) { 2610 if ((flags & MALLOCX_TCACHE_MASK) 2611 == MALLOCX_TCACHE_NONE) { 2612 dopts.tcache_ind = TCACHE_IND_NONE; 2613 } else { 2614 dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); 2615 } 2616 } else { 2617 dopts.tcache_ind = TCACHE_IND_AUTOMATIC; 2618 } 2619 2620 if ((flags & MALLOCX_ARENA_MASK) != 0) 2621 dopts.arena_ind = MALLOCX_ARENA_GET(flags); 2622 } 2623 2624 imalloc(&sopts, &dopts); 2625 2626 LOG("core.mallocx.exit", "result: %p", ret); 2627 return ret; 2628 } 2629 2630 static void * 2631 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, 2632 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, 2633 prof_tctx_t *tctx) { 2634 void *p; 2635 2636 if (tctx == NULL) { 2637 return NULL; 2638 } 2639 if (usize <= SMALL_MAXCLASS) { 2640 p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, 2641 alignment, zero, tcache, arena); 2642 if (p == NULL) { 2643 return NULL; 2644 } 2645 arena_prof_promote(tsdn, p, usize); 2646 } else { 2647 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, 2648 tcache, arena); 2649 } 2650 2651 return p; 2652 } 2653 2654 JEMALLOC_ALWAYS_INLINE void * 2655 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, 2656 size_t alignment, size_t *usize, bool zero, tcache_t *tcache, 2657 arena_t *arena, alloc_ctx_t *alloc_ctx) { 2658 void *p; 2659 bool prof_activex; 2660 prof_tctx_t *old_tctx, *tctx; 2661 2662 prof_activex = 
prof_active_get_unlocked(); 2663 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 2664 tctx = prof_alloc_prep(tsd, *usize, prof_activex, false); 2665 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2666 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, 2667 *usize, alignment, zero, tcache, arena, tctx); 2668 } else { 2669 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, 2670 zero, tcache, arena); 2671 } 2672 if (unlikely(p == NULL)) { 2673 prof_alloc_rollback(tsd, tctx, false); 2674 return NULL; 2675 } 2676 2677 if (p == old_ptr && alignment != 0) { 2678 /* 2679 * The allocation did not move, so it is possible that the size 2680 * class is smaller than would guarantee the requested 2681 * alignment, and that the alignment constraint was 2682 * serendipitously satisfied. Additionally, old_usize may not 2683 * be the same as the current usize because of in-place large 2684 * reallocation. Therefore, query the actual value of usize. 2685 */ 2686 *usize = isalloc(tsd_tsdn(tsd), p); 2687 } 2688 prof_realloc(tsd, p, *usize, tctx, prof_activex, false, old_ptr, 2689 old_usize, old_tctx); 2690 2691 return p; 2692 } 2693 2694 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2695 void JEMALLOC_NOTHROW * 2696 JEMALLOC_ALLOC_SIZE(2) 2697 je_rallocx(void *ptr, size_t size, int flags) { 2698 void *p; 2699 tsd_t *tsd; 2700 size_t usize; 2701 size_t old_usize; 2702 size_t alignment = MALLOCX_ALIGN_GET(flags); 2703 bool zero = flags & MALLOCX_ZERO; 2704 arena_t *arena; 2705 tcache_t *tcache; 2706 2707 LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, 2708 size, flags); 2709 2710 2711 assert(ptr != NULL); 2712 assert(size != 0); 2713 assert(malloc_initialized() || IS_INITIALIZER); 2714 tsd = tsd_fetch(); 2715 check_entry_exit_locking(tsd_tsdn(tsd)); 2716 2717 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { 2718 unsigned arena_ind = MALLOCX_ARENA_GET(flags); 2719 arena = arena_get(tsd_tsdn(tsd), arena_ind, true); 2720 if (unlikely(arena == NULL)) { 2721 goto label_oom; 2722 } 2723 } else { 2724 arena = NULL; 2725 } 2726 2727 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { 2728 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { 2729 tcache = NULL; 2730 } else { 2731 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); 2732 } 2733 } else { 2734 tcache = tcache_get(tsd); 2735 } 2736 2737 alloc_ctx_t alloc_ctx; 2738 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2739 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2740 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2741 assert(alloc_ctx.szind != NSIZES); 2742 old_usize = sz_index2size(alloc_ctx.szind); 2743 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2744 if (config_prof && opt_prof) { 2745 usize = (alignment == 0) ? 
2746 sz_s2u(size) : sz_sa2u(size, alignment); 2747 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 2748 goto label_oom; 2749 } 2750 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, 2751 zero, tcache, arena, &alloc_ctx); 2752 if (unlikely(p == NULL)) { 2753 goto label_oom; 2754 } 2755 } else { 2756 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, 2757 zero, tcache, arena); 2758 if (unlikely(p == NULL)) { 2759 goto label_oom; 2760 } 2761 if (config_stats) { 2762 usize = isalloc(tsd_tsdn(tsd), p); 2763 } 2764 } 2765 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); 2766 2767 if (config_stats) { 2768 *tsd_thread_allocatedp_get(tsd) += usize; 2769 *tsd_thread_deallocatedp_get(tsd) += old_usize; 2770 } 2771 UTRACE(ptr, size, p); 2772 check_entry_exit_locking(tsd_tsdn(tsd)); 2773 2774 LOG("core.rallocx.exit", "result: %p", p); 2775 return p; 2776 label_oom: 2777 if (config_xmalloc && unlikely(opt_xmalloc)) { 2778 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); 2779 abort(); 2780 } 2781 UTRACE(ptr, size, 0); 2782 check_entry_exit_locking(tsd_tsdn(tsd)); 2783 2784 LOG("core.rallocx.exit", "result: %p", NULL); 2785 return NULL; 2786 } 2787 2788 JEMALLOC_ALWAYS_INLINE size_t 2789 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, 2790 size_t extra, size_t alignment, bool zero) { 2791 size_t usize; 2792 2793 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { 2794 return old_usize; 2795 } 2796 usize = isalloc(tsdn, ptr); 2797 2798 return usize; 2799 } 2800 2801 static size_t 2802 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, 2803 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { 2804 size_t usize; 2805 2806 if (tctx == NULL) { 2807 return old_usize; 2808 } 2809 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, 2810 zero); 2811 2812 return usize; 2813 } 2814 2815 JEMALLOC_ALWAYS_INLINE size_t 2816 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, 2817 size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { 2818 size_t usize_max, usize; 2819 bool prof_activex; 2820 prof_tctx_t *old_tctx, *tctx; 2821 2822 prof_activex = prof_active_get_unlocked(); 2823 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); 2824 /* 2825 * usize isn't knowable before ixalloc() returns when extra is non-zero. 2826 * Therefore, compute its maximum possible value and use that in 2827 * prof_alloc_prep() to decide whether to capture a backtrace. 2828 * prof_realloc() will use the actual usize to decide whether to sample. 2829 */ 2830 if (alignment == 0) { 2831 usize_max = sz_s2u(size+extra); 2832 assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); 2833 } else { 2834 usize_max = sz_sa2u(size+extra, alignment); 2835 if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { 2836 /* 2837 * usize_max is out of range, and chances are that 2838 * allocation will fail, but use the maximum possible 2839 * value and carry on with prof_alloc_prep(), just in 2840 * case allocation succeeds. 
2841 */ 2842 usize_max = LARGE_MAXCLASS; 2843 } 2844 } 2845 tctx = prof_alloc_prep(tsd, usize_max, prof_activex, false); 2846 2847 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2848 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, 2849 size, extra, alignment, zero, tctx); 2850 } else { 2851 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, 2852 extra, alignment, zero); 2853 } 2854 if (usize == old_usize) { 2855 prof_alloc_rollback(tsd, tctx, false); 2856 return usize; 2857 } 2858 prof_realloc(tsd, ptr, usize, tctx, prof_activex, false, ptr, old_usize, 2859 old_tctx); 2860 2861 return usize; 2862 } 2863 2864 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2865 je_xallocx(void *ptr, size_t size, size_t extra, int flags) { 2866 tsd_t *tsd; 2867 size_t usize, old_usize; 2868 size_t alignment = MALLOCX_ALIGN_GET(flags); 2869 bool zero = flags & MALLOCX_ZERO; 2870 2871 LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " 2872 "flags: %d", ptr, size, extra, flags); 2873 2874 assert(ptr != NULL); 2875 assert(size != 0); 2876 assert(SIZE_T_MAX - size >= extra); 2877 assert(malloc_initialized() || IS_INITIALIZER); 2878 tsd = tsd_fetch(); 2879 check_entry_exit_locking(tsd_tsdn(tsd)); 2880 2881 alloc_ctx_t alloc_ctx; 2882 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2883 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2884 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2885 assert(alloc_ctx.szind != NSIZES); 2886 old_usize = sz_index2size(alloc_ctx.szind); 2887 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2888 /* 2889 * The API explicitly absolves itself of protecting against (size + 2890 * extra) numerical overflow, but we may need to clamp extra to avoid 2891 * exceeding LARGE_MAXCLASS. 2892 * 2893 * Ordinarily, size limit checking is handled deeper down, but here we 2894 * have to check as part of (size + extra) clamping, since we need the 2895 * clamped value in the above helper functions. 
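 * Concretely: if size alone already exceeds LARGE_MAXCLASS, the request is
 * reported as not resized below; otherwise extra is reduced so that
 * size + extra never exceeds LARGE_MAXCLASS.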
2896 */ 2897 if (unlikely(size > LARGE_MAXCLASS)) { 2898 usize = old_usize; 2899 goto label_not_resized; 2900 } 2901 if (unlikely(LARGE_MAXCLASS - size < extra)) { 2902 extra = LARGE_MAXCLASS - size; 2903 } 2904 2905 if (config_prof && opt_prof) { 2906 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, 2907 alignment, zero, &alloc_ctx); 2908 } else { 2909 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, 2910 extra, alignment, zero); 2911 } 2912 if (unlikely(usize == old_usize)) { 2913 goto label_not_resized; 2914 } 2915 2916 if (config_stats) { 2917 *tsd_thread_allocatedp_get(tsd) += usize; 2918 *tsd_thread_deallocatedp_get(tsd) += old_usize; 2919 } 2920 label_not_resized: 2921 UTRACE(ptr, size, ptr); 2922 check_entry_exit_locking(tsd_tsdn(tsd)); 2923 2924 LOG("core.xallocx.exit", "result: %zu", usize); 2925 return usize; 2926 } 2927 2928 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2929 JEMALLOC_ATTR(pure) 2930 je_sallocx(const void *ptr, UNUSED int flags) { 2931 size_t usize; 2932 tsdn_t *tsdn; 2933 2934 LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); 2935 2936 assert(malloc_initialized() || IS_INITIALIZER); 2937 assert(ptr != NULL); 2938 2939 tsdn = tsdn_fetch(); 2940 check_entry_exit_locking(tsdn); 2941 2942 if (config_debug || force_ivsalloc) { 2943 usize = ivsalloc(tsdn, ptr); 2944 assert(force_ivsalloc || usize != 0); 2945 } else { 2946 usize = isalloc(tsdn, ptr); 2947 } 2948 2949 check_entry_exit_locking(tsdn); 2950 2951 LOG("core.sallocx.exit", "result: %zu", usize); 2952 return usize; 2953 } 2954 2955 JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2956 je_dallocx(void *ptr, int flags) { 2957 LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); 2958 2959 assert(ptr != NULL); 2960 assert(malloc_initialized() || IS_INITIALIZER); 2961 2962 tsd_t *tsd = tsd_fetch(); 2963 bool fast = tsd_fast(tsd); 2964 check_entry_exit_locking(tsd_tsdn(tsd)); 2965 2966 tcache_t *tcache; 2967 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { 2968 /* Not allowed to be reentrant and specify a custom tcache. 
*/ 2969 assert(tsd_reentrancy_level_get(tsd) == 0); 2970 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { 2971 tcache = NULL; 2972 } else { 2973 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); 2974 } 2975 } else { 2976 if (likely(fast)) { 2977 tcache = tsd_tcachep_get(tsd); 2978 assert(tcache == tcache_get(tsd)); 2979 } else { 2980 if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2981 tcache = tcache_get(tsd); 2982 } else { 2983 tcache = NULL; 2984 } 2985 } 2986 } 2987 2988 UTRACE(ptr, 0, 0); 2989 if (likely(fast)) { 2990 tsd_assert_fast(tsd); 2991 ifree(tsd, ptr, tcache, false); 2992 } else { 2993 ifree(tsd, ptr, tcache, true); 2994 } 2995 check_entry_exit_locking(tsd_tsdn(tsd)); 2996 2997 LOG("core.dallocx.exit", ""); 2998 } 2999 3000 JEMALLOC_ALWAYS_INLINE size_t 3001 inallocx(tsdn_t *tsdn, size_t size, int flags) { 3002 check_entry_exit_locking(tsdn); 3003 3004 size_t usize; 3005 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { 3006 usize = sz_s2u(size); 3007 } else { 3008 usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); 3009 } 3010 check_entry_exit_locking(tsdn); 3011 return usize; 3012 } 3013 3014 JEMALLOC_EXPORT void JEMALLOC_NOTHROW 3015 je_sdallocx(void *ptr, size_t size, int flags) { 3016 assert(ptr != NULL); 3017 assert(malloc_initialized() || IS_INITIALIZER); 3018 3019 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, 3020 size, flags); 3021 3022 tsd_t *tsd = tsd_fetch(); 3023 bool fast = tsd_fast(tsd); 3024 size_t usize = inallocx(tsd_tsdn(tsd), size, flags); 3025 assert(usize == isalloc(tsd_tsdn(tsd), ptr)); 3026 check_entry_exit_locking(tsd_tsdn(tsd)); 3027 3028 tcache_t *tcache; 3029 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { 3030 /* Not allowed to be reentrant and specify a custom tcache. 
*/ 3031 assert(tsd_reentrancy_level_get(tsd) == 0); 3032 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { 3033 tcache = NULL; 3034 } else { 3035 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); 3036 } 3037 } else { 3038 if (likely(fast)) { 3039 tcache = tsd_tcachep_get(tsd); 3040 assert(tcache == tcache_get(tsd)); 3041 } else { 3042 if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 3043 tcache = tcache_get(tsd); 3044 } else { 3045 tcache = NULL; 3046 } 3047 } 3048 } 3049 3050 UTRACE(ptr, 0, 0); 3051 if (likely(fast)) { 3052 tsd_assert_fast(tsd); 3053 isfree(tsd, ptr, usize, tcache, false); 3054 } else { 3055 isfree(tsd, ptr, usize, tcache, true); 3056 } 3057 check_entry_exit_locking(tsd_tsdn(tsd)); 3058 3059 LOG("core.sdallocx.exit", ""); 3060 } 3061 3062 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 3063 JEMALLOC_ATTR(pure) 3064 je_nallocx(size_t size, int flags) { 3065 size_t usize; 3066 tsdn_t *tsdn; 3067 3068 assert(size != 0); 3069 3070 if (unlikely(malloc_init())) { 3071 LOG("core.nallocx.exit", "result: %zu", ZU(0)); 3072 return 0; 3073 } 3074 3075 tsdn = tsdn_fetch(); 3076 check_entry_exit_locking(tsdn); 3077 3078 usize = inallocx(tsdn, size, flags); 3079 if (unlikely(usize > LARGE_MAXCLASS)) { 3080 LOG("core.nallocx.exit", "result: %zu", ZU(0)); 3081 return 0; 3082 } 3083 3084 check_entry_exit_locking(tsdn); 3085 LOG("core.nallocx.exit", "result: %zu", usize); 3086 return usize; 3087 } 3088 3089 JEMALLOC_EXPORT int JEMALLOC_NOTHROW 3090 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, 3091 size_t newlen) { 3092 int ret; 3093 tsd_t *tsd; 3094 3095 LOG("core.mallctl.entry", "name: %s", name); 3096 3097 if (unlikely(malloc_init())) { 3098 LOG("core.mallctl.exit", "result: %d", EAGAIN); 3099 return EAGAIN; 3100 } 3101 3102 tsd = tsd_fetch(); 3103 check_entry_exit_locking(tsd_tsdn(tsd)); 3104 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); 3105 check_entry_exit_locking(tsd_tsdn(tsd)); 3106 3107 LOG("core.mallctl.exit", "result: %d", ret); 3108 return ret; 3109 } 3110 3111 JEMALLOC_EXPORT int JEMALLOC_NOTHROW 3112 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { 3113 int ret; 3114 3115 LOG("core.mallctlnametomib.entry", "name: %s", name); 3116 3117 if (unlikely(malloc_init())) { 3118 LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); 3119 return EAGAIN; 3120 } 3121 3122 tsd_t *tsd = tsd_fetch(); 3123 check_entry_exit_locking(tsd_tsdn(tsd)); 3124 ret = ctl_nametomib(tsd, name, mibp, miblenp); 3125 check_entry_exit_locking(tsd_tsdn(tsd)); 3126 3127 LOG("core.mallctlnametomib.exit", "result: %d", ret); 3128 return ret; 3129 } 3130 3131 JEMALLOC_EXPORT int JEMALLOC_NOTHROW 3132 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, 3133 void *newp, size_t newlen) { 3134 int ret; 3135 tsd_t *tsd; 3136 3137 LOG("core.mallctlbymib.entry", ""); 3138 3139 if (unlikely(malloc_init())) { 3140 LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); 3141 return EAGAIN; 3142 } 3143 3144 tsd = tsd_fetch(); 3145 check_entry_exit_locking(tsd_tsdn(tsd)); 3146 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); 3147 check_entry_exit_locking(tsd_tsdn(tsd)); 3148 LOG("core.mallctlbymib.exit", "result: %d", ret); 3149 return ret; 3150 } 3151 3152 JEMALLOC_EXPORT void JEMALLOC_NOTHROW 3153 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 3154 const char *opts) { 3155 tsdn_t *tsdn; 3156 3157 LOG("core.malloc_stats_print.entry", ""); 3158 3159 tsdn = tsdn_fetch(); 3160 
check_entry_exit_locking(tsdn); 3161 stats_print(write_cb, cbopaque, opts); 3162 check_entry_exit_locking(tsdn); 3163 LOG("core.malloc_stats_print.exit", ""); 3164 } 3165 3166 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 3167 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { 3168 size_t ret; 3169 tsdn_t *tsdn; 3170 3171 LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); 3172 3173 assert(malloc_initialized() || IS_INITIALIZER); 3174 3175 tsdn = tsdn_fetch(); 3176 check_entry_exit_locking(tsdn); 3177 3178 if (unlikely(ptr == NULL)) { 3179 ret = 0; 3180 } else { 3181 if (config_debug || force_ivsalloc) { 3182 ret = ivsalloc(tsdn, ptr); 3183 assert(force_ivsalloc || ret != 0); 3184 } else { 3185 ret = isalloc(tsdn, ptr); 3186 } 3187 } 3188 3189 check_entry_exit_locking(tsdn); 3190 LOG("core.malloc_usable_size.exit", "result: %zu", ret); 3191 return ret; 3192 } 3193 3194 /* 3195 * End non-standard functions. 3196 */ 3197 /******************************************************************************/ 3198 /* 3199 * The following functions are used by threading libraries for protection of 3200 * malloc during fork(). 3201 */ 3202 3203 /* 3204 * If an application creates a thread before doing any allocation in the main 3205 * thread, then calls fork(2) in the main thread followed by memory allocation 3206 * in the child process, a race can occur that results in deadlock within the 3207 * child: the main thread may have forked while the created thread had 3208 * partially initialized the allocator. Ordinarily jemalloc prevents 3209 * fork/malloc races via the following functions it registers during 3210 * initialization using pthread_atfork(), but of course that does no good if 3211 * the allocator isn't fully initialized at fork time. The following library 3212 * constructor is a partial solution to this problem. It may still be possible 3213 * to trigger the deadlock described above, but doing so would involve forking 3214 * via a library constructor that runs before jemalloc's runs. 3215 */ 3216 #ifndef JEMALLOC_JET 3217 JEMALLOC_ATTR(constructor) 3218 static void 3219 jemalloc_constructor(void) { 3220 malloc_init(); 3221 } 3222 #endif 3223 3224 #ifndef JEMALLOC_MUTEX_INIT_CB 3225 void 3226 jemalloc_prefork(void) 3227 #else 3228 JEMALLOC_EXPORT void 3229 _malloc_prefork(void) 3230 #endif 3231 { 3232 tsd_t *tsd; 3233 unsigned i, j, narenas; 3234 arena_t *arena; 3235 3236 #ifdef JEMALLOC_MUTEX_INIT_CB 3237 if (!malloc_initialized()) { 3238 return; 3239 } 3240 #endif 3241 assert(malloc_initialized()); 3242 3243 tsd = tsd_fetch(); 3244 3245 narenas = narenas_total_get(); 3246 3247 witness_prefork(tsd_witness_tsdp_get(tsd)); 3248 /* Acquire all mutexes in a safe order. */ 3249 ctl_prefork(tsd_tsdn(tsd)); 3250 tcache_prefork(tsd_tsdn(tsd)); 3251 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); 3252 if (have_background_thread) { 3253 background_thread_prefork0(tsd_tsdn(tsd)); 3254 } 3255 prof_prefork0(tsd_tsdn(tsd)); 3256 if (have_background_thread) { 3257 background_thread_prefork1(tsd_tsdn(tsd)); 3258 } 3259 /* Break arena prefork into stages to preserve lock order. 
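 * Each arena_prefork stage is applied to every arena before the next stage
 * begins, so that locks of a given kind are acquired across all arenas
 * before any lock of the next kind.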
*/ 3260 for (i = 0; i < 8; i++) { 3261 for (j = 0; j < narenas; j++) { 3262 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != 3263 NULL) { 3264 switch (i) { 3265 case 0: 3266 arena_prefork0(tsd_tsdn(tsd), arena); 3267 break; 3268 case 1: 3269 arena_prefork1(tsd_tsdn(tsd), arena); 3270 break; 3271 case 2: 3272 arena_prefork2(tsd_tsdn(tsd), arena); 3273 break; 3274 case 3: 3275 arena_prefork3(tsd_tsdn(tsd), arena); 3276 break; 3277 case 4: 3278 arena_prefork4(tsd_tsdn(tsd), arena); 3279 break; 3280 case 5: 3281 arena_prefork5(tsd_tsdn(tsd), arena); 3282 break; 3283 case 6: 3284 arena_prefork6(tsd_tsdn(tsd), arena); 3285 break; 3286 case 7: 3287 arena_prefork7(tsd_tsdn(tsd), arena); 3288 break; 3289 default: not_reached(); 3290 } 3291 } 3292 } 3293 } 3294 prof_prefork1(tsd_tsdn(tsd)); 3295 } 3296 3297 #ifndef JEMALLOC_MUTEX_INIT_CB 3298 void 3299 jemalloc_postfork_parent(void) 3300 #else 3301 JEMALLOC_EXPORT void 3302 _malloc_postfork(void) 3303 #endif 3304 { 3305 tsd_t *tsd; 3306 unsigned i, narenas; 3307 3308 #ifdef JEMALLOC_MUTEX_INIT_CB 3309 if (!malloc_initialized()) { 3310 return; 3311 } 3312 #endif 3313 assert(malloc_initialized()); 3314 3315 tsd = tsd_fetch(); 3316 3317 witness_postfork_parent(tsd_witness_tsdp_get(tsd)); 3318 /* Release all mutexes, now that fork() has completed. */ 3319 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3320 arena_t *arena; 3321 3322 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 3323 arena_postfork_parent(tsd_tsdn(tsd), arena); 3324 } 3325 } 3326 prof_postfork_parent(tsd_tsdn(tsd)); 3327 if (have_background_thread) { 3328 background_thread_postfork_parent(tsd_tsdn(tsd)); 3329 } 3330 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); 3331 tcache_postfork_parent(tsd_tsdn(tsd)); 3332 ctl_postfork_parent(tsd_tsdn(tsd)); 3333 } 3334 3335 void 3336 jemalloc_postfork_child(void) { 3337 tsd_t *tsd; 3338 unsigned i, narenas; 3339 3340 assert(malloc_initialized()); 3341 3342 tsd = tsd_fetch(); 3343 3344 witness_postfork_child(tsd_witness_tsdp_get(tsd)); 3345 /* Release all mutexes, now that fork() has completed. */ 3346 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3347 arena_t *arena; 3348 3349 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 3350 arena_postfork_child(tsd_tsdn(tsd), arena); 3351 } 3352 } 3353 prof_postfork_child(tsd_tsdn(tsd)); 3354 if (have_background_thread) { 3355 background_thread_postfork_child(tsd_tsdn(tsd)); 3356 } 3357 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); 3358 tcache_postfork_child(tsd_tsdn(tsd)); 3359 ctl_postfork_child(tsd_tsdn(tsd)); 3360 } 3361 3362 void (* 3363 je_malloc_message_get(void))(void *, const char *) 3364 { 3365 return je_malloc_message; 3366 } 3367 3368 void 3369 je_malloc_message_set(void (*m)(void *, const char *)) 3370 { 3371 je_malloc_message = m; 3372 } 3373 3374 const char * 3375 je_malloc_conf_get(void) 3376 { 3377 return je_malloc_conf; 3378 } 3379 3380 void 3381 je_malloc_conf_set(const char *m) 3382 { 3383 je_malloc_conf = m; 3384 } 3385 3386 /******************************************************************************/ 3387
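/*
 * Usage sketch (illustrative only; my_malloc_message and the option string
 * are hypothetical, and headers/error handling are omitted): an application
 * can redirect allocator diagnostics and supply a configuration string
 * before its first allocation:
 *
 *	static void
 *	my_malloc_message(void *cbopaque, const char *s)
 *	{
 *		(void)cbopaque;
 *		write(STDERR_FILENO, s, strlen(s));
 *	}
 *
 *	je_malloc_message_set(my_malloc_message);
 *	je_malloc_conf_set("abort:true,narenas:1");
 */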