#include <sys/cdefs.h>

#ifdef __NetBSD__
#include "extern.h"
#endif

#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

#ifdef JEMALLOC_WEAK_NOSTD
__weak_alias(mallocx, __je_mallocx)
__weak_alias(rallocx, __je_rallocx)
__weak_alias(xallocx, __je_xallocx)
__weak_alias(sallocx, __je_sallocx)
__weak_alias(dallocx, __je_dallocx)
__weak_alias(sdallocx, __je_sdallocx)
__weak_alias(nallocx, __je_nallocx)

__weak_alias(mallctl, __je_mallctl)
__weak_alias(mallctlnametomib, __je_mallctlnametomib)
__weak_alias(mallctlbymib, __je_mallctlbymib)

__weak_alias(malloc_stats_print, __je_malloc_stats_print)
__weak_alias(malloc_usable_size, __je_malloc_usable_size)

__weak_alias(malloc_message, __je_malloc_message)
__weak_alias(malloc_conf, __je_malloc_conf)

__weak_alias(malloc_message_get, __je_malloc_message_get)
__weak_alias(malloc_conf_get, __je_malloc_conf_get)

__weak_alias(malloc_message_set, __je_malloc_message_set)
__weak_alias(malloc_conf_set, __je_malloc_conf_set)
#endif

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char	*je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_abort_conf =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
unsigned	opt_narenas = 0;

unsigned	ncpus;

/* Protects arenas initialization. */
malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 *
 * Points to an arena_t.
 */
JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
static arena_t		*a0; /* arenas[0]; read-only after initialization. */
unsigned		narenas_auto; /* Read-only after initialization. */
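/*
 * Illustrative layout (a sketch, assuming narenas_auto == 4 and one arena
 * created manually via the "arenas.create" mallctl): arenas[0..3] hold the
 * auto arenas that threads are multiplexed across, arenas[4] is used only
 * on explicit request (e.g. MALLOCX_ARENA(4)), and higher slots stay NULL
 * until created.
 */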
typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
bool			malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_zero		= (1U << 2),
	flag_opt_utrace		= (1U << 3),
	flag_opt_xmalloc	= (1U << 4)
};
static uint8_t	malloc_slow_flags;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER		((unsigned long)0)
# define INITIALIZER		pthread_self()
# define IS_INITIALIZER		(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER		false
# define INITIALIZER		true
# define IS_INITIALIZER		malloc_initializer
static bool		malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool		init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void) {
	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up extent hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock.
	 * So we force an initialization of the lock in malloc_init_hard as
	 * well.  We don't try to care about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early
	 * in the process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized) {
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
		    malloc_mutex_rank_exclusive);
	}
	init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;
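/*
 * Record convention, assuming callers follow the field comments above: a
 * malloc(n) that returns p is traced as {NULL, n, p}, free(p) as
 * {p, 0, NULL}, and realloc(p, n) returning q as {p, n, q}.
 */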
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/* Whether encountered any invalid config options. */
static bool had_conf_error = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

bool
malloc_initialized(void) {
	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
		return malloc_init_hard_a0();
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init(void) {
	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
		return true;
	}
	return false;
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */
static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
	if (unlikely(malloc_init_a0())) {
		return NULL;
	}

	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
	    is_internal, arena_get(TSDN_NULL, 0, true), true);
}

static void
a0idalloc(void *ptr, bool is_internal) {
	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}

void *
a0malloc(size_t size) {
	return a0ialloc(size, false, true);
}

void
a0dalloc(void *ptr) {
	a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size) {
	if (unlikely(size == 0)) {
		size = 1;
	}

	return a0ialloc(size, false, false);
}

void *
bootstrap_calloc(size_t num, size_t size) {
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return a0ialloc(num_size, true, false);
}

void
bootstrap_free(void *ptr) {
	if (unlikely(ptr == NULL)) {
		return;
	}

	a0idalloc(ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena) {
	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}

static void
narenas_total_set(unsigned narenas) {
	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}

static void
narenas_total_inc(void) {
	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}

unsigned
narenas_total_get(void) {
	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}
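/*
 * Why these orderings suffice (a sketch, not upstream text): arena pointers
 * are published with ATOMIC_RELEASE in arena_set(), and arena_get() is
 * expected to pair that with an ATOMIC_ACQUIRE load, so a thread that
 * observes a non-NULL arenas[ind] also observes the fully initialized arena
 * it points to; narenas_total gets the same release/acquire pairing above.
 */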
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	if (ind == narenas_total_get()) {
		narenas_total_inc();
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return arena;
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind, extent_hooks);

	return arena;
}
Abort.\n", ind); 384*8e33eff8Schristos abort(); 385*8e33eff8Schristos } 386*8e33eff8Schristos } 387*8e33eff8Schristos } 388*8e33eff8Schristos 389*8e33eff8Schristos arena_t * 390*8e33eff8Schristos arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 391*8e33eff8Schristos arena_t *arena; 392*8e33eff8Schristos 393*8e33eff8Schristos malloc_mutex_lock(tsdn, &arenas_lock); 394*8e33eff8Schristos arena = arena_init_locked(tsdn, ind, extent_hooks); 395*8e33eff8Schristos malloc_mutex_unlock(tsdn, &arenas_lock); 396*8e33eff8Schristos 397*8e33eff8Schristos arena_new_create_background_thread(tsdn, ind); 398*8e33eff8Schristos 399*8e33eff8Schristos return arena; 400*8e33eff8Schristos } 401*8e33eff8Schristos 402*8e33eff8Schristos static void 403*8e33eff8Schristos arena_bind(tsd_t *tsd, unsigned ind, bool internal) { 404*8e33eff8Schristos arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); 405*8e33eff8Schristos arena_nthreads_inc(arena, internal); 406*8e33eff8Schristos 407*8e33eff8Schristos if (internal) { 408*8e33eff8Schristos tsd_iarena_set(tsd, arena); 409*8e33eff8Schristos } else { 410*8e33eff8Schristos tsd_arena_set(tsd, arena); 411*8e33eff8Schristos } 412*8e33eff8Schristos } 413*8e33eff8Schristos 414*8e33eff8Schristos void 415*8e33eff8Schristos arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { 416*8e33eff8Schristos arena_t *oldarena, *newarena; 417*8e33eff8Schristos 418*8e33eff8Schristos oldarena = arena_get(tsd_tsdn(tsd), oldind, false); 419*8e33eff8Schristos newarena = arena_get(tsd_tsdn(tsd), newind, false); 420*8e33eff8Schristos arena_nthreads_dec(oldarena, false); 421*8e33eff8Schristos arena_nthreads_inc(newarena, false); 422*8e33eff8Schristos tsd_arena_set(tsd, newarena); 423*8e33eff8Schristos } 424*8e33eff8Schristos 425*8e33eff8Schristos static void 426*8e33eff8Schristos arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { 427*8e33eff8Schristos arena_t *arena; 428*8e33eff8Schristos 429*8e33eff8Schristos arena = arena_get(tsd_tsdn(tsd), ind, false); 430*8e33eff8Schristos arena_nthreads_dec(arena, internal); 431*8e33eff8Schristos 432*8e33eff8Schristos if (internal) { 433*8e33eff8Schristos tsd_iarena_set(tsd, NULL); 434*8e33eff8Schristos } else { 435*8e33eff8Schristos tsd_arena_set(tsd, NULL); 436*8e33eff8Schristos } 437*8e33eff8Schristos } 438*8e33eff8Schristos 439*8e33eff8Schristos arena_tdata_t * 440*8e33eff8Schristos arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { 441*8e33eff8Schristos arena_tdata_t *tdata, *arenas_tdata_old; 442*8e33eff8Schristos arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); 443*8e33eff8Schristos unsigned narenas_tdata_old, i; 444*8e33eff8Schristos unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); 445*8e33eff8Schristos unsigned narenas_actual = narenas_total_get(); 446*8e33eff8Schristos 447*8e33eff8Schristos /* 448*8e33eff8Schristos * Dissociate old tdata array (and set up for deallocation upon return) 449*8e33eff8Schristos * if it's too small. 
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.create mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL) {
		a0dalloc(arenas_tdata_old);
	}
	return tdata;
}
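/*
 * Worked example (assumed numbers): a thread whose tdata array holds 4
 * entries asking for ind == 6 while narenas_actual == 5 allocates
 * max(narenas_actual, ind + 1) == 7 entries, copies the 4 old decay
 * tickers, initializes the ticker for entry 4, zeroes entries 5..6, and
 * frees the old array before returning &arenas_tdata[6].
 */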
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
		unsigned choose = percpu_arena_choose();
		ret = arena_get(tsd_tsdn(tsd), choose, true);
		assert(ret != NULL);
		arena_bind(tsd, arena_ind_get(ret), false);
		arena_bind(tsd, arena_ind_get(ret), true);

		return ret;
	}

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;
		bool is_new_arena[2];

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++) {
			choose[j] = 0;
			is_new_arena[j] = false;
		}

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j)) {
						choose[j] = i;
					}
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}
		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j], (extent_hooks_t *)
				    __UNCONST(&extent_hooks_default));
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return NULL;
				}
				is_new_arena[j] = true;
				if (!!j == internal) {
					ret = arena;
				}
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);

		for (j = 0; j < 2; j++) {
			if (is_new_arena[j]) {
				assert(choose[j] > 0);
				arena_new_create_background_thread(
				    tsd_tsdn(tsd), choose[j]);
			}
		}

	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return ret;
}
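/*
 * Walk-through (hypothetical state): with narenas_auto == 4, only arena 0
 * initialized, and one application thread already bound to it, the scan
 * above leaves choose[0] == choose[1] == 0 and first_null == 1.  Arena 0 is
 * loaded for application allocation, so the j == 0 pass initializes a new
 * arena at index 1 and binds there, while the j == 1 (internal) pass can
 * stay on arena 0 if no thread is bound to it internally.
 */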
void
iarena_cleanup(tsd_t *tsd) {
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL) {
		arena_unbind(tsd, arena_ind_get(iarena), true);
	}
}

void
arena_cleanup(tsd_t *tsd) {
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL) {
		arena_unbind(tsd, arena_ind_get(arena), false);
	}
}

void
arenas_tdata_cleanup(tsd_t *tsd) {
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

static void
stats_print_atexit(void) {
	if (config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn,
				    &arena->tcache_ql_mtx);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}

/*
 * Ensure that we don't hold any locks upon entry to or exit from allocator
 * code (in a "broad" sense that doesn't count a reentrant allocation as an
 * entrance or exit).
 */
JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t *tsdn) {
	if (!config_debug) {
		return;
	}
	if (tsdn_null(tsdn)) {
		return;
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	/*
	 * It's possible we hold locks at entry/exit if we're in a nested
	 * allocation.
	 */
	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
	if (reentrancy_level != 0) {
		return;
	}
	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}
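/*
 * Example (an assumption about callers, not upstream text): an allocation
 * made while user-supplied hooks run is expected to execute at reentrancy
 * level > 0, so the lockless witness assertion above is intentionally
 * skipped for that nested entry.
 */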
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static char *
jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
# ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0) {
		return NULL;
	}
# endif
	return getenv(name);
#endif
}
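/*
 * Note on the guard above: via issetugid()/secure_getenv(), a setuid or
 * setgid process ignores MALLOC_CONF from its (untrusted) environment; the
 * other conf sources consulted in malloc_conf_init() still apply.
 */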
static unsigned
malloc_ncpus(void) {
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first
	 * time *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

static void
init_opt_stats_print_opts(const char *v, size_t vlen) {
	size_t opts_len = strlen(opt_stats_print_opts);
	assert(opts_len <= stats_print_tot_num_options);

	for (size_t i = 0; i < vlen; i++) {
		switch (v[i]) {
#define OPTION(o, v, d, s) case o: break;
			STATS_PRINT_OPTIONS
#undef OPTION
		default: continue;
		}

		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
			/* Ignore repeated. */
			continue;
		}

		opt_stats_print_opts[opts_len++] = v[i];
		opt_stats_print_opts[opts_len] = '\0';
		assert(opts_len <= stats_print_tot_num_options);
	}
	assert(opts_len == strlen(opt_stats_print_opts));
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}
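/*
 * Parsing sketch (hypothetical input): for opts = "abort:true,narenas:4",
 * the first call returns false with k/klen naming "abort", v/vlen naming
 * "true", and *opts_p advanced past the comma; the second call yields
 * "narenas"/"4" and leaves *opts_p at the terminating '\0', where the
 * caller's loop stops.
 */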
static JEMALLOC_NORETURN void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen) {
	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
	/* If abort_conf is set, error out after processing all options. */
	had_conf_error = true;
}

static void
malloc_slow_flag_init(void) {
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	malloc_slow = (malloc_slow_flags != 0);
}
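/*
 * Example (assumed options): junk:"free" plus zero:true yields
 * flag_opt_junk_free | flag_opt_zero == 0x06, so malloc_slow becomes true
 * and the fast paths are bypassed.
 */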
static void
malloc_conf_init(void) {
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}
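		/*
		 * At this point opts holds one of four sources, processed in
		 * increasing priority: compile-time config_malloc_conf, the
		 * je_malloc_conf global, the /etc/malloc.conf symlink target,
		 * and finally the MALLOC_CONF environment variable; a key set
		 * by a later source overrides an earlier one.
		 */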
		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n)						\
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n)						\
			if (CONF_MATCH(n)) {				\
				if (CONF_MATCH_VALUE("true")) {		\
					o = true;			\
				} else if (CONF_MATCH_VALUE("false")) {	\
					o = false;			\
				} else {				\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_MIN_no(um, min)	false
#define CONF_MIN_yes(um, min)	((um) < (min))
#define CONF_MAX_no(um, max)	false
#define CONF_MAX_yes(um, max)	((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				const char *end;			\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min))) {		\
						o = (t)(min);		\
					} else if (			\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						o = (t)(max);		\
					} else {			\
						o = (t)um;		\
					}				\
				} else {				\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min)) ||		\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else {			\
						o = (t)um;		\
					}				\
				}					\
				continue;				\
			}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
    clip)								\
			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
			    check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
			CONF_HANDLE_T_U(size_t, o, n, min, max,	\
			    check_min, check_max, clip)
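/*
 * Example of the clip parameter: a clip == true handler silently raises a
 * too-small value to its minimum (and lowers a too-large one to its
 * maximum), whereas a clip == false handler, such as "narenas" below,
 * reports the same input as an out-of-range conf value and leaves the
 * option unchanged.
 */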
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)(min) || l >	\
				    (ssize_t)(max)) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else {				\
					o = l;				\
				}					\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
			CONF_HANDLE_BOOL(opt_abort, "abort")
			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
			if (strncmp("metadata_thp", k, klen) == 0) {
				int ii;
				bool match = false;
				for (ii = 0; ii < metadata_thp_mode_limit;
				    ii++) {
					if (strncmp(metadata_thp_mode_names[ii],
					    v, vlen) == 0) {
						opt_metadata_thp = ii;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_retain, "retain")
			if (strncmp("dss", k, klen) == 0) {
				int ii;
				bool match = false;
				for (ii = 0; ii < dss_prec_limit; ii++) {
					if (strncmp(dss_prec_names[ii], v, vlen)
					    == 0) {
						if (extent_dss_prec_set(ii)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[ii];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, yes, no, false)
			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (CONF_MATCH("stats_print_opts")) {
				init_opt_stats_print_opts(v, vlen);
				continue;
			}
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						opt_junk = "true";
						opt_junk_alloc = opt_junk_free =
						    true;
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						opt_junk = "free";
						opt_junk_alloc = false;
						opt_junk_free = true;
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			CONF_HANDLE_BOOL(opt_tcache, "tcache")
			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
			    "lg_extent_max_active_fit", 0,
			    (sizeof(size_t) << 3), no, yes, false)
			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
			    -1, (sizeof(size_t) << 3) - 1)
			if (strncmp("percpu_arena", k, klen) == 0) {
				bool match = false;
				for (int ii = percpu_arena_mode_names_base; ii <
				    percpu_arena_mode_names_limit; ii++) {
					if (strncmp(percpu_arena_mode_names[ii],
					    v, vlen) == 0) {
						if (!have_percpu_arena) {
							malloc_conf_error(
							    "No getcpu support",
							    k, klen, v, vlen);
						}
						opt_percpu_arena = ii;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_background_thread,
			    "background_thread");
			CONF_HANDLE_SIZE_T(opt_max_background_threads,
			    "max_background_threads", 1,
			    opt_max_background_threads, yes, yes,
			    true);
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init")
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
				    - 1, no, yes, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			if (config_log) {
				if (CONF_MATCH("log")) {
					size_t cpylen = (
					    vlen <= sizeof(log_var_names) ?
					    vlen : sizeof(log_var_names) - 1);
					strncpy(log_var_names, v, cpylen);
					log_var_names[cpylen] = '\0';
					continue;
				}
			}
			if (CONF_MATCH("thp")) {
				bool match = false;
				for (int ii = 0; ii < thp_mode_names_limit;
				    ii++) {
					if (strncmp(thp_mode_names[ii], v, vlen)
					    == 0) {
						if (!have_madvise_huge) {
							malloc_conf_error(
							    "No THP support",
							    k, klen, v, vlen);
						}
						opt_thp = ii;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
		if (opt_abort_conf && had_conf_error) {
			malloc_abort_invalid_conf();
		}
	}
	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
}
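/*
 * End-to-end sketch (hypothetical input): with
 * MALLOC_CONF="abort_conf:true,junk:bogus", the parser records and prints a
 * conf error for the "junk" pair, and once all of that source's options
 * have been processed, malloc_abort_invalid_conf() aborts with a message
 * pointing back at the printed diagnostic.
 */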
== 1283*8e33eff8Schristos malloc_init_recursible)) { 1284*8e33eff8Schristos /* 1285*8e33eff8Schristos * Another thread initialized the allocator before this one 1286*8e33eff8Schristos * acquired init_lock, or this thread is the initializing 1287*8e33eff8Schristos * thread, and it is recursively allocating. 1288*8e33eff8Schristos */ 1289*8e33eff8Schristos return false; 1290*8e33eff8Schristos } 1291*8e33eff8Schristos #ifdef JEMALLOC_THREADED_INIT 1292*8e33eff8Schristos if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1293*8e33eff8Schristos /* Busy-wait until the initializing thread completes. */ 1294*8e33eff8Schristos spin_t spinner = SPIN_INITIALIZER; 1295*8e33eff8Schristos do { 1296*8e33eff8Schristos malloc_mutex_unlock(TSDN_NULL, &init_lock); 1297*8e33eff8Schristos spin_adaptive(&spinner); 1298*8e33eff8Schristos malloc_mutex_lock(TSDN_NULL, &init_lock); 1299*8e33eff8Schristos } while (!malloc_initialized()); 1300*8e33eff8Schristos return false; 1301*8e33eff8Schristos } 1302*8e33eff8Schristos #endif 1303*8e33eff8Schristos return true; 1304*8e33eff8Schristos } 1305*8e33eff8Schristos 1306*8e33eff8Schristos static bool 1307*8e33eff8Schristos malloc_init_hard_a0_locked(void) { 1308*8e33eff8Schristos malloc_initializer = INITIALIZER; 1309*8e33eff8Schristos 1310*8e33eff8Schristos if (config_prof) { 1311*8e33eff8Schristos prof_boot0(); 1312*8e33eff8Schristos } 1313*8e33eff8Schristos malloc_conf_init(); 1314*8e33eff8Schristos if (opt_stats_print) { 1315*8e33eff8Schristos /* Print statistics at exit. */ 1316*8e33eff8Schristos if (atexit(stats_print_atexit) != 0) { 1317*8e33eff8Schristos malloc_write("<jemalloc>: Error in atexit()\n"); 1318*8e33eff8Schristos if (opt_abort) { 1319*8e33eff8Schristos abort(); 1320*8e33eff8Schristos } 1321*8e33eff8Schristos } 1322*8e33eff8Schristos } 1323*8e33eff8Schristos if (pages_boot()) { 1324*8e33eff8Schristos return true; 1325*8e33eff8Schristos } 1326*8e33eff8Schristos if (base_boot(TSDN_NULL)) { 1327*8e33eff8Schristos return true; 1328*8e33eff8Schristos } 1329*8e33eff8Schristos if (extent_boot()) { 1330*8e33eff8Schristos return true; 1331*8e33eff8Schristos } 1332*8e33eff8Schristos if (ctl_boot()) { 1333*8e33eff8Schristos return true; 1334*8e33eff8Schristos } 1335*8e33eff8Schristos if (config_prof) { 1336*8e33eff8Schristos prof_boot1(); 1337*8e33eff8Schristos } 1338*8e33eff8Schristos arena_boot(); 1339*8e33eff8Schristos if (tcache_boot(TSDN_NULL)) { 1340*8e33eff8Schristos return true; 1341*8e33eff8Schristos } 1342*8e33eff8Schristos if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, 1343*8e33eff8Schristos malloc_mutex_rank_exclusive)) { 1344*8e33eff8Schristos return true; 1345*8e33eff8Schristos } 1346*8e33eff8Schristos /* 1347*8e33eff8Schristos * Create enough scaffolding to allow recursive allocation in 1348*8e33eff8Schristos * malloc_ncpus(). 1349*8e33eff8Schristos */ 1350*8e33eff8Schristos narenas_auto = 1; 1351*8e33eff8Schristos memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1352*8e33eff8Schristos /* 1353*8e33eff8Schristos * Initialize one arena here. The rest are lazily created in 1354*8e33eff8Schristos * arena_choose_hard(). 
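 *
 * (Only arenas[0] is created eagerly: the recursible boot steps that
 * follow, e.g. malloc_ncpus(), may themselves allocate, and during
 * that window they are served out of this one known-good arena.)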
1355*8e33eff8Schristos */ 1356*8e33eff8Schristos if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)__UNCONST(&extent_hooks_default)) 1357*8e33eff8Schristos == NULL) { 1358*8e33eff8Schristos return true; 1359*8e33eff8Schristos } 1360*8e33eff8Schristos a0 = arena_get(TSDN_NULL, 0, false); 1361*8e33eff8Schristos malloc_init_state = malloc_init_a0_initialized; 1362*8e33eff8Schristos 1363*8e33eff8Schristos return false; 1364*8e33eff8Schristos } 1365*8e33eff8Schristos 1366*8e33eff8Schristos static bool 1367*8e33eff8Schristos malloc_init_hard_a0(void) { 1368*8e33eff8Schristos bool ret; 1369*8e33eff8Schristos 1370*8e33eff8Schristos malloc_mutex_lock(TSDN_NULL, &init_lock); 1371*8e33eff8Schristos ret = malloc_init_hard_a0_locked(); 1372*8e33eff8Schristos malloc_mutex_unlock(TSDN_NULL, &init_lock); 1373*8e33eff8Schristos return ret; 1374*8e33eff8Schristos } 1375*8e33eff8Schristos 1376*8e33eff8Schristos /* Initialize data structures which may trigger recursive allocation. */ 1377*8e33eff8Schristos static bool 1378*8e33eff8Schristos malloc_init_hard_recursible(void) { 1379*8e33eff8Schristos malloc_init_state = malloc_init_recursible; 1380*8e33eff8Schristos 1381*8e33eff8Schristos ncpus = malloc_ncpus(); 1382*8e33eff8Schristos 1383*8e33eff8Schristos #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ 1384*8e33eff8Schristos && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ 1385*8e33eff8Schristos !defined(__native_client__) && !defined(__NetBSD__)) 1386*8e33eff8Schristos /* LinuxThreads' pthread_atfork() allocates. */ 1387*8e33eff8Schristos if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 1388*8e33eff8Schristos jemalloc_postfork_child) != 0) { 1389*8e33eff8Schristos malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 1390*8e33eff8Schristos if (opt_abort) { 1391*8e33eff8Schristos abort(); 1392*8e33eff8Schristos } 1393*8e33eff8Schristos return true; 1394*8e33eff8Schristos } 1395*8e33eff8Schristos #endif 1396*8e33eff8Schristos 1397*8e33eff8Schristos if (background_thread_boot0()) { 1398*8e33eff8Schristos return true; 1399*8e33eff8Schristos } 1400*8e33eff8Schristos 1401*8e33eff8Schristos return false; 1402*8e33eff8Schristos } 1403*8e33eff8Schristos 1404*8e33eff8Schristos static unsigned 1405*8e33eff8Schristos malloc_narenas_default(void) { 1406*8e33eff8Schristos assert(ncpus > 0); 1407*8e33eff8Schristos /* 1408*8e33eff8Schristos * For SMP systems, create more than one arena per CPU by 1409*8e33eff8Schristos * default. 
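 *
 * A worked example (CPU counts assumed for illustration): with
 * ncpus == 8 the expression below yields 8 << 2 == 32 arenas, while a
 * uniprocessor (ncpus == 1) gets a single arena. Four arenas per CPU
 * trades some metadata overhead for less lock contention between
 * concurrently allocating threads.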
1410*8e33eff8Schristos */
1411*8e33eff8Schristos if (ncpus > 1) {
1412*8e33eff8Schristos return ncpus << 2;
1413*8e33eff8Schristos } else {
1414*8e33eff8Schristos return 1;
1415*8e33eff8Schristos }
1416*8e33eff8Schristos }
1417*8e33eff8Schristos
1418*8e33eff8Schristos static percpu_arena_mode_t
1419*8e33eff8Schristos percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1420*8e33eff8Schristos assert(!malloc_initialized());
1421*8e33eff8Schristos assert(mode <= percpu_arena_disabled);
1422*8e33eff8Schristos
1423*8e33eff8Schristos if (mode != percpu_arena_disabled) {
1424*8e33eff8Schristos mode += percpu_arena_mode_enabled_base;
1425*8e33eff8Schristos }
1426*8e33eff8Schristos
1427*8e33eff8Schristos return mode;
1428*8e33eff8Schristos }
1429*8e33eff8Schristos
1430*8e33eff8Schristos static bool
1431*8e33eff8Schristos malloc_init_narenas(void) {
1432*8e33eff8Schristos assert(ncpus > 0);
1433*8e33eff8Schristos
1434*8e33eff8Schristos if (opt_percpu_arena != percpu_arena_disabled) {
1435*8e33eff8Schristos if (!have_percpu_arena || malloc_getcpu() < 0) {
1436*8e33eff8Schristos opt_percpu_arena = percpu_arena_disabled;
1437*8e33eff8Schristos malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1438*8e33eff8Schristos "available. Setting narenas to %u.\n", opt_narenas ?
1439*8e33eff8Schristos opt_narenas : malloc_narenas_default());
1440*8e33eff8Schristos if (opt_abort) {
1441*8e33eff8Schristos abort();
1442*8e33eff8Schristos }
1443*8e33eff8Schristos } else {
1444*8e33eff8Schristos if (ncpus >= MALLOCX_ARENA_LIMIT) {
1445*8e33eff8Schristos malloc_printf("<jemalloc>: narenas w/ percpu "
1446*8e33eff8Schristos "arena beyond limit (%d)\n", ncpus);
1447*8e33eff8Schristos if (opt_abort) {
1448*8e33eff8Schristos abort();
1449*8e33eff8Schristos }
1450*8e33eff8Schristos return true;
1451*8e33eff8Schristos }
1452*8e33eff8Schristos /* NB: opt_percpu_arena isn't fully initialized yet. */
1453*8e33eff8Schristos if (percpu_arena_as_initialized(opt_percpu_arena) ==
1454*8e33eff8Schristos per_phycpu_arena && ncpus % 2 != 0) {
1455*8e33eff8Schristos malloc_printf("<jemalloc>: invalid "
1456*8e33eff8Schristos "configuration -- per physical CPU arena "
1457*8e33eff8Schristos "with odd number (%u) of CPUs (no hyper "
1458*8e33eff8Schristos "threading?).\n", ncpus);
1459*8e33eff8Schristos if (opt_abort)
1460*8e33eff8Schristos abort();
1461*8e33eff8Schristos }
1462*8e33eff8Schristos unsigned n = percpu_arena_ind_limit(
1463*8e33eff8Schristos percpu_arena_as_initialized(opt_percpu_arena));
1464*8e33eff8Schristos if (opt_narenas < n) {
1465*8e33eff8Schristos /*
1466*8e33eff8Schristos * If narenas is specified with percpu_arena
1467*8e33eff8Schristos * enabled, the actual narenas is set to the greater
1468*8e33eff8Schristos * of the two. percpu_arena_choose will be free
1469*8e33eff8Schristos * to use any of the arenas based on CPU
1470*8e33eff8Schristos * id. This is conservative (at a small cost)
1471*8e33eff8Schristos * but ensures correctness.
1472*8e33eff8Schristos *
1473*8e33eff8Schristos * If for some reason the ncpus determined at
1474*8e33eff8Schristos * boot is not the actual number (e.g. because
1475*8e33eff8Schristos * of affinity setting from numactl), reserving
1476*8e33eff8Schristos * narenas this way provides a workaround for
1477*8e33eff8Schristos * percpu_arena.
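 *
 * Hedged example (CPU count assumed): in percpu_arena mode on a
 * 16-CPU machine, if percpu_arena_ind_limit() works out to 16, a
 * malloc_conf setting of "narenas:8" is raised to 16 here, while
 * "narenas:64" is left untouched.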
1478*8e33eff8Schristos */ 1479*8e33eff8Schristos opt_narenas = n; 1480*8e33eff8Schristos } 1481*8e33eff8Schristos } 1482*8e33eff8Schristos } 1483*8e33eff8Schristos if (opt_narenas == 0) { 1484*8e33eff8Schristos opt_narenas = malloc_narenas_default(); 1485*8e33eff8Schristos } 1486*8e33eff8Schristos assert(opt_narenas > 0); 1487*8e33eff8Schristos 1488*8e33eff8Schristos narenas_auto = opt_narenas; 1489*8e33eff8Schristos /* 1490*8e33eff8Schristos * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 1491*8e33eff8Schristos */ 1492*8e33eff8Schristos if (narenas_auto >= MALLOCX_ARENA_LIMIT) { 1493*8e33eff8Schristos narenas_auto = MALLOCX_ARENA_LIMIT - 1; 1494*8e33eff8Schristos malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 1495*8e33eff8Schristos narenas_auto); 1496*8e33eff8Schristos } 1497*8e33eff8Schristos narenas_total_set(narenas_auto); 1498*8e33eff8Schristos 1499*8e33eff8Schristos return false; 1500*8e33eff8Schristos } 1501*8e33eff8Schristos 1502*8e33eff8Schristos static void 1503*8e33eff8Schristos malloc_init_percpu(void) { 1504*8e33eff8Schristos opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); 1505*8e33eff8Schristos } 1506*8e33eff8Schristos 1507*8e33eff8Schristos static bool 1508*8e33eff8Schristos malloc_init_hard_finish(void) { 1509*8e33eff8Schristos if (malloc_mutex_boot()) { 1510*8e33eff8Schristos return true; 1511*8e33eff8Schristos } 1512*8e33eff8Schristos 1513*8e33eff8Schristos malloc_init_state = malloc_init_initialized; 1514*8e33eff8Schristos malloc_slow_flag_init(); 1515*8e33eff8Schristos 1516*8e33eff8Schristos return false; 1517*8e33eff8Schristos } 1518*8e33eff8Schristos 1519*8e33eff8Schristos static void 1520*8e33eff8Schristos malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { 1521*8e33eff8Schristos malloc_mutex_assert_owner(tsdn, &init_lock); 1522*8e33eff8Schristos malloc_mutex_unlock(tsdn, &init_lock); 1523*8e33eff8Schristos if (reentrancy_set) { 1524*8e33eff8Schristos assert(!tsdn_null(tsdn)); 1525*8e33eff8Schristos tsd_t *tsd = tsdn_tsd(tsdn); 1526*8e33eff8Schristos assert(tsd_reentrancy_level_get(tsd) > 0); 1527*8e33eff8Schristos post_reentrancy(tsd); 1528*8e33eff8Schristos } 1529*8e33eff8Schristos } 1530*8e33eff8Schristos 1531*8e33eff8Schristos static bool 1532*8e33eff8Schristos malloc_init_hard(void) { 1533*8e33eff8Schristos tsd_t *tsd; 1534*8e33eff8Schristos 1535*8e33eff8Schristos #if defined(_WIN32) && _WIN32_WINNT < 0x0600 1536*8e33eff8Schristos _init_init_lock(); 1537*8e33eff8Schristos #endif 1538*8e33eff8Schristos malloc_mutex_lock(TSDN_NULL, &init_lock); 1539*8e33eff8Schristos 1540*8e33eff8Schristos #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ 1541*8e33eff8Schristos malloc_init_hard_cleanup(tsdn, reentrancy); \ 1542*8e33eff8Schristos return ret; 1543*8e33eff8Schristos 1544*8e33eff8Schristos if (!malloc_init_hard_needed()) { 1545*8e33eff8Schristos UNLOCK_RETURN(TSDN_NULL, false, false) 1546*8e33eff8Schristos } 1547*8e33eff8Schristos 1548*8e33eff8Schristos if (malloc_init_state != malloc_init_a0_initialized && 1549*8e33eff8Schristos malloc_init_hard_a0_locked()) { 1550*8e33eff8Schristos UNLOCK_RETURN(TSDN_NULL, true, false) 1551*8e33eff8Schristos } 1552*8e33eff8Schristos 1553*8e33eff8Schristos malloc_mutex_unlock(TSDN_NULL, &init_lock); 1554*8e33eff8Schristos /* Recursive allocation relies on functional tsd. 
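 * (malloc_tsd_boot0() can itself allocate, which is why a0 was brought
 * up before this point; anything it allocates is served from arena 0.)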
*/ 1555*8e33eff8Schristos tsd = malloc_tsd_boot0(); 1556*8e33eff8Schristos if (tsd == NULL) { 1557*8e33eff8Schristos return true; 1558*8e33eff8Schristos } 1559*8e33eff8Schristos if (malloc_init_hard_recursible()) { 1560*8e33eff8Schristos return true; 1561*8e33eff8Schristos } 1562*8e33eff8Schristos 1563*8e33eff8Schristos malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 1564*8e33eff8Schristos /* Set reentrancy level to 1 during init. */ 1565*8e33eff8Schristos pre_reentrancy(tsd, NULL); 1566*8e33eff8Schristos /* Initialize narenas before prof_boot2 (for allocation). */ 1567*8e33eff8Schristos if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { 1568*8e33eff8Schristos UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1569*8e33eff8Schristos } 1570*8e33eff8Schristos if (config_prof && prof_boot2(tsd)) { 1571*8e33eff8Schristos UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1572*8e33eff8Schristos } 1573*8e33eff8Schristos 1574*8e33eff8Schristos malloc_init_percpu(); 1575*8e33eff8Schristos 1576*8e33eff8Schristos if (malloc_init_hard_finish()) { 1577*8e33eff8Schristos UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1578*8e33eff8Schristos } 1579*8e33eff8Schristos post_reentrancy(tsd); 1580*8e33eff8Schristos malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1581*8e33eff8Schristos 1582*8e33eff8Schristos witness_assert_lockless(witness_tsd_tsdn( 1583*8e33eff8Schristos tsd_witness_tsdp_get_unsafe(tsd))); 1584*8e33eff8Schristos malloc_tsd_boot1(); 1585*8e33eff8Schristos /* Update TSD after tsd_boot1. */ 1586*8e33eff8Schristos tsd = tsd_fetch(); 1587*8e33eff8Schristos if (opt_background_thread) { 1588*8e33eff8Schristos assert(have_background_thread); 1589*8e33eff8Schristos /* 1590*8e33eff8Schristos * Need to finish init & unlock first before creating background 1591*8e33eff8Schristos * threads (pthread_create depends on malloc). ctl_init (which 1592*8e33eff8Schristos * sets isthreaded) needs to be called without holding any lock. 1593*8e33eff8Schristos */ 1594*8e33eff8Schristos background_thread_ctl_init(tsd_tsdn(tsd)); 1595*8e33eff8Schristos 1596*8e33eff8Schristos malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); 1597*8e33eff8Schristos bool err = background_thread_create(tsd, 0); 1598*8e33eff8Schristos malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); 1599*8e33eff8Schristos if (err) { 1600*8e33eff8Schristos return true; 1601*8e33eff8Schristos } 1602*8e33eff8Schristos } 1603*8e33eff8Schristos #undef UNLOCK_RETURN 1604*8e33eff8Schristos return false; 1605*8e33eff8Schristos } 1606*8e33eff8Schristos 1607*8e33eff8Schristos /* 1608*8e33eff8Schristos * End initialization functions. 1609*8e33eff8Schristos */ 1610*8e33eff8Schristos /******************************************************************************/ 1611*8e33eff8Schristos /* 1612*8e33eff8Schristos * Begin allocation-path internal functions and data structures. 1613*8e33eff8Schristos */ 1614*8e33eff8Schristos 1615*8e33eff8Schristos /* 1616*8e33eff8Schristos * Settings determined by the documented behavior of the allocation functions. 1617*8e33eff8Schristos */ 1618*8e33eff8Schristos typedef struct static_opts_s static_opts_t; 1619*8e33eff8Schristos struct static_opts_s { 1620*8e33eff8Schristos /* Whether or not allocation size may overflow. */ 1621*8e33eff8Schristos bool may_overflow; 1622*8e33eff8Schristos /* Whether or not allocations of size 0 should be treated as size 1. 
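 * For example, je_malloc() sets this so that malloc(0) still returns a
 * unique, freeable pointer: the zero-byte request is bumped to one byte
 * before any size-class computation happens.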
*/ 1623*8e33eff8Schristos bool bump_empty_alloc; 1624*8e33eff8Schristos /* 1625*8e33eff8Schristos * Whether to assert that allocations are not of size 0 (after any 1626*8e33eff8Schristos * bumping). 1627*8e33eff8Schristos */ 1628*8e33eff8Schristos bool assert_nonempty_alloc; 1629*8e33eff8Schristos 1630*8e33eff8Schristos /* 1631*8e33eff8Schristos * Whether or not to modify the 'result' argument to malloc in case of 1632*8e33eff8Schristos * error. 1633*8e33eff8Schristos */ 1634*8e33eff8Schristos bool null_out_result_on_error; 1635*8e33eff8Schristos /* Whether to set errno when we encounter an error condition. */ 1636*8e33eff8Schristos bool set_errno_on_error; 1637*8e33eff8Schristos 1638*8e33eff8Schristos /* 1639*8e33eff8Schristos * The minimum valid alignment for functions requesting aligned storage. 1640*8e33eff8Schristos */ 1641*8e33eff8Schristos size_t min_alignment; 1642*8e33eff8Schristos 1643*8e33eff8Schristos /* The error string to use if we oom. */ 1644*8e33eff8Schristos const char *oom_string; 1645*8e33eff8Schristos /* The error string to use if the passed-in alignment is invalid. */ 1646*8e33eff8Schristos const char *invalid_alignment_string; 1647*8e33eff8Schristos 1648*8e33eff8Schristos /* 1649*8e33eff8Schristos * False if we're configured to skip some time-consuming operations. 1650*8e33eff8Schristos * 1651*8e33eff8Schristos * This isn't really a malloc "behavior", but it acts as a useful 1652*8e33eff8Schristos * summary of several other static (or at least, static after program 1653*8e33eff8Schristos * initialization) options. 1654*8e33eff8Schristos */ 1655*8e33eff8Schristos bool slow; 1656*8e33eff8Schristos }; 1657*8e33eff8Schristos 1658*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void 1659*8e33eff8Schristos static_opts_init(static_opts_t *static_opts) { 1660*8e33eff8Schristos static_opts->may_overflow = false; 1661*8e33eff8Schristos static_opts->bump_empty_alloc = false; 1662*8e33eff8Schristos static_opts->assert_nonempty_alloc = false; 1663*8e33eff8Schristos static_opts->null_out_result_on_error = false; 1664*8e33eff8Schristos static_opts->set_errno_on_error = false; 1665*8e33eff8Schristos static_opts->min_alignment = 0; 1666*8e33eff8Schristos static_opts->oom_string = ""; 1667*8e33eff8Schristos static_opts->invalid_alignment_string = ""; 1668*8e33eff8Schristos static_opts->slow = false; 1669*8e33eff8Schristos } 1670*8e33eff8Schristos 1671*8e33eff8Schristos /* 1672*8e33eff8Schristos * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we 1673*8e33eff8Schristos * should have one constant here per magic value there. Note however that the 1674*8e33eff8Schristos * representations need not be related. 
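 *
 * For instance, MALLOCX_TCACHE_NONE from the public header and
 * TCACHE_IND_NONE below both mean "bypass the thread cache", but
 * je_mallocx() translates one into the other explicitly rather than
 * relying on the two constants sharing a bit pattern.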
1675*8e33eff8Schristos */ 1676*8e33eff8Schristos #define TCACHE_IND_NONE ((unsigned)-1) 1677*8e33eff8Schristos #define TCACHE_IND_AUTOMATIC ((unsigned)-2) 1678*8e33eff8Schristos #define ARENA_IND_AUTOMATIC ((unsigned)-1) 1679*8e33eff8Schristos 1680*8e33eff8Schristos typedef struct dynamic_opts_s dynamic_opts_t; 1681*8e33eff8Schristos struct dynamic_opts_s { 1682*8e33eff8Schristos void **result; 1683*8e33eff8Schristos size_t num_items; 1684*8e33eff8Schristos size_t item_size; 1685*8e33eff8Schristos size_t alignment; 1686*8e33eff8Schristos bool zero; 1687*8e33eff8Schristos unsigned tcache_ind; 1688*8e33eff8Schristos unsigned arena_ind; 1689*8e33eff8Schristos }; 1690*8e33eff8Schristos 1691*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void 1692*8e33eff8Schristos dynamic_opts_init(dynamic_opts_t *dynamic_opts) { 1693*8e33eff8Schristos dynamic_opts->result = NULL; 1694*8e33eff8Schristos dynamic_opts->num_items = 0; 1695*8e33eff8Schristos dynamic_opts->item_size = 0; 1696*8e33eff8Schristos dynamic_opts->alignment = 0; 1697*8e33eff8Schristos dynamic_opts->zero = false; 1698*8e33eff8Schristos dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; 1699*8e33eff8Schristos dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; 1700*8e33eff8Schristos } 1701*8e33eff8Schristos 1702*8e33eff8Schristos /* ind is ignored if dopts->alignment > 0. */ 1703*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void * 1704*8e33eff8Schristos imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1705*8e33eff8Schristos size_t size, size_t usize, szind_t ind) { 1706*8e33eff8Schristos tcache_t *tcache; 1707*8e33eff8Schristos arena_t *arena; 1708*8e33eff8Schristos 1709*8e33eff8Schristos /* Fill in the tcache. */ 1710*8e33eff8Schristos if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { 1711*8e33eff8Schristos if (likely(!sopts->slow)) { 1712*8e33eff8Schristos /* Getting tcache ptr unconditionally. */ 1713*8e33eff8Schristos tcache = tsd_tcachep_get(tsd); 1714*8e33eff8Schristos assert(tcache == tcache_get(tsd)); 1715*8e33eff8Schristos } else { 1716*8e33eff8Schristos tcache = tcache_get(tsd); 1717*8e33eff8Schristos } 1718*8e33eff8Schristos } else if (dopts->tcache_ind == TCACHE_IND_NONE) { 1719*8e33eff8Schristos tcache = NULL; 1720*8e33eff8Schristos } else { 1721*8e33eff8Schristos tcache = tcaches_get(tsd, dopts->tcache_ind); 1722*8e33eff8Schristos } 1723*8e33eff8Schristos 1724*8e33eff8Schristos /* Fill in the arena. */ 1725*8e33eff8Schristos if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { 1726*8e33eff8Schristos /* 1727*8e33eff8Schristos * In case of automatic arena management, we defer arena 1728*8e33eff8Schristos * computation until as late as we can, hoping to fill the 1729*8e33eff8Schristos * allocation out of the tcache. 
1730*8e33eff8Schristos */ 1731*8e33eff8Schristos arena = NULL; 1732*8e33eff8Schristos } else { 1733*8e33eff8Schristos arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); 1734*8e33eff8Schristos } 1735*8e33eff8Schristos 1736*8e33eff8Schristos if (unlikely(dopts->alignment != 0)) { 1737*8e33eff8Schristos return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, 1738*8e33eff8Schristos dopts->zero, tcache, arena); 1739*8e33eff8Schristos } 1740*8e33eff8Schristos 1741*8e33eff8Schristos return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, 1742*8e33eff8Schristos arena, sopts->slow); 1743*8e33eff8Schristos } 1744*8e33eff8Schristos 1745*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void * 1746*8e33eff8Schristos imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1747*8e33eff8Schristos size_t usize, szind_t ind) { 1748*8e33eff8Schristos void *ret; 1749*8e33eff8Schristos 1750*8e33eff8Schristos /* 1751*8e33eff8Schristos * For small allocations, sampling bumps the usize. If so, we allocate 1752*8e33eff8Schristos * from the ind_large bucket. 1753*8e33eff8Schristos */ 1754*8e33eff8Schristos szind_t ind_large; 1755*8e33eff8Schristos size_t bumped_usize = usize; 1756*8e33eff8Schristos 1757*8e33eff8Schristos if (usize <= SMALL_MAXCLASS) { 1758*8e33eff8Schristos assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : 1759*8e33eff8Schristos sz_sa2u(LARGE_MINCLASS, dopts->alignment)) 1760*8e33eff8Schristos == LARGE_MINCLASS); 1761*8e33eff8Schristos ind_large = sz_size2index(LARGE_MINCLASS); 1762*8e33eff8Schristos bumped_usize = sz_s2u(LARGE_MINCLASS); 1763*8e33eff8Schristos ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, 1764*8e33eff8Schristos bumped_usize, ind_large); 1765*8e33eff8Schristos if (unlikely(ret == NULL)) { 1766*8e33eff8Schristos return NULL; 1767*8e33eff8Schristos } 1768*8e33eff8Schristos arena_prof_promote(tsd_tsdn(tsd), ret, usize); 1769*8e33eff8Schristos } else { 1770*8e33eff8Schristos ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); 1771*8e33eff8Schristos } 1772*8e33eff8Schristos 1773*8e33eff8Schristos return ret; 1774*8e33eff8Schristos } 1775*8e33eff8Schristos 1776*8e33eff8Schristos /* 1777*8e33eff8Schristos * Returns true if the allocation will overflow, and false otherwise. Sets 1778*8e33eff8Schristos * *size to the product either way. 1779*8e33eff8Schristos */ 1780*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE bool 1781*8e33eff8Schristos compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, 1782*8e33eff8Schristos size_t *size) { 1783*8e33eff8Schristos /* 1784*8e33eff8Schristos * This function is just num_items * item_size, except that we may have 1785*8e33eff8Schristos * to check for overflow. 1786*8e33eff8Schristos */ 1787*8e33eff8Schristos 1788*8e33eff8Schristos if (!may_overflow) { 1789*8e33eff8Schristos assert(dopts->num_items == 1); 1790*8e33eff8Schristos *size = dopts->item_size; 1791*8e33eff8Schristos return false; 1792*8e33eff8Schristos } 1793*8e33eff8Schristos 1794*8e33eff8Schristos /* A size_t with its high-half bits all set to 1. 
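 *
 * On an LP64 target (assumed here for illustration) high_bits is
 * 0xffffffff00000000. A pair that slips past the cheap half-width test
 * below is then caught by the division check, e.g.:
 *
 *     num_items = item_size = (1ULL << 32) + 1;
 *
 * The product mod 2^64 is (1ULL << 33) + 1, which is nonzero, yet
 * *size / item_size == 1 != num_items, so the overflow is reported.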
*/ 1795*8e33eff8Schristos static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); 1796*8e33eff8Schristos 1797*8e33eff8Schristos *size = dopts->item_size * dopts->num_items; 1798*8e33eff8Schristos 1799*8e33eff8Schristos if (unlikely(*size == 0)) { 1800*8e33eff8Schristos return (dopts->num_items != 0 && dopts->item_size != 0); 1801*8e33eff8Schristos } 1802*8e33eff8Schristos 1803*8e33eff8Schristos /* 1804*8e33eff8Schristos * We got a non-zero size, but we don't know if we overflowed to get 1805*8e33eff8Schristos * there. To avoid having to do a divide, we'll be clever and note that 1806*8e33eff8Schristos * if both A and B can be represented in N/2 bits, then their product 1807*8e33eff8Schristos * can be represented in N bits (without the possibility of overflow). 1808*8e33eff8Schristos */ 1809*8e33eff8Schristos if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { 1810*8e33eff8Schristos return false; 1811*8e33eff8Schristos } 1812*8e33eff8Schristos if (likely(*size / dopts->item_size == dopts->num_items)) { 1813*8e33eff8Schristos return false; 1814*8e33eff8Schristos } 1815*8e33eff8Schristos return true; 1816*8e33eff8Schristos } 1817*8e33eff8Schristos 1818*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE int 1819*8e33eff8Schristos imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { 1820*8e33eff8Schristos /* Where the actual allocated memory will live. */ 1821*8e33eff8Schristos void *allocation = NULL; 1822*8e33eff8Schristos /* Filled in by compute_size_with_overflow below. */ 1823*8e33eff8Schristos size_t size = 0; 1824*8e33eff8Schristos /* 1825*8e33eff8Schristos * For unaligned allocations, we need only ind. For aligned 1826*8e33eff8Schristos * allocations, or in case of stats or profiling we need usize. 1827*8e33eff8Schristos * 1828*8e33eff8Schristos * These are actually dead stores, in that their values are reset before 1829*8e33eff8Schristos * any branch on their value is taken. Sometimes though, it's 1830*8e33eff8Schristos * convenient to pass them as arguments before this point. To avoid 1831*8e33eff8Schristos * undefined behavior then, we initialize them with dummy stores. 1832*8e33eff8Schristos */ 1833*8e33eff8Schristos szind_t ind = 0; 1834*8e33eff8Schristos size_t usize = 0; 1835*8e33eff8Schristos 1836*8e33eff8Schristos /* Reentrancy is only checked on slow path. */ 1837*8e33eff8Schristos int8_t reentrancy_level; 1838*8e33eff8Schristos 1839*8e33eff8Schristos /* Compute the amount of memory the user wants. */ 1840*8e33eff8Schristos if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, 1841*8e33eff8Schristos &size))) { 1842*8e33eff8Schristos goto label_oom; 1843*8e33eff8Schristos } 1844*8e33eff8Schristos 1845*8e33eff8Schristos /* Validate the user input. */ 1846*8e33eff8Schristos if (sopts->bump_empty_alloc) { 1847*8e33eff8Schristos if (unlikely(size == 0)) { 1848*8e33eff8Schristos size = 1; 1849*8e33eff8Schristos } 1850*8e33eff8Schristos } 1851*8e33eff8Schristos 1852*8e33eff8Schristos if (sopts->assert_nonempty_alloc) { 1853*8e33eff8Schristos assert (size != 0); 1854*8e33eff8Schristos } 1855*8e33eff8Schristos 1856*8e33eff8Schristos if (unlikely(dopts->alignment < sopts->min_alignment 1857*8e33eff8Schristos || (dopts->alignment & (dopts->alignment - 1)) != 0)) { 1858*8e33eff8Schristos goto label_invalid_alignment; 1859*8e33eff8Schristos } 1860*8e33eff8Schristos 1861*8e33eff8Schristos /* This is the beginning of the "core" algorithm. 
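 * From here the routine computes (ind, usize) for the request,
 * downgrades to arena 0 on reentrancy, takes the profiling-sample
 * decision, and finally dispatches to imalloc_no_sample() or
 * imalloc_sample().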
*/ 1862*8e33eff8Schristos 1863*8e33eff8Schristos if (dopts->alignment == 0) { 1864*8e33eff8Schristos ind = sz_size2index(size); 1865*8e33eff8Schristos if (unlikely(ind >= NSIZES)) { 1866*8e33eff8Schristos goto label_oom; 1867*8e33eff8Schristos } 1868*8e33eff8Schristos if (config_stats || (config_prof && opt_prof)) { 1869*8e33eff8Schristos usize = sz_index2size(ind); 1870*8e33eff8Schristos assert(usize > 0 && usize <= LARGE_MAXCLASS); 1871*8e33eff8Schristos } 1872*8e33eff8Schristos } else { 1873*8e33eff8Schristos usize = sz_sa2u(size, dopts->alignment); 1874*8e33eff8Schristos if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 1875*8e33eff8Schristos goto label_oom; 1876*8e33eff8Schristos } 1877*8e33eff8Schristos } 1878*8e33eff8Schristos 1879*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 1880*8e33eff8Schristos 1881*8e33eff8Schristos /* 1882*8e33eff8Schristos * If we need to handle reentrancy, we can do it out of a 1883*8e33eff8Schristos * known-initialized arena (i.e. arena 0). 1884*8e33eff8Schristos */ 1885*8e33eff8Schristos reentrancy_level = tsd_reentrancy_level_get(tsd); 1886*8e33eff8Schristos if (sopts->slow && unlikely(reentrancy_level > 0)) { 1887*8e33eff8Schristos /* 1888*8e33eff8Schristos * We should never specify particular arenas or tcaches from 1889*8e33eff8Schristos * within our internal allocations. 1890*8e33eff8Schristos */ 1891*8e33eff8Schristos assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || 1892*8e33eff8Schristos dopts->tcache_ind == TCACHE_IND_NONE); 1893*8e33eff8Schristos assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); 1894*8e33eff8Schristos dopts->tcache_ind = TCACHE_IND_NONE; 1895*8e33eff8Schristos /* We know that arena 0 has already been initialized. */ 1896*8e33eff8Schristos dopts->arena_ind = 0; 1897*8e33eff8Schristos } 1898*8e33eff8Schristos 1899*8e33eff8Schristos /* If profiling is on, get our profiling context. */ 1900*8e33eff8Schristos if (config_prof && opt_prof) { 1901*8e33eff8Schristos /* 1902*8e33eff8Schristos * Note that if we're going down this path, usize must have been 1903*8e33eff8Schristos * initialized in the previous if statement. 1904*8e33eff8Schristos */ 1905*8e33eff8Schristos prof_tctx_t *tctx = prof_alloc_prep( 1906*8e33eff8Schristos tsd, usize, prof_active_get_unlocked(), true); 1907*8e33eff8Schristos 1908*8e33eff8Schristos alloc_ctx_t alloc_ctx; 1909*8e33eff8Schristos if (likely((uintptr_t)tctx == (uintptr_t)1U)) { 1910*8e33eff8Schristos alloc_ctx.slab = (usize <= SMALL_MAXCLASS); 1911*8e33eff8Schristos allocation = imalloc_no_sample( 1912*8e33eff8Schristos sopts, dopts, tsd, usize, usize, ind); 1913*8e33eff8Schristos } else if ((uintptr_t)tctx > (uintptr_t)1U) { 1914*8e33eff8Schristos /* 1915*8e33eff8Schristos * Note that ind might still be 0 here. This is fine; 1916*8e33eff8Schristos * imalloc_sample ignores ind if dopts->alignment > 0. 
1917*8e33eff8Schristos */ 1918*8e33eff8Schristos allocation = imalloc_sample( 1919*8e33eff8Schristos sopts, dopts, tsd, usize, ind); 1920*8e33eff8Schristos alloc_ctx.slab = false; 1921*8e33eff8Schristos } else { 1922*8e33eff8Schristos allocation = NULL; 1923*8e33eff8Schristos } 1924*8e33eff8Schristos 1925*8e33eff8Schristos if (unlikely(allocation == NULL)) { 1926*8e33eff8Schristos prof_alloc_rollback(tsd, tctx, true); 1927*8e33eff8Schristos goto label_oom; 1928*8e33eff8Schristos } 1929*8e33eff8Schristos prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); 1930*8e33eff8Schristos } else { 1931*8e33eff8Schristos /* 1932*8e33eff8Schristos * If dopts->alignment > 0, then ind is still 0, but usize was 1933*8e33eff8Schristos * computed in the previous if statement. Down the positive 1934*8e33eff8Schristos * alignment path, imalloc_no_sample ignores ind and size 1935*8e33eff8Schristos * (relying only on usize). 1936*8e33eff8Schristos */ 1937*8e33eff8Schristos allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, 1938*8e33eff8Schristos ind); 1939*8e33eff8Schristos if (unlikely(allocation == NULL)) { 1940*8e33eff8Schristos goto label_oom; 1941*8e33eff8Schristos } 1942*8e33eff8Schristos } 1943*8e33eff8Schristos 1944*8e33eff8Schristos /* 1945*8e33eff8Schristos * Allocation has been done at this point. We still have some 1946*8e33eff8Schristos * post-allocation work to do though. 1947*8e33eff8Schristos */ 1948*8e33eff8Schristos assert(dopts->alignment == 0 1949*8e33eff8Schristos || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); 1950*8e33eff8Schristos 1951*8e33eff8Schristos if (config_stats) { 1952*8e33eff8Schristos assert(usize == isalloc(tsd_tsdn(tsd), allocation)); 1953*8e33eff8Schristos *tsd_thread_allocatedp_get(tsd) += usize; 1954*8e33eff8Schristos } 1955*8e33eff8Schristos 1956*8e33eff8Schristos if (sopts->slow) { 1957*8e33eff8Schristos UTRACE(0, size, allocation); 1958*8e33eff8Schristos } 1959*8e33eff8Schristos 1960*8e33eff8Schristos /* Success! */ 1961*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 1962*8e33eff8Schristos *dopts->result = allocation; 1963*8e33eff8Schristos return 0; 1964*8e33eff8Schristos 1965*8e33eff8Schristos label_oom: 1966*8e33eff8Schristos if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { 1967*8e33eff8Schristos malloc_write(sopts->oom_string); 1968*8e33eff8Schristos abort(); 1969*8e33eff8Schristos } 1970*8e33eff8Schristos 1971*8e33eff8Schristos if (sopts->slow) { 1972*8e33eff8Schristos UTRACE(NULL, size, NULL); 1973*8e33eff8Schristos } 1974*8e33eff8Schristos 1975*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 1976*8e33eff8Schristos 1977*8e33eff8Schristos if (sopts->set_errno_on_error) { 1978*8e33eff8Schristos set_errno(ENOMEM); 1979*8e33eff8Schristos } 1980*8e33eff8Schristos 1981*8e33eff8Schristos if (sopts->null_out_result_on_error) { 1982*8e33eff8Schristos *dopts->result = NULL; 1983*8e33eff8Schristos } 1984*8e33eff8Schristos 1985*8e33eff8Schristos return ENOMEM; 1986*8e33eff8Schristos 1987*8e33eff8Schristos /* 1988*8e33eff8Schristos * This label is only jumped to by one goto; we move it out of line 1989*8e33eff8Schristos * anyways to avoid obscuring the non-error paths, and for symmetry with 1990*8e33eff8Schristos * the oom case. 
1991*8e33eff8Schristos */ 1992*8e33eff8Schristos label_invalid_alignment: 1993*8e33eff8Schristos if (config_xmalloc && unlikely(opt_xmalloc)) { 1994*8e33eff8Schristos malloc_write(sopts->invalid_alignment_string); 1995*8e33eff8Schristos abort(); 1996*8e33eff8Schristos } 1997*8e33eff8Schristos 1998*8e33eff8Schristos if (sopts->set_errno_on_error) { 1999*8e33eff8Schristos set_errno(EINVAL); 2000*8e33eff8Schristos } 2001*8e33eff8Schristos 2002*8e33eff8Schristos if (sopts->slow) { 2003*8e33eff8Schristos UTRACE(NULL, size, NULL); 2004*8e33eff8Schristos } 2005*8e33eff8Schristos 2006*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2007*8e33eff8Schristos 2008*8e33eff8Schristos if (sopts->null_out_result_on_error) { 2009*8e33eff8Schristos *dopts->result = NULL; 2010*8e33eff8Schristos } 2011*8e33eff8Schristos 2012*8e33eff8Schristos return EINVAL; 2013*8e33eff8Schristos } 2014*8e33eff8Schristos 2015*8e33eff8Schristos /* Returns the errno-style error code of the allocation. */ 2016*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE int 2017*8e33eff8Schristos imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { 2018*8e33eff8Schristos if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { 2019*8e33eff8Schristos if (config_xmalloc && unlikely(opt_xmalloc)) { 2020*8e33eff8Schristos malloc_write(sopts->oom_string); 2021*8e33eff8Schristos abort(); 2022*8e33eff8Schristos } 2023*8e33eff8Schristos UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); 2024*8e33eff8Schristos set_errno(ENOMEM); 2025*8e33eff8Schristos *dopts->result = NULL; 2026*8e33eff8Schristos 2027*8e33eff8Schristos return ENOMEM; 2028*8e33eff8Schristos } 2029*8e33eff8Schristos 2030*8e33eff8Schristos /* We always need the tsd. Let's grab it right away. */ 2031*8e33eff8Schristos tsd_t *tsd = tsd_fetch(); 2032*8e33eff8Schristos assert(tsd); 2033*8e33eff8Schristos if (likely(tsd_fast(tsd))) { 2034*8e33eff8Schristos /* Fast and common path. */ 2035*8e33eff8Schristos tsd_assert_fast(tsd); 2036*8e33eff8Schristos sopts->slow = false; 2037*8e33eff8Schristos return imalloc_body(sopts, dopts, tsd); 2038*8e33eff8Schristos } else { 2039*8e33eff8Schristos sopts->slow = true; 2040*8e33eff8Schristos return imalloc_body(sopts, dopts, tsd); 2041*8e33eff8Schristos } 2042*8e33eff8Schristos } 2043*8e33eff8Schristos /******************************************************************************/ 2044*8e33eff8Schristos /* 2045*8e33eff8Schristos * Begin malloc(3)-compatible functions. 
2046*8e33eff8Schristos */ 2047*8e33eff8Schristos 2048*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2049*8e33eff8Schristos void JEMALLOC_NOTHROW * 2050*8e33eff8Schristos JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2051*8e33eff8Schristos je_malloc(size_t size) { 2052*8e33eff8Schristos void *ret; 2053*8e33eff8Schristos static_opts_t sopts; 2054*8e33eff8Schristos dynamic_opts_t dopts; 2055*8e33eff8Schristos 2056*8e33eff8Schristos LOG("core.malloc.entry", "size: %zu", size); 2057*8e33eff8Schristos 2058*8e33eff8Schristos static_opts_init(&sopts); 2059*8e33eff8Schristos dynamic_opts_init(&dopts); 2060*8e33eff8Schristos 2061*8e33eff8Schristos sopts.bump_empty_alloc = true; 2062*8e33eff8Schristos sopts.null_out_result_on_error = true; 2063*8e33eff8Schristos sopts.set_errno_on_error = true; 2064*8e33eff8Schristos sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; 2065*8e33eff8Schristos 2066*8e33eff8Schristos dopts.result = &ret; 2067*8e33eff8Schristos dopts.num_items = 1; 2068*8e33eff8Schristos dopts.item_size = size; 2069*8e33eff8Schristos 2070*8e33eff8Schristos imalloc(&sopts, &dopts); 2071*8e33eff8Schristos 2072*8e33eff8Schristos LOG("core.malloc.exit", "result: %p", ret); 2073*8e33eff8Schristos 2074*8e33eff8Schristos return ret; 2075*8e33eff8Schristos } 2076*8e33eff8Schristos 2077*8e33eff8Schristos JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2078*8e33eff8Schristos JEMALLOC_ATTR(nonnull(1)) 2079*8e33eff8Schristos je_posix_memalign(void **memptr, size_t alignment, size_t size) { 2080*8e33eff8Schristos int ret; 2081*8e33eff8Schristos static_opts_t sopts; 2082*8e33eff8Schristos dynamic_opts_t dopts; 2083*8e33eff8Schristos 2084*8e33eff8Schristos LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " 2085*8e33eff8Schristos "size: %zu", memptr, alignment, size); 2086*8e33eff8Schristos 2087*8e33eff8Schristos static_opts_init(&sopts); 2088*8e33eff8Schristos dynamic_opts_init(&dopts); 2089*8e33eff8Schristos 2090*8e33eff8Schristos sopts.bump_empty_alloc = true; 2091*8e33eff8Schristos sopts.min_alignment = sizeof(void *); 2092*8e33eff8Schristos sopts.oom_string = 2093*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2094*8e33eff8Schristos sopts.invalid_alignment_string = 2095*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2096*8e33eff8Schristos 2097*8e33eff8Schristos dopts.result = memptr; 2098*8e33eff8Schristos dopts.num_items = 1; 2099*8e33eff8Schristos dopts.item_size = size; 2100*8e33eff8Schristos dopts.alignment = alignment; 2101*8e33eff8Schristos 2102*8e33eff8Schristos ret = imalloc(&sopts, &dopts); 2103*8e33eff8Schristos 2104*8e33eff8Schristos LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, 2105*8e33eff8Schristos *memptr); 2106*8e33eff8Schristos 2107*8e33eff8Schristos return ret; 2108*8e33eff8Schristos } 2109*8e33eff8Schristos 2110*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2111*8e33eff8Schristos void JEMALLOC_NOTHROW * 2112*8e33eff8Schristos JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) 2113*8e33eff8Schristos je_aligned_alloc(size_t alignment, size_t size) { 2114*8e33eff8Schristos void *ret; 2115*8e33eff8Schristos 2116*8e33eff8Schristos static_opts_t sopts; 2117*8e33eff8Schristos dynamic_opts_t dopts; 2118*8e33eff8Schristos 2119*8e33eff8Schristos LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", 2120*8e33eff8Schristos alignment, size); 2121*8e33eff8Schristos 2122*8e33eff8Schristos static_opts_init(&sopts); 
2123*8e33eff8Schristos dynamic_opts_init(&dopts); 2124*8e33eff8Schristos 2125*8e33eff8Schristos sopts.bump_empty_alloc = true; 2126*8e33eff8Schristos sopts.null_out_result_on_error = true; 2127*8e33eff8Schristos sopts.set_errno_on_error = true; 2128*8e33eff8Schristos sopts.min_alignment = 1; 2129*8e33eff8Schristos sopts.oom_string = 2130*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2131*8e33eff8Schristos sopts.invalid_alignment_string = 2132*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2133*8e33eff8Schristos 2134*8e33eff8Schristos dopts.result = &ret; 2135*8e33eff8Schristos dopts.num_items = 1; 2136*8e33eff8Schristos dopts.item_size = size; 2137*8e33eff8Schristos dopts.alignment = alignment; 2138*8e33eff8Schristos 2139*8e33eff8Schristos imalloc(&sopts, &dopts); 2140*8e33eff8Schristos 2141*8e33eff8Schristos LOG("core.aligned_alloc.exit", "result: %p", ret); 2142*8e33eff8Schristos 2143*8e33eff8Schristos return ret; 2144*8e33eff8Schristos } 2145*8e33eff8Schristos 2146*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2147*8e33eff8Schristos void JEMALLOC_NOTHROW * 2148*8e33eff8Schristos JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 2149*8e33eff8Schristos je_calloc(size_t num, size_t size) { 2150*8e33eff8Schristos void *ret; 2151*8e33eff8Schristos static_opts_t sopts; 2152*8e33eff8Schristos dynamic_opts_t dopts; 2153*8e33eff8Schristos 2154*8e33eff8Schristos LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); 2155*8e33eff8Schristos 2156*8e33eff8Schristos static_opts_init(&sopts); 2157*8e33eff8Schristos dynamic_opts_init(&dopts); 2158*8e33eff8Schristos 2159*8e33eff8Schristos sopts.may_overflow = true; 2160*8e33eff8Schristos sopts.bump_empty_alloc = true; 2161*8e33eff8Schristos sopts.null_out_result_on_error = true; 2162*8e33eff8Schristos sopts.set_errno_on_error = true; 2163*8e33eff8Schristos sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; 2164*8e33eff8Schristos 2165*8e33eff8Schristos dopts.result = &ret; 2166*8e33eff8Schristos dopts.num_items = num; 2167*8e33eff8Schristos dopts.item_size = size; 2168*8e33eff8Schristos dopts.zero = true; 2169*8e33eff8Schristos 2170*8e33eff8Schristos imalloc(&sopts, &dopts); 2171*8e33eff8Schristos 2172*8e33eff8Schristos LOG("core.calloc.exit", "result: %p", ret); 2173*8e33eff8Schristos 2174*8e33eff8Schristos return ret; 2175*8e33eff8Schristos } 2176*8e33eff8Schristos 2177*8e33eff8Schristos static void * 2178*8e33eff8Schristos irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2179*8e33eff8Schristos prof_tctx_t *tctx) { 2180*8e33eff8Schristos void *p; 2181*8e33eff8Schristos 2182*8e33eff8Schristos if (tctx == NULL) { 2183*8e33eff8Schristos return NULL; 2184*8e33eff8Schristos } 2185*8e33eff8Schristos if (usize <= SMALL_MAXCLASS) { 2186*8e33eff8Schristos p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); 2187*8e33eff8Schristos if (p == NULL) { 2188*8e33eff8Schristos return NULL; 2189*8e33eff8Schristos } 2190*8e33eff8Schristos arena_prof_promote(tsd_tsdn(tsd), p, usize); 2191*8e33eff8Schristos } else { 2192*8e33eff8Schristos p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2193*8e33eff8Schristos } 2194*8e33eff8Schristos 2195*8e33eff8Schristos return p; 2196*8e33eff8Schristos } 2197*8e33eff8Schristos 2198*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void * 2199*8e33eff8Schristos irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2200*8e33eff8Schristos alloc_ctx_t 
*alloc_ctx) { 2201*8e33eff8Schristos void *p; 2202*8e33eff8Schristos bool prof_activex; 2203*8e33eff8Schristos prof_tctx_t *old_tctx, *tctx; 2204*8e33eff8Schristos 2205*8e33eff8Schristos prof_activex = prof_active_get_unlocked(); 2206*8e33eff8Schristos old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 2207*8e33eff8Schristos tctx = prof_alloc_prep(tsd, usize, prof_activex, true); 2208*8e33eff8Schristos if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2209*8e33eff8Schristos p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); 2210*8e33eff8Schristos } else { 2211*8e33eff8Schristos p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2212*8e33eff8Schristos } 2213*8e33eff8Schristos if (unlikely(p == NULL)) { 2214*8e33eff8Schristos prof_alloc_rollback(tsd, tctx, true); 2215*8e33eff8Schristos return NULL; 2216*8e33eff8Schristos } 2217*8e33eff8Schristos prof_realloc(tsd, p, usize, tctx, prof_activex, true, old_ptr, 2218*8e33eff8Schristos old_usize, old_tctx); 2219*8e33eff8Schristos 2220*8e33eff8Schristos return p; 2221*8e33eff8Schristos } 2222*8e33eff8Schristos 2223*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void 2224*8e33eff8Schristos ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { 2225*8e33eff8Schristos if (!slow_path) { 2226*8e33eff8Schristos tsd_assert_fast(tsd); 2227*8e33eff8Schristos } 2228*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2229*8e33eff8Schristos if (tsd_reentrancy_level_get(tsd) != 0) { 2230*8e33eff8Schristos assert(slow_path); 2231*8e33eff8Schristos } 2232*8e33eff8Schristos 2233*8e33eff8Schristos assert(ptr != NULL); 2234*8e33eff8Schristos assert(malloc_initialized() || IS_INITIALIZER); 2235*8e33eff8Schristos 2236*8e33eff8Schristos alloc_ctx_t alloc_ctx; 2237*8e33eff8Schristos rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2238*8e33eff8Schristos rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2239*8e33eff8Schristos (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2240*8e33eff8Schristos assert(alloc_ctx.szind != NSIZES); 2241*8e33eff8Schristos 2242*8e33eff8Schristos size_t usize; 2243*8e33eff8Schristos if (config_prof && opt_prof) { 2244*8e33eff8Schristos usize = sz_index2size(alloc_ctx.szind); 2245*8e33eff8Schristos prof_free(tsd, ptr, usize, &alloc_ctx); 2246*8e33eff8Schristos } else if (config_stats) { 2247*8e33eff8Schristos usize = sz_index2size(alloc_ctx.szind); 2248*8e33eff8Schristos } 2249*8e33eff8Schristos if (config_stats) { 2250*8e33eff8Schristos *tsd_thread_deallocatedp_get(tsd) += usize; 2251*8e33eff8Schristos } 2252*8e33eff8Schristos 2253*8e33eff8Schristos if (likely(!slow_path)) { 2254*8e33eff8Schristos idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2255*8e33eff8Schristos false); 2256*8e33eff8Schristos } else { 2257*8e33eff8Schristos idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2258*8e33eff8Schristos true); 2259*8e33eff8Schristos } 2260*8e33eff8Schristos } 2261*8e33eff8Schristos 2262*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void 2263*8e33eff8Schristos isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { 2264*8e33eff8Schristos if (!slow_path) { 2265*8e33eff8Schristos tsd_assert_fast(tsd); 2266*8e33eff8Schristos } 2267*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2268*8e33eff8Schristos if (tsd_reentrancy_level_get(tsd) != 0) { 2269*8e33eff8Schristos assert(slow_path); 2270*8e33eff8Schristos } 2271*8e33eff8Schristos 2272*8e33eff8Schristos assert(ptr != NULL); 2273*8e33eff8Schristos assert(malloc_initialized() || IS_INITIALIZER); 
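/*
 * What follows picks one of three ways to recover the size/slab info
 * needed for the deallocation at the bottom: a szind computed straight
 * from usize (when cache-oblivious layout is off and the pointer's
 * alignment rules out a sampled allocation), an rtree lookup (when
 * profiling must see the exact metadata), or no context at all.
 */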
2274*8e33eff8Schristos 2275*8e33eff8Schristos alloc_ctx_t alloc_ctx, *ctx; 2276*8e33eff8Schristos if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { 2277*8e33eff8Schristos /* 2278*8e33eff8Schristos * When cache_oblivious is disabled and ptr is not page aligned, 2279*8e33eff8Schristos * the allocation was not sampled -- usize can be used to 2280*8e33eff8Schristos * determine szind directly. 2281*8e33eff8Schristos */ 2282*8e33eff8Schristos alloc_ctx.szind = sz_size2index(usize); 2283*8e33eff8Schristos alloc_ctx.slab = true; 2284*8e33eff8Schristos ctx = &alloc_ctx; 2285*8e33eff8Schristos if (config_debug) { 2286*8e33eff8Schristos alloc_ctx_t dbg_ctx; 2287*8e33eff8Schristos rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2288*8e33eff8Schristos rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, 2289*8e33eff8Schristos rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, 2290*8e33eff8Schristos &dbg_ctx.slab); 2291*8e33eff8Schristos assert(dbg_ctx.szind == alloc_ctx.szind); 2292*8e33eff8Schristos assert(dbg_ctx.slab == alloc_ctx.slab); 2293*8e33eff8Schristos } 2294*8e33eff8Schristos } else if (config_prof && opt_prof) { 2295*8e33eff8Schristos rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2296*8e33eff8Schristos rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2297*8e33eff8Schristos (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2298*8e33eff8Schristos assert(alloc_ctx.szind == sz_size2index(usize)); 2299*8e33eff8Schristos ctx = &alloc_ctx; 2300*8e33eff8Schristos } else { 2301*8e33eff8Schristos ctx = NULL; 2302*8e33eff8Schristos } 2303*8e33eff8Schristos 2304*8e33eff8Schristos if (config_prof && opt_prof) { 2305*8e33eff8Schristos prof_free(tsd, ptr, usize, ctx); 2306*8e33eff8Schristos } 2307*8e33eff8Schristos if (config_stats) { 2308*8e33eff8Schristos *tsd_thread_deallocatedp_get(tsd) += usize; 2309*8e33eff8Schristos } 2310*8e33eff8Schristos 2311*8e33eff8Schristos if (likely(!slow_path)) { 2312*8e33eff8Schristos isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); 2313*8e33eff8Schristos } else { 2314*8e33eff8Schristos isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); 2315*8e33eff8Schristos } 2316*8e33eff8Schristos } 2317*8e33eff8Schristos 2318*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2319*8e33eff8Schristos void JEMALLOC_NOTHROW * 2320*8e33eff8Schristos JEMALLOC_ALLOC_SIZE(2) 2321*8e33eff8Schristos je_realloc(void *ptr, size_t size) { 2322*8e33eff8Schristos void *ret; 2323*8e33eff8Schristos tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); 2324*8e33eff8Schristos size_t usize JEMALLOC_CC_SILENCE_INIT(0); 2325*8e33eff8Schristos size_t old_usize = 0; 2326*8e33eff8Schristos 2327*8e33eff8Schristos LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); 2328*8e33eff8Schristos 2329*8e33eff8Schristos if (unlikely(size == 0)) { 2330*8e33eff8Schristos #if 0 2331*8e33eff8Schristos // http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_400 2332*8e33eff8Schristos if (ptr != NULL) { 2333*8e33eff8Schristos /* realloc(ptr, 0) is equivalent to free(ptr). 
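 * This branch is compiled out: WG14 DR 400 (linked above) made
 * realloc(ptr, 0) implementation-defined, and this implementation
 * instead falls through and bumps the request to one byte below.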
*/ 2334*8e33eff8Schristos UTRACE(ptr, 0, 0); 2335*8e33eff8Schristos tcache_t *tcache; 2336*8e33eff8Schristos tsd_t *tsd = tsd_fetch(); 2337*8e33eff8Schristos if (tsd_reentrancy_level_get(tsd) == 0) { 2338*8e33eff8Schristos tcache = tcache_get(tsd); 2339*8e33eff8Schristos } else { 2340*8e33eff8Schristos tcache = NULL; 2341*8e33eff8Schristos } 2342*8e33eff8Schristos ifree(tsd, ptr, tcache, true); 2343*8e33eff8Schristos 2344*8e33eff8Schristos LOG("core.realloc.exit", "result: %p", NULL); 2345*8e33eff8Schristos return NULL; 2346*8e33eff8Schristos } 2347*8e33eff8Schristos #endif 2348*8e33eff8Schristos size = 1; 2349*8e33eff8Schristos } 2350*8e33eff8Schristos 2351*8e33eff8Schristos if (likely(ptr != NULL)) { 2352*8e33eff8Schristos assert(malloc_initialized() || IS_INITIALIZER); 2353*8e33eff8Schristos tsd_t *tsd = tsd_fetch(); 2354*8e33eff8Schristos 2355*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2356*8e33eff8Schristos 2357*8e33eff8Schristos alloc_ctx_t alloc_ctx; 2358*8e33eff8Schristos rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2359*8e33eff8Schristos rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2360*8e33eff8Schristos (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2361*8e33eff8Schristos assert(alloc_ctx.szind != NSIZES); 2362*8e33eff8Schristos old_usize = sz_index2size(alloc_ctx.szind); 2363*8e33eff8Schristos assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2364*8e33eff8Schristos if (config_prof && opt_prof) { 2365*8e33eff8Schristos usize = sz_s2u(size); 2366*8e33eff8Schristos ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? 2367*8e33eff8Schristos NULL : irealloc_prof(tsd, ptr, old_usize, usize, 2368*8e33eff8Schristos &alloc_ctx); 2369*8e33eff8Schristos } else { 2370*8e33eff8Schristos if (config_stats) { 2371*8e33eff8Schristos usize = sz_s2u(size); 2372*8e33eff8Schristos } 2373*8e33eff8Schristos ret = iralloc(tsd, ptr, old_usize, size, 0, false); 2374*8e33eff8Schristos } 2375*8e33eff8Schristos tsdn = tsd_tsdn(tsd); 2376*8e33eff8Schristos } else { 2377*8e33eff8Schristos /* realloc(NULL, size) is equivalent to malloc(size). 
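 * For example, realloc(NULL, 100) lands here and behaves exactly like
 * je_malloc(100), including its errno and logging behavior.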
*/ 2378*8e33eff8Schristos void *ret1 = je_malloc(size); 2379*8e33eff8Schristos LOG("core.realloc.exit", "result: %p", ret1); 2380*8e33eff8Schristos return ret1; 2381*8e33eff8Schristos } 2382*8e33eff8Schristos 2383*8e33eff8Schristos if (unlikely(ret == NULL)) { 2384*8e33eff8Schristos if (config_xmalloc && unlikely(opt_xmalloc)) { 2385*8e33eff8Schristos malloc_write("<jemalloc>: Error in realloc(): " 2386*8e33eff8Schristos "out of memory\n"); 2387*8e33eff8Schristos abort(); 2388*8e33eff8Schristos } 2389*8e33eff8Schristos set_errno(ENOMEM); 2390*8e33eff8Schristos } 2391*8e33eff8Schristos if (config_stats && likely(ret != NULL)) { 2392*8e33eff8Schristos tsd_t *tsd; 2393*8e33eff8Schristos 2394*8e33eff8Schristos assert(usize == isalloc(tsdn, ret)); 2395*8e33eff8Schristos tsd = tsdn_tsd(tsdn); 2396*8e33eff8Schristos *tsd_thread_allocatedp_get(tsd) += usize; 2397*8e33eff8Schristos *tsd_thread_deallocatedp_get(tsd) += old_usize; 2398*8e33eff8Schristos } 2399*8e33eff8Schristos UTRACE(ptr, size, ret); 2400*8e33eff8Schristos check_entry_exit_locking(tsdn); 2401*8e33eff8Schristos 2402*8e33eff8Schristos LOG("core.realloc.exit", "result: %p", ret); 2403*8e33eff8Schristos return ret; 2404*8e33eff8Schristos } 2405*8e33eff8Schristos 2406*8e33eff8Schristos JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2407*8e33eff8Schristos je_free(void *ptr) { 2408*8e33eff8Schristos LOG("core.free.entry", "ptr: %p", ptr); 2409*8e33eff8Schristos 2410*8e33eff8Schristos UTRACE(ptr, 0, 0); 2411*8e33eff8Schristos if (likely(ptr != NULL)) { 2412*8e33eff8Schristos /* 2413*8e33eff8Schristos * We avoid setting up tsd fully (e.g. tcache, arena binding) 2414*8e33eff8Schristos * based on only free() calls -- other activities trigger the 2415*8e33eff8Schristos * minimal to full transition. This is because free() may 2416*8e33eff8Schristos * happen during thread shutdown after tls deallocation: if a 2417*8e33eff8Schristos * thread never had any malloc activities until then, a 2418*8e33eff8Schristos * fully-setup tsd won't be destructed properly. 2419*8e33eff8Schristos */ 2420*8e33eff8Schristos tsd_t *tsd = tsd_fetch_min(); 2421*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2422*8e33eff8Schristos 2423*8e33eff8Schristos tcache_t *tcache; 2424*8e33eff8Schristos if (likely(tsd_fast(tsd))) { 2425*8e33eff8Schristos tsd_assert_fast(tsd); 2426*8e33eff8Schristos /* Unconditionally get tcache ptr on fast path. */ 2427*8e33eff8Schristos tcache = tsd_tcachep_get(tsd); 2428*8e33eff8Schristos ifree(tsd, ptr, tcache, false); 2429*8e33eff8Schristos } else { 2430*8e33eff8Schristos if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2431*8e33eff8Schristos tcache = tcache_get(tsd); 2432*8e33eff8Schristos } else { 2433*8e33eff8Schristos tcache = NULL; 2434*8e33eff8Schristos } 2435*8e33eff8Schristos ifree(tsd, ptr, tcache, true); 2436*8e33eff8Schristos } 2437*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2438*8e33eff8Schristos } 2439*8e33eff8Schristos LOG("core.free.exit", ""); 2440*8e33eff8Schristos } 2441*8e33eff8Schristos 2442*8e33eff8Schristos /* 2443*8e33eff8Schristos * End malloc(3)-compatible functions. 2444*8e33eff8Schristos */ 2445*8e33eff8Schristos /******************************************************************************/ 2446*8e33eff8Schristos /* 2447*8e33eff8Schristos * Begin non-standard override functions. 
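 *
 * (memalign(3) and valloc(3) are obsolete interfaces; each is compiled
 * in only when the platform configuration defines the corresponding
 * JEMALLOC_OVERRIDE_* macro.)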
2448*8e33eff8Schristos */ 2449*8e33eff8Schristos 2450*8e33eff8Schristos #ifdef JEMALLOC_OVERRIDE_MEMALIGN 2451*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2452*8e33eff8Schristos void JEMALLOC_NOTHROW * 2453*8e33eff8Schristos JEMALLOC_ATTR(malloc) 2454*8e33eff8Schristos je_memalign(size_t alignment, size_t size) { 2455*8e33eff8Schristos void *ret; 2456*8e33eff8Schristos static_opts_t sopts; 2457*8e33eff8Schristos dynamic_opts_t dopts; 2458*8e33eff8Schristos 2459*8e33eff8Schristos LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, 2460*8e33eff8Schristos size); 2461*8e33eff8Schristos 2462*8e33eff8Schristos static_opts_init(&sopts); 2463*8e33eff8Schristos dynamic_opts_init(&dopts); 2464*8e33eff8Schristos 2465*8e33eff8Schristos sopts.bump_empty_alloc = true; 2466*8e33eff8Schristos sopts.min_alignment = 1; 2467*8e33eff8Schristos sopts.oom_string = 2468*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2469*8e33eff8Schristos sopts.invalid_alignment_string = 2470*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2471*8e33eff8Schristos sopts.null_out_result_on_error = true; 2472*8e33eff8Schristos 2473*8e33eff8Schristos dopts.result = &ret; 2474*8e33eff8Schristos dopts.num_items = 1; 2475*8e33eff8Schristos dopts.item_size = size; 2476*8e33eff8Schristos dopts.alignment = alignment; 2477*8e33eff8Schristos 2478*8e33eff8Schristos imalloc(&sopts, &dopts); 2479*8e33eff8Schristos 2480*8e33eff8Schristos LOG("core.memalign.exit", "result: %p", ret); 2481*8e33eff8Schristos return ret; 2482*8e33eff8Schristos } 2483*8e33eff8Schristos #endif 2484*8e33eff8Schristos 2485*8e33eff8Schristos #ifdef JEMALLOC_OVERRIDE_VALLOC 2486*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2487*8e33eff8Schristos void JEMALLOC_NOTHROW * 2488*8e33eff8Schristos JEMALLOC_ATTR(malloc) 2489*8e33eff8Schristos je_valloc(size_t size) { 2490*8e33eff8Schristos void *ret; 2491*8e33eff8Schristos 2492*8e33eff8Schristos static_opts_t sopts; 2493*8e33eff8Schristos dynamic_opts_t dopts; 2494*8e33eff8Schristos 2495*8e33eff8Schristos LOG("core.valloc.entry", "size: %zu\n", size); 2496*8e33eff8Schristos 2497*8e33eff8Schristos static_opts_init(&sopts); 2498*8e33eff8Schristos dynamic_opts_init(&dopts); 2499*8e33eff8Schristos 2500*8e33eff8Schristos sopts.bump_empty_alloc = true; 2501*8e33eff8Schristos sopts.null_out_result_on_error = true; 2502*8e33eff8Schristos sopts.min_alignment = PAGE; 2503*8e33eff8Schristos sopts.oom_string = 2504*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2505*8e33eff8Schristos sopts.invalid_alignment_string = 2506*8e33eff8Schristos "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2507*8e33eff8Schristos 2508*8e33eff8Schristos dopts.result = &ret; 2509*8e33eff8Schristos dopts.num_items = 1; 2510*8e33eff8Schristos dopts.item_size = size; 2511*8e33eff8Schristos dopts.alignment = PAGE; 2512*8e33eff8Schristos 2513*8e33eff8Schristos imalloc(&sopts, &dopts); 2514*8e33eff8Schristos 2515*8e33eff8Schristos LOG("core.valloc.exit", "result: %p\n", ret); 2516*8e33eff8Schristos return ret; 2517*8e33eff8Schristos } 2518*8e33eff8Schristos #endif 2519*8e33eff8Schristos 2520*8e33eff8Schristos #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) 2521*8e33eff8Schristos /* 2522*8e33eff8Schristos * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 2523*8e33eff8Schristos * to inconsistently reference libc's 
2520*8e33eff8Schristos #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2521*8e33eff8Schristos /*
2522*8e33eff8Schristos  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2523*8e33eff8Schristos  * to inconsistently reference libc's malloc(3)-compatible functions
2524*8e33eff8Schristos  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2525*8e33eff8Schristos  *
2526*8e33eff8Schristos  * These definitions interpose hooks in glibc.  The functions are actually
2527*8e33eff8Schristos  * passed an extra argument for the caller return address, which will be
2528*8e33eff8Schristos  * ignored.
2529*8e33eff8Schristos  */
2530*8e33eff8Schristos JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2531*8e33eff8Schristos JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2532*8e33eff8Schristos JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2533*8e33eff8Schristos # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2534*8e33eff8Schristos JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2535*8e33eff8Schristos     je_memalign;
2536*8e33eff8Schristos # endif
2537*8e33eff8Schristos 
2538*8e33eff8Schristos # ifdef CPU_COUNT
2539*8e33eff8Schristos /*
2540*8e33eff8Schristos  * To enable static linking with glibc, the libc-specific malloc interface
2541*8e33eff8Schristos  * must also be implemented, so that none of glibc's malloc.o functions are
2542*8e33eff8Schristos  * added to the link.
2543*8e33eff8Schristos  */
2544*8e33eff8Schristos # define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2545*8e33eff8Schristos /* To force macro expansion of je_ prefix before stringification. */
2546*8e33eff8Schristos # define PREALIAS(je_fn)	ALIAS(je_fn)
2547*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2548*8e33eff8Schristos void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2549*8e33eff8Schristos # endif
2550*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2551*8e33eff8Schristos void __libc_free(void* ptr) PREALIAS(je_free);
2552*8e33eff8Schristos # endif
2553*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2554*8e33eff8Schristos void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2555*8e33eff8Schristos # endif
2556*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2557*8e33eff8Schristos void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2558*8e33eff8Schristos # endif
2559*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2560*8e33eff8Schristos void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2561*8e33eff8Schristos # endif
2562*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2563*8e33eff8Schristos void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2564*8e33eff8Schristos # endif
2565*8e33eff8Schristos # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2566*8e33eff8Schristos int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2567*8e33eff8Schristos # endif
2568*8e33eff8Schristos # undef PREALIAS
2569*8e33eff8Schristos # undef ALIAS
2570*8e33eff8Schristos # endif
2571*8e33eff8Schristos #endif
2572*8e33eff8Schristos 
2573*8e33eff8Schristos /*
2574*8e33eff8Schristos  * End non-standard override functions.
2575*8e33eff8Schristos  */
2576*8e33eff8Schristos /******************************************************************************/
2577*8e33eff8Schristos /*
2578*8e33eff8Schristos  * Begin non-standard functions.
2579*8e33eff8Schristos  */
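/*
 * Caller-side sketch for the *allocx() entry points that follow
 * (illustrative only; the flag-constructor macros come from the public
 * jemalloc.h header):
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	p = rallocx(p, 8192, MALLOCX_ALIGN(64));
 *	size_t grown = xallocx(p, 8192, 4096, 0);	// in-place attempt
 *	dallocx(p, 0);
 *
 * A zero flags word selects the defaults that the decoding below falls
 * back to: natural alignment, no forced zeroing, the automatic tcache,
 * and automatic arena choice.
 */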
2580*8e33eff8Schristos 
2581*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2582*8e33eff8Schristos void JEMALLOC_NOTHROW *
2583*8e33eff8Schristos JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2584*8e33eff8Schristos je_mallocx(size_t size, int flags) {
2585*8e33eff8Schristos 	void *ret;
2586*8e33eff8Schristos 	static_opts_t sopts;
2587*8e33eff8Schristos 	dynamic_opts_t dopts;
2588*8e33eff8Schristos 
2589*8e33eff8Schristos 	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2590*8e33eff8Schristos 
2591*8e33eff8Schristos 	static_opts_init(&sopts);
2592*8e33eff8Schristos 	dynamic_opts_init(&dopts);
2593*8e33eff8Schristos 
2594*8e33eff8Schristos 	sopts.assert_nonempty_alloc = true;
2595*8e33eff8Schristos 	sopts.null_out_result_on_error = true;
2596*8e33eff8Schristos 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2597*8e33eff8Schristos 
2598*8e33eff8Schristos 	dopts.result = &ret;
2599*8e33eff8Schristos 	dopts.num_items = 1;
2600*8e33eff8Schristos 	dopts.item_size = size;
2601*8e33eff8Schristos 	if (unlikely(flags != 0)) {
2602*8e33eff8Schristos 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2603*8e33eff8Schristos 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2604*8e33eff8Schristos 		}
2605*8e33eff8Schristos 
2606*8e33eff8Schristos 		dopts.zero = MALLOCX_ZERO_GET(flags);
2607*8e33eff8Schristos 
2608*8e33eff8Schristos 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2609*8e33eff8Schristos 			if ((flags & MALLOCX_TCACHE_MASK)
2610*8e33eff8Schristos 			    == MALLOCX_TCACHE_NONE) {
2611*8e33eff8Schristos 				dopts.tcache_ind = TCACHE_IND_NONE;
2612*8e33eff8Schristos 			} else {
2613*8e33eff8Schristos 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2614*8e33eff8Schristos 			}
2615*8e33eff8Schristos 		} else {
2616*8e33eff8Schristos 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2617*8e33eff8Schristos 		}
2618*8e33eff8Schristos 
2619*8e33eff8Schristos 		if ((flags & MALLOCX_ARENA_MASK) != 0)
2620*8e33eff8Schristos 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2621*8e33eff8Schristos 	}
2622*8e33eff8Schristos 
2623*8e33eff8Schristos 	imalloc(&sopts, &dopts);
2624*8e33eff8Schristos 
2625*8e33eff8Schristos 	LOG("core.mallocx.exit", "result: %p", ret);
2626*8e33eff8Schristos 	return ret;
2627*8e33eff8Schristos }
2628*8e33eff8Schristos 
2629*8e33eff8Schristos static void *
2630*8e33eff8Schristos irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2631*8e33eff8Schristos     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2632*8e33eff8Schristos     prof_tctx_t *tctx) {
2633*8e33eff8Schristos 	void *p;
2634*8e33eff8Schristos 
2635*8e33eff8Schristos 	if (tctx == NULL) {
2636*8e33eff8Schristos 		return NULL;
2637*8e33eff8Schristos 	}
2638*8e33eff8Schristos 	if (usize <= SMALL_MAXCLASS) {
2639*8e33eff8Schristos 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2640*8e33eff8Schristos 		    alignment, zero, tcache, arena);
2641*8e33eff8Schristos 		if (p == NULL) {
2642*8e33eff8Schristos 			return NULL;
2643*8e33eff8Schristos 		}
2644*8e33eff8Schristos 		arena_prof_promote(tsdn, p, usize);
2645*8e33eff8Schristos 	} else {
2646*8e33eff8Schristos 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2647*8e33eff8Schristos 		    tcache, arena);
2648*8e33eff8Schristos 	}
2649*8e33eff8Schristos 
2650*8e33eff8Schristos 	return p;
2651*8e33eff8Schristos }
2652*8e33eff8Schristos 
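/*
 * Descriptive note on irallocx_prof_sample() above: a sampled allocation
 * that would land in a small size class is deliberately backed by a
 * LARGE_MINCLASS extent so that it can carry profiling metadata, and
 * arena_prof_promote() then records the requested usize so the
 * caller-visible accounting still matches the small request.
 */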
2653*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void *
2654*8e33eff8Schristos irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2655*8e33eff8Schristos     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2656*8e33eff8Schristos     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2657*8e33eff8Schristos 	void *p;
2658*8e33eff8Schristos 	bool prof_activex;
2659*8e33eff8Schristos 	prof_tctx_t *old_tctx, *tctx;
2660*8e33eff8Schristos 
2661*8e33eff8Schristos 	prof_activex = prof_active_get_unlocked();
2662*8e33eff8Schristos 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2663*8e33eff8Schristos 	tctx = prof_alloc_prep(tsd, *usize, prof_activex, false);
2664*8e33eff8Schristos 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2665*8e33eff8Schristos 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2666*8e33eff8Schristos 		    *usize, alignment, zero, tcache, arena, tctx);
2667*8e33eff8Schristos 	} else {
2668*8e33eff8Schristos 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2669*8e33eff8Schristos 		    zero, tcache, arena);
2670*8e33eff8Schristos 	}
2671*8e33eff8Schristos 	if (unlikely(p == NULL)) {
2672*8e33eff8Schristos 		prof_alloc_rollback(tsd, tctx, false);
2673*8e33eff8Schristos 		return NULL;
2674*8e33eff8Schristos 	}
2675*8e33eff8Schristos 
2676*8e33eff8Schristos 	if (p == old_ptr && alignment != 0) {
2677*8e33eff8Schristos 		/*
2678*8e33eff8Schristos 		 * The allocation did not move, so it is possible that the size
2679*8e33eff8Schristos 		 * class is smaller than would guarantee the requested
2680*8e33eff8Schristos 		 * alignment, and that the alignment constraint was
2681*8e33eff8Schristos 		 * serendipitously satisfied.  Additionally, old_usize may not
2682*8e33eff8Schristos 		 * be the same as the current usize because of in-place large
2683*8e33eff8Schristos 		 * reallocation.  Therefore, query the actual value of usize.
2684*8e33eff8Schristos 		 */
2685*8e33eff8Schristos 		*usize = isalloc(tsd_tsdn(tsd), p);
2686*8e33eff8Schristos 	}
2687*8e33eff8Schristos 	prof_realloc(tsd, p, *usize, tctx, prof_activex, false, old_ptr,
2688*8e33eff8Schristos 	    old_usize, old_tctx);
2689*8e33eff8Schristos 
2690*8e33eff8Schristos 	return p;
2691*8e33eff8Schristos }
2692*8e33eff8Schristos 
2693*8e33eff8Schristos JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2694*8e33eff8Schristos void JEMALLOC_NOTHROW *
2695*8e33eff8Schristos JEMALLOC_ALLOC_SIZE(2)
2696*8e33eff8Schristos je_rallocx(void *ptr, size_t size, int flags) {
2697*8e33eff8Schristos 	void *p;
2698*8e33eff8Schristos 	tsd_t *tsd;
2699*8e33eff8Schristos 	size_t usize;
2700*8e33eff8Schristos 	size_t old_usize;
2701*8e33eff8Schristos 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2702*8e33eff8Schristos 	bool zero = flags & MALLOCX_ZERO;
2703*8e33eff8Schristos 	arena_t *arena;
2704*8e33eff8Schristos 	tcache_t *tcache;
2705*8e33eff8Schristos 
2706*8e33eff8Schristos 	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2707*8e33eff8Schristos 	    size, flags);
2708*8e33eff8Schristos 
2709*8e33eff8Schristos 
2710*8e33eff8Schristos 	assert(ptr != NULL);
2711*8e33eff8Schristos 	assert(size != 0);
2712*8e33eff8Schristos 	assert(malloc_initialized() || IS_INITIALIZER);
2713*8e33eff8Schristos 	tsd = tsd_fetch();
2714*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
2715*8e33eff8Schristos 
2716*8e33eff8Schristos 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2717*8e33eff8Schristos 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2718*8e33eff8Schristos 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2719*8e33eff8Schristos 		if (unlikely(arena == NULL)) {
2720*8e33eff8Schristos 			goto label_oom;
2721*8e33eff8Schristos 		}
2722*8e33eff8Schristos 	} else {
2723*8e33eff8Schristos 		arena = NULL;
2724*8e33eff8Schristos 	}
2725*8e33eff8Schristos 
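	/*
	 * Tcache selection mirrors the decoding in je_mallocx() above:
	 * MALLOCX_TCACHE_NONE bypasses caching entirely, an explicit index
	 * (e.g. one obtained through the "tcache.create" mallctl) selects a
	 * manually managed cache, and the absence of tcache bits selects the
	 * thread's automatic tcache.
	 */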
2726*8e33eff8Schristos 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2727*8e33eff8Schristos 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2728*8e33eff8Schristos 			tcache = NULL;
2729*8e33eff8Schristos 		} else {
2730*8e33eff8Schristos 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2731*8e33eff8Schristos 		}
2732*8e33eff8Schristos 	} else {
2733*8e33eff8Schristos 		tcache = tcache_get(tsd);
2734*8e33eff8Schristos 	}
2735*8e33eff8Schristos 
2736*8e33eff8Schristos 	alloc_ctx_t alloc_ctx;
2737*8e33eff8Schristos 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2738*8e33eff8Schristos 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2739*8e33eff8Schristos 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2740*8e33eff8Schristos 	assert(alloc_ctx.szind != NSIZES);
2741*8e33eff8Schristos 	old_usize = sz_index2size(alloc_ctx.szind);
2742*8e33eff8Schristos 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2743*8e33eff8Schristos 	if (config_prof && opt_prof) {
2744*8e33eff8Schristos 		usize = (alignment == 0) ?
2745*8e33eff8Schristos 		    sz_s2u(size) : sz_sa2u(size, alignment);
2746*8e33eff8Schristos 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2747*8e33eff8Schristos 			goto label_oom;
2748*8e33eff8Schristos 		}
2749*8e33eff8Schristos 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2750*8e33eff8Schristos 		    zero, tcache, arena, &alloc_ctx);
2751*8e33eff8Schristos 		if (unlikely(p == NULL)) {
2752*8e33eff8Schristos 			goto label_oom;
2753*8e33eff8Schristos 		}
2754*8e33eff8Schristos 	} else {
2755*8e33eff8Schristos 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2756*8e33eff8Schristos 		    zero, tcache, arena);
2757*8e33eff8Schristos 		if (unlikely(p == NULL)) {
2758*8e33eff8Schristos 			goto label_oom;
2759*8e33eff8Schristos 		}
2760*8e33eff8Schristos 		if (config_stats) {
2761*8e33eff8Schristos 			usize = isalloc(tsd_tsdn(tsd), p);
2762*8e33eff8Schristos 		}
2763*8e33eff8Schristos 	}
2764*8e33eff8Schristos 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2765*8e33eff8Schristos 
2766*8e33eff8Schristos 	if (config_stats) {
2767*8e33eff8Schristos 		*tsd_thread_allocatedp_get(tsd) += usize;
2768*8e33eff8Schristos 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2769*8e33eff8Schristos 	}
2770*8e33eff8Schristos 	UTRACE(ptr, size, p);
2771*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
2772*8e33eff8Schristos 
2773*8e33eff8Schristos 	LOG("core.rallocx.exit", "result: %p", p);
2774*8e33eff8Schristos 	return p;
2775*8e33eff8Schristos label_oom:
2776*8e33eff8Schristos 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2777*8e33eff8Schristos 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2778*8e33eff8Schristos 		abort();
2779*8e33eff8Schristos 	}
2780*8e33eff8Schristos 	UTRACE(ptr, size, 0);
2781*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
2782*8e33eff8Schristos 
2783*8e33eff8Schristos 	LOG("core.rallocx.exit", "result: %p", NULL);
2784*8e33eff8Schristos 	return NULL;
2785*8e33eff8Schristos }
2786*8e33eff8Schristos 
2787*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE size_t
2788*8e33eff8Schristos ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2789*8e33eff8Schristos     size_t extra, size_t alignment, bool zero) {
2790*8e33eff8Schristos 	size_t usize;
2791*8e33eff8Schristos 
2792*8e33eff8Schristos 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2793*8e33eff8Schristos 		return old_usize;
2794*8e33eff8Schristos 	}
2795*8e33eff8Schristos 	usize = isalloc(tsdn, ptr);
2796*8e33eff8Schristos 
2797*8e33eff8Schristos 	return usize;
2798*8e33eff8Schristos }
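/*
 * Reading note for ixallocx_helper() above: ixalloc() returning true means
 * the extent could not be resized in place, in which case the unchanged
 * old_usize is reported back to the caller; otherwise the new usable size
 * is re-read from the extent metadata.  This is what gives xallocx() its
 * "never moves, returns the resulting usable size" contract.
 */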
2799*8e33eff8Schristos 
2800*8e33eff8Schristos static size_t
2801*8e33eff8Schristos ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2802*8e33eff8Schristos     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2803*8e33eff8Schristos 	size_t usize;
2804*8e33eff8Schristos 
2805*8e33eff8Schristos 	if (tctx == NULL) {
2806*8e33eff8Schristos 		return old_usize;
2807*8e33eff8Schristos 	}
2808*8e33eff8Schristos 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2809*8e33eff8Schristos 	    zero);
2810*8e33eff8Schristos 
2811*8e33eff8Schristos 	return usize;
2812*8e33eff8Schristos }
2813*8e33eff8Schristos 
2814*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE size_t
2815*8e33eff8Schristos ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2816*8e33eff8Schristos     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2817*8e33eff8Schristos 	size_t usize_max, usize;
2818*8e33eff8Schristos 	bool prof_activex;
2819*8e33eff8Schristos 	prof_tctx_t *old_tctx, *tctx;
2820*8e33eff8Schristos 
2821*8e33eff8Schristos 	prof_activex = prof_active_get_unlocked();
2822*8e33eff8Schristos 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2823*8e33eff8Schristos 	/*
2824*8e33eff8Schristos 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2825*8e33eff8Schristos 	 * Therefore, compute its maximum possible value and use that in
2826*8e33eff8Schristos 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2827*8e33eff8Schristos 	 * prof_realloc() will use the actual usize to decide whether to sample.
2828*8e33eff8Schristos 	 */
2829*8e33eff8Schristos 	if (alignment == 0) {
2830*8e33eff8Schristos 		usize_max = sz_s2u(size+extra);
2831*8e33eff8Schristos 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2832*8e33eff8Schristos 	} else {
2833*8e33eff8Schristos 		usize_max = sz_sa2u(size+extra, alignment);
2834*8e33eff8Schristos 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2835*8e33eff8Schristos 			/*
2836*8e33eff8Schristos 			 * usize_max is out of range, and chances are that
2837*8e33eff8Schristos 			 * allocation will fail, but use the maximum possible
2838*8e33eff8Schristos 			 * value and carry on with prof_alloc_prep(), just in
2839*8e33eff8Schristos 			 * case allocation succeeds.
2840*8e33eff8Schristos 			 */
2841*8e33eff8Schristos 			usize_max = LARGE_MAXCLASS;
2842*8e33eff8Schristos 		}
2843*8e33eff8Schristos 	}
2844*8e33eff8Schristos 	tctx = prof_alloc_prep(tsd, usize_max, prof_activex, false);
2845*8e33eff8Schristos 
2846*8e33eff8Schristos 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2847*8e33eff8Schristos 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2848*8e33eff8Schristos 		    size, extra, alignment, zero, tctx);
2849*8e33eff8Schristos 	} else {
2850*8e33eff8Schristos 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2851*8e33eff8Schristos 		    extra, alignment, zero);
2852*8e33eff8Schristos 	}
2853*8e33eff8Schristos 	if (usize == old_usize) {
2854*8e33eff8Schristos 		prof_alloc_rollback(tsd, tctx, false);
2855*8e33eff8Schristos 		return usize;
2856*8e33eff8Schristos 	}
2857*8e33eff8Schristos 	prof_realloc(tsd, ptr, usize, tctx, prof_activex, false, ptr, old_usize,
2858*8e33eff8Schristos 	    old_tctx);
2859*8e33eff8Schristos 
2860*8e33eff8Schristos 	return usize;
2861*8e33eff8Schristos }
2862*8e33eff8Schristos 
2863*8e33eff8Schristos JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2864*8e33eff8Schristos je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2865*8e33eff8Schristos 	tsd_t *tsd;
2866*8e33eff8Schristos 	size_t usize, old_usize;
2867*8e33eff8Schristos 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2868*8e33eff8Schristos 	bool zero = flags & MALLOCX_ZERO;
2869*8e33eff8Schristos 
2870*8e33eff8Schristos 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2871*8e33eff8Schristos 	    "flags: %d", ptr, size, extra, flags);
2872*8e33eff8Schristos 
2873*8e33eff8Schristos 	assert(ptr != NULL);
2874*8e33eff8Schristos 	assert(size != 0);
2875*8e33eff8Schristos 	assert(SIZE_T_MAX - size >= extra);
2876*8e33eff8Schristos 	assert(malloc_initialized() || IS_INITIALIZER);
2877*8e33eff8Schristos 	tsd = tsd_fetch();
2878*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
2879*8e33eff8Schristos 
2880*8e33eff8Schristos 	alloc_ctx_t alloc_ctx;
2881*8e33eff8Schristos 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2882*8e33eff8Schristos 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2883*8e33eff8Schristos 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2884*8e33eff8Schristos 	assert(alloc_ctx.szind != NSIZES);
2885*8e33eff8Schristos 	old_usize = sz_index2size(alloc_ctx.szind);
2886*8e33eff8Schristos 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2887*8e33eff8Schristos 	/*
2888*8e33eff8Schristos 	 * The API explicitly absolves itself of protecting against (size +
2889*8e33eff8Schristos 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2890*8e33eff8Schristos 	 * exceeding LARGE_MAXCLASS.
2891*8e33eff8Schristos 	 *
2892*8e33eff8Schristos 	 * Ordinarily, size limit checking is handled deeper down, but here we
2893*8e33eff8Schristos 	 * have to check as part of (size + extra) clamping, since we need the
2894*8e33eff8Schristos 	 * clamped value in the above helper functions.
2895*8e33eff8Schristos 	 */
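	/*
	 * Worked example of the clamp below (illustrative): with
	 * size == LARGE_MAXCLASS - 1 and extra == 16, extra is clamped to 1
	 * so that size + extra == LARGE_MAXCLASS stays allocatable; with
	 * size > LARGE_MAXCLASS no resize can succeed at all, so xallocx()
	 * simply reports the unchanged old_usize.
	 */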
2895*8e33eff8Schristos */ 2896*8e33eff8Schristos if (unlikely(size > LARGE_MAXCLASS)) { 2897*8e33eff8Schristos usize = old_usize; 2898*8e33eff8Schristos goto label_not_resized; 2899*8e33eff8Schristos } 2900*8e33eff8Schristos if (unlikely(LARGE_MAXCLASS - size < extra)) { 2901*8e33eff8Schristos extra = LARGE_MAXCLASS - size; 2902*8e33eff8Schristos } 2903*8e33eff8Schristos 2904*8e33eff8Schristos if (config_prof && opt_prof) { 2905*8e33eff8Schristos usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, 2906*8e33eff8Schristos alignment, zero, &alloc_ctx); 2907*8e33eff8Schristos } else { 2908*8e33eff8Schristos usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, 2909*8e33eff8Schristos extra, alignment, zero); 2910*8e33eff8Schristos } 2911*8e33eff8Schristos if (unlikely(usize == old_usize)) { 2912*8e33eff8Schristos goto label_not_resized; 2913*8e33eff8Schristos } 2914*8e33eff8Schristos 2915*8e33eff8Schristos if (config_stats) { 2916*8e33eff8Schristos *tsd_thread_allocatedp_get(tsd) += usize; 2917*8e33eff8Schristos *tsd_thread_deallocatedp_get(tsd) += old_usize; 2918*8e33eff8Schristos } 2919*8e33eff8Schristos label_not_resized: 2920*8e33eff8Schristos UTRACE(ptr, size, ptr); 2921*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2922*8e33eff8Schristos 2923*8e33eff8Schristos LOG("core.xallocx.exit", "result: %zu", usize); 2924*8e33eff8Schristos return usize; 2925*8e33eff8Schristos } 2926*8e33eff8Schristos 2927*8e33eff8Schristos JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2928*8e33eff8Schristos JEMALLOC_ATTR(pure) 2929*8e33eff8Schristos je_sallocx(const void *ptr, UNUSED int flags) { 2930*8e33eff8Schristos size_t usize; 2931*8e33eff8Schristos tsdn_t *tsdn; 2932*8e33eff8Schristos 2933*8e33eff8Schristos LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); 2934*8e33eff8Schristos 2935*8e33eff8Schristos assert(malloc_initialized() || IS_INITIALIZER); 2936*8e33eff8Schristos assert(ptr != NULL); 2937*8e33eff8Schristos 2938*8e33eff8Schristos tsdn = tsdn_fetch(); 2939*8e33eff8Schristos check_entry_exit_locking(tsdn); 2940*8e33eff8Schristos 2941*8e33eff8Schristos if (config_debug || force_ivsalloc) { 2942*8e33eff8Schristos usize = ivsalloc(tsdn, ptr); 2943*8e33eff8Schristos assert(force_ivsalloc || usize != 0); 2944*8e33eff8Schristos } else { 2945*8e33eff8Schristos usize = isalloc(tsdn, ptr); 2946*8e33eff8Schristos } 2947*8e33eff8Schristos 2948*8e33eff8Schristos check_entry_exit_locking(tsdn); 2949*8e33eff8Schristos 2950*8e33eff8Schristos LOG("core.sallocx.exit", "result: %zu", usize); 2951*8e33eff8Schristos return usize; 2952*8e33eff8Schristos } 2953*8e33eff8Schristos 2954*8e33eff8Schristos JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2955*8e33eff8Schristos je_dallocx(void *ptr, int flags) { 2956*8e33eff8Schristos LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); 2957*8e33eff8Schristos 2958*8e33eff8Schristos assert(ptr != NULL); 2959*8e33eff8Schristos assert(malloc_initialized() || IS_INITIALIZER); 2960*8e33eff8Schristos 2961*8e33eff8Schristos tsd_t *tsd = tsd_fetch(); 2962*8e33eff8Schristos bool fast = tsd_fast(tsd); 2963*8e33eff8Schristos check_entry_exit_locking(tsd_tsdn(tsd)); 2964*8e33eff8Schristos 2965*8e33eff8Schristos tcache_t *tcache; 2966*8e33eff8Schristos if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { 2967*8e33eff8Schristos /* Not allowed to be reentrant and specify a custom tcache. 
2968*8e33eff8Schristos 		assert(tsd_reentrancy_level_get(tsd) == 0);
2969*8e33eff8Schristos 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2970*8e33eff8Schristos 			tcache = NULL;
2971*8e33eff8Schristos 		} else {
2972*8e33eff8Schristos 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2973*8e33eff8Schristos 		}
2974*8e33eff8Schristos 	} else {
2975*8e33eff8Schristos 		if (likely(fast)) {
2976*8e33eff8Schristos 			tcache = tsd_tcachep_get(tsd);
2977*8e33eff8Schristos 			assert(tcache == tcache_get(tsd));
2978*8e33eff8Schristos 		} else {
2979*8e33eff8Schristos 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2980*8e33eff8Schristos 				tcache = tcache_get(tsd);
2981*8e33eff8Schristos 			} else {
2982*8e33eff8Schristos 				tcache = NULL;
2983*8e33eff8Schristos 			}
2984*8e33eff8Schristos 		}
2985*8e33eff8Schristos 	}
2986*8e33eff8Schristos 
2987*8e33eff8Schristos 	UTRACE(ptr, 0, 0);
2988*8e33eff8Schristos 	if (likely(fast)) {
2989*8e33eff8Schristos 		tsd_assert_fast(tsd);
2990*8e33eff8Schristos 		ifree(tsd, ptr, tcache, false);
2991*8e33eff8Schristos 	} else {
2992*8e33eff8Schristos 		ifree(tsd, ptr, tcache, true);
2993*8e33eff8Schristos 	}
2994*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
2995*8e33eff8Schristos 
2996*8e33eff8Schristos 	LOG("core.dallocx.exit", "");
2997*8e33eff8Schristos }
2998*8e33eff8Schristos 
2999*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE size_t
3000*8e33eff8Schristos inallocx(tsdn_t *tsdn, size_t size, int flags) {
3001*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3002*8e33eff8Schristos 
3003*8e33eff8Schristos 	size_t usize;
3004*8e33eff8Schristos 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
3005*8e33eff8Schristos 		usize = sz_s2u(size);
3006*8e33eff8Schristos 	} else {
3007*8e33eff8Schristos 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
3008*8e33eff8Schristos 	}
3009*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3010*8e33eff8Schristos 	return usize;
3011*8e33eff8Schristos }
3012*8e33eff8Schristos 
3013*8e33eff8Schristos JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3014*8e33eff8Schristos je_sdallocx(void *ptr, size_t size, int flags) {
3015*8e33eff8Schristos 	assert(ptr != NULL);
3016*8e33eff8Schristos 	assert(malloc_initialized() || IS_INITIALIZER);
3017*8e33eff8Schristos 
3018*8e33eff8Schristos 	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3019*8e33eff8Schristos 	    size, flags);
3020*8e33eff8Schristos 
3021*8e33eff8Schristos 	tsd_t *tsd = tsd_fetch();
3022*8e33eff8Schristos 	bool fast = tsd_fast(tsd);
3023*8e33eff8Schristos 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3024*8e33eff8Schristos 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
3025*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3026*8e33eff8Schristos 
3027*8e33eff8Schristos 	tcache_t *tcache;
3028*8e33eff8Schristos 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
3029*8e33eff8Schristos 		/* Not allowed to be reentrant and specify a custom tcache. */
3030*8e33eff8Schristos 		assert(tsd_reentrancy_level_get(tsd) == 0);
3031*8e33eff8Schristos 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3032*8e33eff8Schristos 			tcache = NULL;
3033*8e33eff8Schristos 		} else {
3034*8e33eff8Schristos 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3035*8e33eff8Schristos 		}
3036*8e33eff8Schristos 	} else {
3037*8e33eff8Schristos 		if (likely(fast)) {
3038*8e33eff8Schristos 			tcache = tsd_tcachep_get(tsd);
3039*8e33eff8Schristos 			assert(tcache == tcache_get(tsd));
3040*8e33eff8Schristos 		} else {
3041*8e33eff8Schristos 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3042*8e33eff8Schristos 				tcache = tcache_get(tsd);
3043*8e33eff8Schristos 			} else {
3044*8e33eff8Schristos 				tcache = NULL;
3045*8e33eff8Schristos 			}
3046*8e33eff8Schristos 		}
3047*8e33eff8Schristos 	}
3048*8e33eff8Schristos 
3049*8e33eff8Schristos 	UTRACE(ptr, 0, 0);
3050*8e33eff8Schristos 	if (likely(fast)) {
3051*8e33eff8Schristos 		tsd_assert_fast(tsd);
3052*8e33eff8Schristos 		isfree(tsd, ptr, usize, tcache, false);
3053*8e33eff8Schristos 	} else {
3054*8e33eff8Schristos 		isfree(tsd, ptr, usize, tcache, true);
3055*8e33eff8Schristos 	}
3056*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3057*8e33eff8Schristos 
3058*8e33eff8Schristos 	LOG("core.sdallocx.exit", "");
3059*8e33eff8Schristos }
3060*8e33eff8Schristos 
3061*8e33eff8Schristos JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3062*8e33eff8Schristos JEMALLOC_ATTR(pure)
3063*8e33eff8Schristos je_nallocx(size_t size, int flags) {
3064*8e33eff8Schristos 	size_t usize;
3065*8e33eff8Schristos 	tsdn_t *tsdn;
3066*8e33eff8Schristos 
3067*8e33eff8Schristos 	assert(size != 0);
3068*8e33eff8Schristos 
3069*8e33eff8Schristos 	if (unlikely(malloc_init())) {
3070*8e33eff8Schristos 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3071*8e33eff8Schristos 		return 0;
3072*8e33eff8Schristos 	}
3073*8e33eff8Schristos 
3074*8e33eff8Schristos 	tsdn = tsdn_fetch();
3075*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3076*8e33eff8Schristos 
3077*8e33eff8Schristos 	usize = inallocx(tsdn, size, flags);
3078*8e33eff8Schristos 	if (unlikely(usize > LARGE_MAXCLASS)) {
3079*8e33eff8Schristos 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3080*8e33eff8Schristos 		return 0;
3081*8e33eff8Schristos 	}
3082*8e33eff8Schristos 
3083*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3084*8e33eff8Schristos 	LOG("core.nallocx.exit", "result: %zu", usize);
3085*8e33eff8Schristos 	return usize;
3086*8e33eff8Schristos }
3087*8e33eff8Schristos 
3088*8e33eff8Schristos JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3089*8e33eff8Schristos je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3090*8e33eff8Schristos     size_t newlen) {
3091*8e33eff8Schristos 	int ret;
3092*8e33eff8Schristos 	tsd_t *tsd;
3093*8e33eff8Schristos 
3094*8e33eff8Schristos 	LOG("core.mallctl.entry", "name: %s", name);
3095*8e33eff8Schristos 
3096*8e33eff8Schristos 	if (unlikely(malloc_init())) {
3097*8e33eff8Schristos 		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3098*8e33eff8Schristos 		return EAGAIN;
3099*8e33eff8Schristos 	}
3100*8e33eff8Schristos 
3101*8e33eff8Schristos 	tsd = tsd_fetch();
3102*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3103*8e33eff8Schristos 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3104*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3105*8e33eff8Schristos 
3106*8e33eff8Schristos 	LOG("core.mallctl.exit", "result: %d", ret);
3107*8e33eff8Schristos 	return ret;
3108*8e33eff8Schristos }
3109*8e33eff8Schristos 
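/*
 * Caller-side sketch for the mallctl family (illustrative only; the names
 * used are documented mallctl nodes):
 *
 *	size_t allocated, len = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &len, NULL, 0);
 *
 *	// Amortize the name lookup when the same node is read repeatedly:
 *	size_t mib[2], miblen = 2;
 *	mallctlnametomib("stats.allocated", mib, &miblen);
 *	mallctlbymib(mib, miblen, &allocated, &len, NULL, 0);
 *
 * Statistics are snapshotted when the "epoch" mallctl is written, so
 * refreshing it first is the documented way to read current values.
 */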
3110*8e33eff8Schristos JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3111*8e33eff8Schristos je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3112*8e33eff8Schristos 	int ret;
3113*8e33eff8Schristos 
3114*8e33eff8Schristos 	LOG("core.mallctlnametomib.entry", "name: %s", name);
3115*8e33eff8Schristos 
3116*8e33eff8Schristos 	if (unlikely(malloc_init())) {
3117*8e33eff8Schristos 		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3118*8e33eff8Schristos 		return EAGAIN;
3119*8e33eff8Schristos 	}
3120*8e33eff8Schristos 
3121*8e33eff8Schristos 	tsd_t *tsd = tsd_fetch();
3122*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3123*8e33eff8Schristos 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
3124*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3125*8e33eff8Schristos 
3126*8e33eff8Schristos 	LOG("core.mallctlnametomib.exit", "result: %d", ret);
3127*8e33eff8Schristos 	return ret;
3128*8e33eff8Schristos }
3129*8e33eff8Schristos 
3130*8e33eff8Schristos JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3131*8e33eff8Schristos je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3132*8e33eff8Schristos     void *newp, size_t newlen) {
3133*8e33eff8Schristos 	int ret;
3134*8e33eff8Schristos 	tsd_t *tsd;
3135*8e33eff8Schristos 
3136*8e33eff8Schristos 	LOG("core.mallctlbymib.entry", "");
3137*8e33eff8Schristos 
3138*8e33eff8Schristos 	if (unlikely(malloc_init())) {
3139*8e33eff8Schristos 		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3140*8e33eff8Schristos 		return EAGAIN;
3141*8e33eff8Schristos 	}
3142*8e33eff8Schristos 
3143*8e33eff8Schristos 	tsd = tsd_fetch();
3144*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3145*8e33eff8Schristos 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3146*8e33eff8Schristos 	check_entry_exit_locking(tsd_tsdn(tsd));
3147*8e33eff8Schristos 	LOG("core.mallctlbymib.exit", "result: %d", ret);
3148*8e33eff8Schristos 	return ret;
3149*8e33eff8Schristos }
3150*8e33eff8Schristos 
3151*8e33eff8Schristos JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3152*8e33eff8Schristos je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3153*8e33eff8Schristos     const char *opts) {
3154*8e33eff8Schristos 	tsdn_t *tsdn;
3155*8e33eff8Schristos 
3156*8e33eff8Schristos 	LOG("core.malloc_stats_print.entry", "");
3157*8e33eff8Schristos 
3158*8e33eff8Schristos 	tsdn = tsdn_fetch();
3159*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3160*8e33eff8Schristos 	stats_print(write_cb, cbopaque, opts);
3161*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3162*8e33eff8Schristos 	LOG("core.malloc_stats_print.exit", "");
3163*8e33eff8Schristos }
3164*8e33eff8Schristos 
3165*8e33eff8Schristos JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3166*8e33eff8Schristos je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3167*8e33eff8Schristos 	size_t ret;
3168*8e33eff8Schristos 	tsdn_t *tsdn;
3169*8e33eff8Schristos 
3170*8e33eff8Schristos 	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3171*8e33eff8Schristos 
3172*8e33eff8Schristos 	assert(malloc_initialized() || IS_INITIALIZER);
3173*8e33eff8Schristos 
3174*8e33eff8Schristos 	tsdn = tsdn_fetch();
3175*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3176*8e33eff8Schristos 
3177*8e33eff8Schristos 	if (unlikely(ptr == NULL)) {
3178*8e33eff8Schristos 		ret = 0;
3179*8e33eff8Schristos 	} else {
3180*8e33eff8Schristos 		if (config_debug || force_ivsalloc) {
3181*8e33eff8Schristos 			ret = ivsalloc(tsdn, ptr);
3182*8e33eff8Schristos 			assert(force_ivsalloc || ret != 0);
3183*8e33eff8Schristos 		} else {
3184*8e33eff8Schristos 			ret = isalloc(tsdn, ptr);
3185*8e33eff8Schristos 		}
3186*8e33eff8Schristos 	}
3187*8e33eff8Schristos 
3188*8e33eff8Schristos 	check_entry_exit_locking(tsdn);
3189*8e33eff8Schristos 	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3190*8e33eff8Schristos 	return ret;
3191*8e33eff8Schristos }
3192*8e33eff8Schristos 
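/*
 * Illustrative identity tying the query functions together (same flags and
 * run-time options assumed): for a non-NULL p = mallocx(n, f),
 * malloc_usable_size(p) == sallocx(p, f) == nallocx(n, f), which lets
 * callers size buffers without performing a trial allocation.
 */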
3193*8e33eff8Schristos /*
3194*8e33eff8Schristos  * End non-standard functions.
3195*8e33eff8Schristos  */
3196*8e33eff8Schristos /******************************************************************************/
3197*8e33eff8Schristos /*
3198*8e33eff8Schristos  * The following functions are used by threading libraries for protection of
3199*8e33eff8Schristos  * malloc during fork().
3200*8e33eff8Schristos  */
3201*8e33eff8Schristos 
3202*8e33eff8Schristos /*
3203*8e33eff8Schristos  * If an application creates a thread before doing any allocation in the main
3204*8e33eff8Schristos  * thread, then calls fork(2) in the main thread followed by memory allocation
3205*8e33eff8Schristos  * in the child process, a race can occur that results in deadlock within the
3206*8e33eff8Schristos  * child: the main thread may have forked while the created thread had
3207*8e33eff8Schristos  * partially initialized the allocator.  Ordinarily jemalloc prevents
3208*8e33eff8Schristos  * fork/malloc races via the following functions it registers during
3209*8e33eff8Schristos  * initialization using pthread_atfork(), but of course that does no good if
3210*8e33eff8Schristos  * the allocator isn't fully initialized at fork time.  The following library
3211*8e33eff8Schristos  * constructor is a partial solution to this problem.  It may still be possible
3212*8e33eff8Schristos  * to trigger the deadlock described above, but doing so would involve forking
3213*8e33eff8Schristos  * via a library constructor that runs before jemalloc's runs.
3214*8e33eff8Schristos  */
3215*8e33eff8Schristos #ifndef JEMALLOC_JET
3216*8e33eff8Schristos JEMALLOC_ATTR(constructor)
3217*8e33eff8Schristos static void
3218*8e33eff8Schristos jemalloc_constructor(void) {
3219*8e33eff8Schristos 	malloc_init();
3220*8e33eff8Schristos }
3221*8e33eff8Schristos #endif
3222*8e33eff8Schristos 
3223*8e33eff8Schristos #if !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(__NetBSD__)
3224*8e33eff8Schristos void
3225*8e33eff8Schristos jemalloc_prefork(void)
3226*8e33eff8Schristos #else
3227*8e33eff8Schristos JEMALLOC_EXPORT void
3228*8e33eff8Schristos _malloc_prefork(void)
3229*8e33eff8Schristos #endif
3230*8e33eff8Schristos {
3231*8e33eff8Schristos 	tsd_t *tsd;
3232*8e33eff8Schristos 	unsigned i, j, narenas;
3233*8e33eff8Schristos 	arena_t *arena;
3234*8e33eff8Schristos 
3235*8e33eff8Schristos #ifdef JEMALLOC_MUTEX_INIT_CB
3236*8e33eff8Schristos 	if (!malloc_initialized()) {
3237*8e33eff8Schristos 		return;
3238*8e33eff8Schristos 	}
3239*8e33eff8Schristos #endif
3240*8e33eff8Schristos 	assert(malloc_initialized());
3241*8e33eff8Schristos 
3242*8e33eff8Schristos 	tsd = tsd_fetch();
3243*8e33eff8Schristos 
3244*8e33eff8Schristos 	narenas = narenas_total_get();
3245*8e33eff8Schristos 
3246*8e33eff8Schristos 	witness_prefork(tsd_witness_tsdp_get(tsd));
3247*8e33eff8Schristos 	/* Acquire all mutexes in a safe order. */
3248*8e33eff8Schristos 	ctl_prefork(tsd_tsdn(tsd));
3249*8e33eff8Schristos 	tcache_prefork(tsd_tsdn(tsd));
3250*8e33eff8Schristos 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3251*8e33eff8Schristos 	if (have_background_thread) {
3252*8e33eff8Schristos 		background_thread_prefork0(tsd_tsdn(tsd));
3253*8e33eff8Schristos 	}
3254*8e33eff8Schristos 	prof_prefork0(tsd_tsdn(tsd));
3255*8e33eff8Schristos 	if (have_background_thread) {
3256*8e33eff8Schristos 		background_thread_prefork1(tsd_tsdn(tsd));
3257*8e33eff8Schristos 	}
3258*8e33eff8Schristos 	/* Break arena prefork into stages to preserve lock order. */
3259*8e33eff8Schristos 	for (i = 0; i < 8; i++) {
3260*8e33eff8Schristos 		for (j = 0; j < narenas; j++) {
3261*8e33eff8Schristos 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3262*8e33eff8Schristos 			    NULL) {
3263*8e33eff8Schristos 				switch (i) {
3264*8e33eff8Schristos 				case 0:
3265*8e33eff8Schristos 					arena_prefork0(tsd_tsdn(tsd), arena);
3266*8e33eff8Schristos 					break;
3267*8e33eff8Schristos 				case 1:
3268*8e33eff8Schristos 					arena_prefork1(tsd_tsdn(tsd), arena);
3269*8e33eff8Schristos 					break;
3270*8e33eff8Schristos 				case 2:
3271*8e33eff8Schristos 					arena_prefork2(tsd_tsdn(tsd), arena);
3272*8e33eff8Schristos 					break;
3273*8e33eff8Schristos 				case 3:
3274*8e33eff8Schristos 					arena_prefork3(tsd_tsdn(tsd), arena);
3275*8e33eff8Schristos 					break;
3276*8e33eff8Schristos 				case 4:
3277*8e33eff8Schristos 					arena_prefork4(tsd_tsdn(tsd), arena);
3278*8e33eff8Schristos 					break;
3279*8e33eff8Schristos 				case 5:
3280*8e33eff8Schristos 					arena_prefork5(tsd_tsdn(tsd), arena);
3281*8e33eff8Schristos 					break;
3282*8e33eff8Schristos 				case 6:
3283*8e33eff8Schristos 					arena_prefork6(tsd_tsdn(tsd), arena);
3284*8e33eff8Schristos 					break;
3285*8e33eff8Schristos 				case 7:
3286*8e33eff8Schristos 					arena_prefork7(tsd_tsdn(tsd), arena);
3287*8e33eff8Schristos 					break;
3288*8e33eff8Schristos 				default: not_reached();
3289*8e33eff8Schristos 				}
3290*8e33eff8Schristos 			}
3291*8e33eff8Schristos 		}
3292*8e33eff8Schristos 	}
3293*8e33eff8Schristos 	prof_prefork1(tsd_tsdn(tsd));
3294*8e33eff8Schristos }
3295*8e33eff8Schristos 
3296*8e33eff8Schristos #if !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(__NetBSD__)
3297*8e33eff8Schristos void
3298*8e33eff8Schristos jemalloc_postfork_parent(void)
3299*8e33eff8Schristos #else
3300*8e33eff8Schristos JEMALLOC_EXPORT void
3301*8e33eff8Schristos _malloc_postfork(void)
3302*8e33eff8Schristos #endif
3303*8e33eff8Schristos {
3304*8e33eff8Schristos 	tsd_t *tsd;
3305*8e33eff8Schristos 	unsigned i, narenas;
3306*8e33eff8Schristos 
3307*8e33eff8Schristos #ifdef JEMALLOC_MUTEX_INIT_CB
3308*8e33eff8Schristos 	if (!malloc_initialized()) {
3309*8e33eff8Schristos 		return;
3310*8e33eff8Schristos 	}
3311*8e33eff8Schristos #endif
3312*8e33eff8Schristos 	assert(malloc_initialized());
3313*8e33eff8Schristos 
3314*8e33eff8Schristos 	tsd = tsd_fetch();
3315*8e33eff8Schristos 
3316*8e33eff8Schristos 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3317*8e33eff8Schristos 	/* Release all mutexes, now that fork() has completed. */
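	/*
	 * Release proceeds roughly in the reverse of the prefork acquisition
	 * order above: per-arena mutexes first, then prof, the background
	 * thread state, arenas_lock, tcache, and finally ctl.
	 */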
3318*8e33eff8Schristos 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3319*8e33eff8Schristos 		arena_t *arena;
3320*8e33eff8Schristos 
3321*8e33eff8Schristos 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3322*8e33eff8Schristos 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3323*8e33eff8Schristos 		}
3324*8e33eff8Schristos 	}
3325*8e33eff8Schristos 	prof_postfork_parent(tsd_tsdn(tsd));
3326*8e33eff8Schristos 	if (have_background_thread) {
3327*8e33eff8Schristos 		background_thread_postfork_parent(tsd_tsdn(tsd));
3328*8e33eff8Schristos 	}
3329*8e33eff8Schristos 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3330*8e33eff8Schristos 	tcache_postfork_parent(tsd_tsdn(tsd));
3331*8e33eff8Schristos 	ctl_postfork_parent(tsd_tsdn(tsd));
3332*8e33eff8Schristos }
3333*8e33eff8Schristos 
3334*8e33eff8Schristos #if !defined(__NetBSD__)
3335*8e33eff8Schristos void
3336*8e33eff8Schristos jemalloc_postfork_child(void)
3337*8e33eff8Schristos #else
3338*8e33eff8Schristos JEMALLOC_EXPORT void
3339*8e33eff8Schristos _malloc_postfork_child(void)
3340*8e33eff8Schristos #endif
3341*8e33eff8Schristos {
3342*8e33eff8Schristos 	tsd_t *tsd;
3343*8e33eff8Schristos 	unsigned i, narenas;
3344*8e33eff8Schristos 
3345*8e33eff8Schristos 	assert(malloc_initialized());
3346*8e33eff8Schristos 
3347*8e33eff8Schristos 	tsd = tsd_fetch();
3348*8e33eff8Schristos 
3349*8e33eff8Schristos 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3350*8e33eff8Schristos 	/* Release all mutexes, now that fork() has completed. */
3351*8e33eff8Schristos 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3352*8e33eff8Schristos 		arena_t *arena;
3353*8e33eff8Schristos 
3354*8e33eff8Schristos 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3355*8e33eff8Schristos 			arena_postfork_child(tsd_tsdn(tsd), arena);
3356*8e33eff8Schristos 		}
3357*8e33eff8Schristos 	}
3358*8e33eff8Schristos 	prof_postfork_child(tsd_tsdn(tsd));
3359*8e33eff8Schristos 	if (have_background_thread) {
3360*8e33eff8Schristos 		background_thread_postfork_child(tsd_tsdn(tsd));
3361*8e33eff8Schristos 	}
3362*8e33eff8Schristos 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3363*8e33eff8Schristos 	tcache_postfork_child(tsd_tsdn(tsd));
3364*8e33eff8Schristos 	ctl_postfork_child(tsd_tsdn(tsd));
3365*8e33eff8Schristos }
3366*8e33eff8Schristos 
3367*8e33eff8Schristos void (*
3368*8e33eff8Schristos je_malloc_message_get(void))(void *, const char *)
3369*8e33eff8Schristos {
3370*8e33eff8Schristos 	return je_malloc_message;
3371*8e33eff8Schristos }
3372*8e33eff8Schristos 
3373*8e33eff8Schristos void
3374*8e33eff8Schristos je_malloc_message_set(void (*m)(void *, const char *))
3375*8e33eff8Schristos {
3376*8e33eff8Schristos 	je_malloc_message = m;
3377*8e33eff8Schristos }
3378*8e33eff8Schristos 
3379*8e33eff8Schristos const char *
3380*8e33eff8Schristos je_malloc_conf_get(void)
3381*8e33eff8Schristos {
3382*8e33eff8Schristos 	return je_malloc_conf;
3383*8e33eff8Schristos }
3384*8e33eff8Schristos 
3385*8e33eff8Schristos void
3386*8e33eff8Schristos je_malloc_conf_set(const char *m)
3387*8e33eff8Schristos {
3388*8e33eff8Schristos 	je_malloc_conf = m;
3389*8e33eff8Schristos }
3390*8e33eff8Schristos 
3391*8e33eff8Schristos /******************************************************************************/
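/*
 * Usage note for the accessors above (illustrative): they provide a
 * call-based way to read and replace the weak je_malloc_conf and
 * je_malloc_message pointers at run time, e.g.:
 *
 *	if (je_malloc_conf_get() == NULL)
 *		je_malloc_conf_set("abort_conf:true,narenas:1");
 *
 * Any such call must happen before the first allocation, because the
 * option string is only consulted while the allocator bootstraps.
 */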