xref: /netbsd-src/external/bsd/jemalloc.old/dist/src/jemalloc.c (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1 #include <sys/cdefs.h>
2 
3 #ifdef __NetBSD__
4 #include "extern.h"
5 #endif
6 
7 #define JEMALLOC_C_
8 #include "jemalloc/internal/jemalloc_preamble.h"
9 #include "jemalloc/internal/jemalloc_internal_includes.h"
10 
11 #include "jemalloc/internal/assert.h"
12 #include "jemalloc/internal/atomic.h"
13 #include "jemalloc/internal/ctl.h"
14 #include "jemalloc/internal/extent_dss.h"
15 #include "jemalloc/internal/extent_mmap.h"
16 #include "jemalloc/internal/jemalloc_internal_types.h"
17 #include "jemalloc/internal/log.h"
18 #include "jemalloc/internal/malloc_io.h"
19 #include "jemalloc/internal/mutex.h"
20 #include "jemalloc/internal/rtree.h"
21 #include "jemalloc/internal/size_classes.h"
22 #include "jemalloc/internal/spin.h"
23 #include "jemalloc/internal/sz.h"
24 #include "jemalloc/internal/ticker.h"
25 #include "jemalloc/internal/util.h"
26 
27 #ifdef JEMALLOC_WEAK_NOSTD
28 __weak_alias(mallocx, __je_mallocx)
29 __weak_alias(rallocx, __je_rallocx)
30 __weak_alias(xallocx, __je_xallocx)
31 __weak_alias(sallocx, __je_sallocx)
32 __weak_alias(dallocx, __je_dallocx)
33 __weak_alias(sdallocx, __je_sdallocx)
34 __weak_alias(nallocx, __je_nallocx)
35 
36 __weak_alias(mallctl, __je_mallctl)
37 __weak_alias(mallctlnametomib, __je_mallctlnametomib)
38 __weak_alias(mallctlbymib, __je_mallctlbymib)
39 
40 __weak_alias(malloc_stats_print, __je_malloc_stats_print)
41 __weak_alias(malloc_usable_size, __je_malloc_usable_size)
42 
43 __weak_alias(malloc_message, __je_malloc_message)
44 __weak_alias(malloc_conf, __je_malloc_conf)
45 
46 __weak_alias(malloc_message_get, __je_malloc_message_get)
47 __weak_alias(malloc_conf_get, __je_malloc_conf_get)
48 
49 __weak_alias(malloc_message_set, __je_malloc_message_set)
50 __weak_alias(malloc_conf_set, __je_malloc_conf_set)
51 #endif
52 
53 /******************************************************************************/
54 /* Data. */
55 
56 /* Runtime configuration options. */
57 const char	*je_malloc_conf
58 #ifndef _WIN32
59     JEMALLOC_ATTR(weak)
60 #endif
61     ;
62 bool	opt_abort =
63 #ifdef JEMALLOC_DEBUG
64     true
65 #else
66     false
67 #endif
68     ;
69 bool	opt_abort_conf =
70 #ifdef JEMALLOC_DEBUG
71     true
72 #else
73     false
74 #endif
75     ;
76 const char	*opt_junk =
77 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
78     "true"
79 #else
80     "false"
81 #endif
82     ;
83 bool	opt_junk_alloc =
84 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
85     true
86 #else
87     false
88 #endif
89     ;
90 bool	opt_junk_free =
91 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
92     true
93 #else
94     false
95 #endif
96     ;
97 
98 bool	opt_utrace = false;
99 bool	opt_xmalloc = false;
100 bool	opt_zero = false;
101 unsigned	opt_narenas = 0;
102 
103 unsigned	ncpus;
104 
105 /* Protects arenas initialization. */
106 malloc_mutex_t arenas_lock;
107 /*
108  * Arenas that are used to service external requests.  Not all elements of the
109  * arenas array are necessarily used; arenas are created lazily as needed.
110  *
111  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
112  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
113  * takes some action to create them and allocate from them.
114  *
115  * Points to an arena_t.
116  */
117 JEMALLOC_ALIGNED(CACHELINE)
118 atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
119 static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
120 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
121 unsigned		narenas_auto; /* Read-only after initialization. */
122 
123 typedef enum {
124 	malloc_init_uninitialized	= 3,
125 	malloc_init_a0_initialized	= 2,
126 	malloc_init_recursible		= 1,
127 	malloc_init_initialized		= 0 /* Common case --> jnz. */
128 } malloc_init_t;
129 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
130 
131 /* False in the common case; starts out true to force the slow (init) path. */
132 bool			malloc_slow = true;
133 
134 /* Option flag bits for malloc_slow_flags; any set bit forces malloc_slow. */
135 enum {
136 	flag_opt_junk_alloc	= (1U),
137 	flag_opt_junk_free	= (1U << 1),
138 	flag_opt_zero		= (1U << 2),
139 	flag_opt_utrace		= (1U << 3),
140 	flag_opt_xmalloc	= (1U << 4)
141 };
142 static uint8_t	malloc_slow_flags;
143 
144 #ifdef JEMALLOC_THREADED_INIT
145 /* Used to let the initializing thread recursively allocate. */
146 #  define NO_INITIALIZER	((unsigned long)0)
147 #  define INITIALIZER		pthread_self()
148 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
149 static pthread_t		malloc_initializer = NO_INITIALIZER;
150 #else
151 #  define NO_INITIALIZER	false
152 #  define INITIALIZER		true
153 #  define IS_INITIALIZER	malloc_initializer
154 static bool			malloc_initializer = NO_INITIALIZER;
155 #endif
156 
157 /* Used to avoid initialization races. */
158 #ifdef _WIN32
159 #if _WIN32_WINNT >= 0x0600
160 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
161 #else
162 static malloc_mutex_t	init_lock;
163 static bool init_lock_initialized = false;
164 
165 JEMALLOC_ATTR(constructor)
166 static void WINAPI
167 _init_init_lock(void) {
168 	/*
169 	 * If another constructor in the same binary is using mallctl to e.g.
170 	 * set up extent hooks, it may end up running before this one, and
171 	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
172 	 * we force an initialization of the lock in malloc_init_hard as well.
173 	 * We make no attempt at atomicity for accesses to the
174 	 * init_lock_initialized boolean, since it really only matters early in
175 	 * process startup, before any separate thread normally starts
176 	 * doing anything.
177 	 */
178 	if (!init_lock_initialized) {
179 		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
180 		    malloc_mutex_rank_exclusive);
181 	}
182 	init_lock_initialized = true;
183 }
184 
185 #ifdef _MSC_VER
186 #  pragma section(".CRT$XCU", read)
187 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
188 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
189 #endif
190 #endif
191 #else
192 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
193 #endif
194 
195 typedef struct {
196 	void	*p;	/* Input pointer (as in realloc(p, s)). */
197 	size_t	s;	/* Request size. */
198 	void	*r;	/* Result pointer. */
199 } malloc_utrace_t;
200 
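/*
 * When opt_utrace is enabled, UTRACE(p, s, r) records a malloc_utrace_t event
 * via utrace(2) (e.g. UTRACE(0, size, ret) on the allocation paths below)
 * while leaving the caller-visible errno untouched.
 */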
201 #ifdef JEMALLOC_UTRACE
202 #  define UTRACE(a, b, c) do {						\
203 	if (unlikely(opt_utrace)) {					\
204 		int utrace_serrno = errno;				\
205 		malloc_utrace_t ut;					\
206 		ut.p = (a);						\
207 		ut.s = (b);						\
208 		ut.r = (c);						\
209 		utrace(&ut, sizeof(ut));				\
210 		errno = utrace_serrno;					\
211 	}								\
212 } while (0)
213 #else
214 #  define UTRACE(a, b, c)
215 #endif
216 
217 /* Whether any invalid config options were encountered. */
218 static bool had_conf_error = false;
219 
220 /******************************************************************************/
221 /*
222  * Function prototypes for static functions that are referenced prior to
223  * definition.
224  */
225 
226 static bool	malloc_init_hard_a0(void);
227 static bool	malloc_init_hard(void);
228 
229 /******************************************************************************/
230 /*
231  * Begin miscellaneous support functions.
232  */
233 
234 bool
235 malloc_initialized(void) {
236 	return (malloc_init_state == malloc_init_initialized);
237 }
238 
239 JEMALLOC_ALWAYS_INLINE bool
240 malloc_init_a0(void) {
241 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
242 		return malloc_init_hard_a0();
243 	}
244 	return false;
245 }
246 
247 JEMALLOC_ALWAYS_INLINE bool
248 malloc_init(void) {
249 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
250 		return true;
251 	}
252 	return false;
253 }
254 
255 /*
256  * The a0*() functions are used instead of i{d,}alloc() in situations that
257  * cannot tolerate TLS variable access.
258  */
259 
260 static void *
261 a0ialloc(size_t size, bool zero, bool is_internal) {
262 	if (unlikely(malloc_init_a0())) {
263 		return NULL;
264 	}
265 
266 	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
267 	    is_internal, arena_get(TSDN_NULL, 0, true), true);
268 }
269 
270 static void
271 a0idalloc(void *ptr, bool is_internal) {
272 	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
273 }
274 
275 void *
276 a0malloc(size_t size) {
277 	return a0ialloc(size, false, true);
278 }
279 
280 void
281 a0dalloc(void *ptr) {
282 	a0idalloc(ptr, true);
283 }
284 
285 /*
286  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
287  * situations that cannot tolerate TLS variable access (TLS allocation and very
288  * early internal data structure initialization).
289  */
290 
291 void *
292 bootstrap_malloc(size_t size) {
293 	if (unlikely(size == 0)) {
294 		size = 1;
295 	}
296 
297 	return a0ialloc(size, false, false);
298 }
299 
300 void *
301 bootstrap_calloc(size_t num, size_t size) {
302 	size_t num_size;
303 
304 	num_size = num * size;
305 	if (unlikely(num_size == 0)) {
306 		assert(num == 0 || size == 0);
307 		num_size = 1;
308 	}
309 
310 	return a0ialloc(num_size, true, false);
311 }
312 
313 void
314 bootstrap_free(void *ptr) {
315 	if (unlikely(ptr == NULL)) {
316 		return;
317 	}
318 
319 	a0idalloc(ptr, false);
320 }
321 
322 void
323 arena_set(unsigned ind, arena_t *arena) {
324 	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
325 }
326 
327 static void
328 narenas_total_set(unsigned narenas) {
329 	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
330 }
331 
332 static void
333 narenas_total_inc(void) {
334 	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
335 }
336 
337 unsigned
338 narenas_total_get(void) {
339 	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
340 }
341 
342 /* Create a new arena and insert it into the arenas array at index ind. */
343 static arena_t *
344 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
345 	arena_t *arena;
346 
347 	assert(ind <= narenas_total_get());
348 	if (ind >= MALLOCX_ARENA_LIMIT) {
349 		return NULL;
350 	}
351 	if (ind == narenas_total_get()) {
352 		narenas_total_inc();
353 	}
354 
355 	/*
356 	 * Another thread may have already initialized arenas[ind] if it's an
357 	 * auto arena.
358 	 */
359 	arena = arena_get(tsdn, ind, false);
360 	if (arena != NULL) {
361 		assert(ind < narenas_auto);
362 		return arena;
363 	}
364 
365 	/* Actually initialize the arena. */
366 	arena = arena_new(tsdn, ind, extent_hooks);
367 
368 	return arena;
369 }
370 
371 static void
372 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
373 	if (ind == 0) {
374 		return;
375 	}
376 	if (have_background_thread) {
377 		bool err;
378 		malloc_mutex_lock(tsdn, &background_thread_lock);
379 		err = background_thread_create(tsdn_tsd(tsdn), ind);
380 		malloc_mutex_unlock(tsdn, &background_thread_lock);
381 		if (err) {
382 			malloc_printf("<jemalloc>: error in background thread "
383 				      "creation for arena %u. Abort.\n", ind);
384 			abort();
385 		}
386 	}
387 }
388 
389 arena_t *
390 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
391 	arena_t *arena;
392 
393 	malloc_mutex_lock(tsdn, &arenas_lock);
394 	arena = arena_init_locked(tsdn, ind, extent_hooks);
395 	malloc_mutex_unlock(tsdn, &arenas_lock);
396 
397 	arena_new_create_background_thread(tsdn, ind);
398 
399 	return arena;
400 }
401 
402 static void
403 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
404 	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
405 	arena_nthreads_inc(arena, internal);
406 
407 	if (internal) {
408 		tsd_iarena_set(tsd, arena);
409 	} else {
410 		tsd_arena_set(tsd, arena);
411 	}
412 }
413 
414 void
415 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
416 	arena_t *oldarena, *newarena;
417 
418 	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
419 	newarena = arena_get(tsd_tsdn(tsd), newind, false);
420 	arena_nthreads_dec(oldarena, false);
421 	arena_nthreads_inc(newarena, false);
422 	tsd_arena_set(tsd, newarena);
423 }
424 
425 static void
426 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
427 	arena_t *arena;
428 
429 	arena = arena_get(tsd_tsdn(tsd), ind, false);
430 	arena_nthreads_dec(arena, internal);
431 
432 	if (internal) {
433 		tsd_iarena_set(tsd, NULL);
434 	} else {
435 		tsd_arena_set(tsd, NULL);
436 	}
437 }
438 
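/*
 * (Re)allocate the calling thread's per-arena tdata array so that it covers
 * arena index ind, copying existing decay tickers over and freeing any
 * too-small old array before returning.
 */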
439 arena_tdata_t *
440 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
441 	arena_tdata_t *tdata, *arenas_tdata_old;
442 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
443 	unsigned narenas_tdata_old, i;
444 	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
445 	unsigned narenas_actual = narenas_total_get();
446 
447 	/*
448 	 * Dissociate old tdata array (and set up for deallocation upon return)
449 	 * if it's too small.
450 	 */
451 	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
452 		arenas_tdata_old = arenas_tdata;
453 		narenas_tdata_old = narenas_tdata;
454 		arenas_tdata = NULL;
455 		narenas_tdata = 0;
456 		tsd_arenas_tdata_set(tsd, arenas_tdata);
457 		tsd_narenas_tdata_set(tsd, narenas_tdata);
458 	} else {
459 		arenas_tdata_old = NULL;
460 		narenas_tdata_old = 0;
461 	}
462 
463 	/* Allocate tdata array if it's missing. */
464 	if (arenas_tdata == NULL) {
465 		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
466 		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
467 
468 		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
469 			*arenas_tdata_bypassp = true;
470 			arenas_tdata = (arena_tdata_t *)a0malloc(
471 			    sizeof(arena_tdata_t) * narenas_tdata);
472 			*arenas_tdata_bypassp = false;
473 		}
474 		if (arenas_tdata == NULL) {
475 			tdata = NULL;
476 			goto label_return;
477 		}
478 		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
479 		tsd_arenas_tdata_set(tsd, arenas_tdata);
480 		tsd_narenas_tdata_set(tsd, narenas_tdata);
481 	}
482 
483 	/*
484 	 * Copy to tdata array.  It's possible that the actual number of arenas
485 	 * has increased since narenas_total_get() was called above, but that
486 	 * causes no correctness issues unless two threads concurrently execute
487 	 * the arenas.create mallctl, which we trust mallctl synchronization to
488 	 * prevent.
489 	 */
490 
491 	/* Copy/initialize tickers. */
492 	for (i = 0; i < narenas_actual; i++) {
493 		if (i < narenas_tdata_old) {
494 			ticker_copy(&arenas_tdata[i].decay_ticker,
495 			    &arenas_tdata_old[i].decay_ticker);
496 		} else {
497 			ticker_init(&arenas_tdata[i].decay_ticker,
498 			    DECAY_NTICKS_PER_UPDATE);
499 		}
500 	}
501 	if (narenas_tdata > narenas_actual) {
502 		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
503 		    * (narenas_tdata - narenas_actual));
504 	}
505 
506 	/* Read the refreshed tdata array. */
507 	tdata = &arenas_tdata[ind];
508 label_return:
509 	if (arenas_tdata_old != NULL) {
510 		a0dalloc(arenas_tdata_old);
511 	}
512 	return tdata;
513 }
514 
515 /* Slow path, called only by arena_choose(). */
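/*
 * Picks and binds arenas for both application (choose[0]) and internal
 * (choose[1]) allocation: with percpu arenas the CPU id decides; otherwise
 * an unused or least-loaded arena is chosen, creating a new arena in an
 * uninitialized slot when all existing ones are busy.
 */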
516 arena_t *
517 arena_choose_hard(tsd_t *tsd, bool internal) {
518 	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
519 
520 	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
521 		unsigned choose = percpu_arena_choose();
522 		ret = arena_get(tsd_tsdn(tsd), choose, true);
523 		assert(ret != NULL);
524 		arena_bind(tsd, arena_ind_get(ret), false);
525 		arena_bind(tsd, arena_ind_get(ret), true);
526 
527 		return ret;
528 	}
529 
530 	if (narenas_auto > 1) {
531 		unsigned i, j, choose[2], first_null;
532 		bool is_new_arena[2];
533 
534 		/*
535 		 * Determine binding for both non-internal and internal
536 		 * allocation.
537 		 *
538 		 *   choose[0]: For application allocation.
539 		 *   choose[1]: For internal metadata allocation.
540 		 */
541 
542 		for (j = 0; j < 2; j++) {
543 			choose[j] = 0;
544 			is_new_arena[j] = false;
545 		}
546 
547 		first_null = narenas_auto;
548 		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
549 		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
550 		for (i = 1; i < narenas_auto; i++) {
551 			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
552 				/*
553 				 * Choose the first arena that has the lowest
554 				 * number of threads assigned to it.
555 				 */
556 				for (j = 0; j < 2; j++) {
557 					if (arena_nthreads_get(arena_get(
558 					    tsd_tsdn(tsd), i, false), !!j) <
559 					    arena_nthreads_get(arena_get(
560 					    tsd_tsdn(tsd), choose[j], false),
561 					    !!j)) {
562 						choose[j] = i;
563 					}
564 				}
565 			} else if (first_null == narenas_auto) {
566 				/*
567 				 * Record the index of the first uninitialized
568 				 * arena, in case all extant arenas are in use.
569 				 *
570 				 * NB: It is possible for there to be
571 				 * discontinuities in terms of initialized
572 				 * versus uninitialized arenas, due to the
573 				 * "thread.arena" mallctl.
574 				 */
575 				first_null = i;
576 			}
577 		}
578 
579 		for (j = 0; j < 2; j++) {
580 			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
581 			    choose[j], false), !!j) == 0 || first_null ==
582 			    narenas_auto) {
583 				/*
584 				 * Use an unloaded arena, or the least loaded
585 				 * arena if all arenas are already initialized.
586 				 */
587 				if (!!j == internal) {
588 					ret = arena_get(tsd_tsdn(tsd),
589 					    choose[j], false);
590 				}
591 			} else {
592 				arena_t *arena;
593 
594 				/* Initialize a new arena. */
595 				choose[j] = first_null;
596 				arena = arena_init_locked(tsd_tsdn(tsd),
597 				    choose[j], (extent_hooks_t *)
598 				    __UNCONST(&extent_hooks_default));
599 				if (arena == NULL) {
600 					malloc_mutex_unlock(tsd_tsdn(tsd),
601 					    &arenas_lock);
602 					return NULL;
603 				}
604 				is_new_arena[j] = true;
605 				if (!!j == internal) {
606 					ret = arena;
607 				}
608 			}
609 			arena_bind(tsd, choose[j], !!j);
610 		}
611 		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
612 
613 		for (j = 0; j < 2; j++) {
614 			if (is_new_arena[j]) {
615 				assert(choose[j] > 0);
616 				arena_new_create_background_thread(
617 				    tsd_tsdn(tsd), choose[j]);
618 			}
619 		}
620 
621 	} else {
622 		ret = arena_get(tsd_tsdn(tsd), 0, false);
623 		arena_bind(tsd, 0, false);
624 		arena_bind(tsd, 0, true);
625 	}
626 
627 	return ret;
628 }
629 
630 void
631 iarena_cleanup(tsd_t *tsd) {
632 	arena_t *iarena;
633 
634 	iarena = tsd_iarena_get(tsd);
635 	if (iarena != NULL) {
636 		arena_unbind(tsd, arena_ind_get(iarena), true);
637 	}
638 }
639 
640 void
641 arena_cleanup(tsd_t *tsd) {
642 	arena_t *arena;
643 
644 	arena = tsd_arena_get(tsd);
645 	if (arena != NULL) {
646 		arena_unbind(tsd, arena_ind_get(arena), false);
647 	}
648 }
649 
650 void
651 arenas_tdata_cleanup(tsd_t *tsd) {
652 	arena_tdata_t *arenas_tdata;
653 
654 	/* Prevent tsd->arenas_tdata from being (re)created. */
655 	*tsd_arenas_tdata_bypassp_get(tsd) = true;
656 
657 	arenas_tdata = tsd_arenas_tdata_get(tsd);
658 	if (arenas_tdata != NULL) {
659 		tsd_arenas_tdata_set(tsd, NULL);
660 		a0dalloc(arenas_tdata);
661 	}
662 }
663 
664 static void
665 stats_print_atexit(void) {
666 	if (config_stats) {
667 		tsdn_t *tsdn;
668 		unsigned narenas, i;
669 
670 		tsdn = tsdn_fetch();
671 
672 		/*
673 		 * Merge stats from extant threads.  This is racy, since
674 		 * individual threads do not lock when recording tcache stats
675 		 * events.  As a consequence, the final stats may be slightly
676 		 * out of date by the time they are reported, if other threads
677 		 * continue to allocate.
678 		 */
679 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
680 			arena_t *arena = arena_get(tsdn, i, false);
681 			if (arena != NULL) {
682 				tcache_t *tcache;
683 
684 				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
685 				ql_foreach(tcache, &arena->tcache_ql, link) {
686 					tcache_stats_merge(tsdn, tcache, arena);
687 				}
688 				malloc_mutex_unlock(tsdn,
689 				    &arena->tcache_ql_mtx);
690 			}
691 		}
692 	}
693 	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
694 }
695 
696 /*
697  * Ensure that we don't hold any locks upon entry to or exit from allocator
698  * code (in a "broad" sense that doesn't count a reentrant allocation as an
699  * entrance or exit).
700  */
701 JEMALLOC_ALWAYS_INLINE void
702 check_entry_exit_locking(tsdn_t *tsdn) {
703 	if (!config_debug) {
704 		return;
705 	}
706 	if (tsdn_null(tsdn)) {
707 		return;
708 	}
709 	tsd_t *tsd = tsdn_tsd(tsdn);
710 	/*
711 	 * It's possible we hold locks at entry/exit if we're in a nested
712 	 * allocation.
713 	 */
714 	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
715 	if (reentrancy_level != 0) {
716 		return;
717 	}
718 	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
719 }
720 
721 /*
722  * End miscellaneous support functions.
723  */
724 /******************************************************************************/
725 /*
726  * Begin initialization functions.
727  */
728 
729 static char *
730 jemalloc_secure_getenv(const char *name) {
731 #ifdef JEMALLOC_HAVE_SECURE_GETENV
732 	return secure_getenv(name);
733 #else
734 #  ifdef JEMALLOC_HAVE_ISSETUGID
735 	if (issetugid() != 0) {
736 		return NULL;
737 	}
738 #  endif
739 	return getenv(name);
740 #endif
741 }
742 
743 static unsigned
744 malloc_ncpus(void) {
745 	long result;
746 
747 #ifdef _WIN32
748 	SYSTEM_INFO si;
749 	GetSystemInfo(&si);
750 	result = si.dwNumberOfProcessors;
751 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
752 	/*
753 	 * glibc >= 2.6 has the CPU_COUNT macro.
754 	 *
755 	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
756 	 * *before* setting up the isspace tables.  Therefore we need a
757 	 * different method to get the number of CPUs.
758 	 */
759 	{
760 		cpu_set_t set;
761 
762 		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
763 		result = CPU_COUNT(&set);
764 	}
765 #else
766 	result = sysconf(_SC_NPROCESSORS_ONLN);
767 #endif
768 	return ((result == -1) ? 1 : (unsigned)result);
769 }
770 
771 static void
772 init_opt_stats_print_opts(const char *v, size_t vlen) {
773 	size_t opts_len = strlen(opt_stats_print_opts);
774 	assert(opts_len <= stats_print_tot_num_options);
775 
776 	for (size_t i = 0; i < vlen; i++) {
777 		switch (v[i]) {
778 #define OPTION(o, v, d, s) case o: break;
779 			STATS_PRINT_OPTIONS
780 #undef OPTION
781 		default: continue;
782 		}
783 
784 		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
785 			/* Ignore repeated. */
786 			continue;
787 		}
788 
789 		opt_stats_print_opts[opts_len++] = v[i];
790 		opt_stats_print_opts[opts_len] = '\0';
791 		assert(opts_len <= stats_print_tot_num_options);
792 	}
793 	assert(opts_len == strlen(opt_stats_print_opts));
794 }
795 
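/*
 * Extract the next "key:value" pair from a conf string of the form
 * "key1:value1,key2:value2" (e.g. "abort:true,narenas:4").  On success the
 * key/value pointers and lengths are filled in, *opts_p is advanced past the
 * pair, and false is returned; true is returned at end of input or on a
 * malformed string.
 */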
796 static bool
797 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
798     char const **v_p, size_t *vlen_p) {
799 	bool accept;
800 	const char *opts = *opts_p;
801 
802 	*k_p = opts;
803 
804 	for (accept = false; !accept;) {
805 		switch (*opts) {
806 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
807 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
808 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
809 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
810 		case 'Y': case 'Z':
811 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
812 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
813 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
814 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
815 		case 'y': case 'z':
816 		case '0': case '1': case '2': case '3': case '4': case '5':
817 		case '6': case '7': case '8': case '9':
818 		case '_':
819 			opts++;
820 			break;
821 		case ':':
822 			opts++;
823 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
824 			*v_p = opts;
825 			accept = true;
826 			break;
827 		case '\0':
828 			if (opts != *opts_p) {
829 				malloc_write("<jemalloc>: Conf string ends "
830 				    "with key\n");
831 			}
832 			return true;
833 		default:
834 			malloc_write("<jemalloc>: Malformed conf string\n");
835 			return true;
836 		}
837 	}
838 
839 	for (accept = false; !accept;) {
840 		switch (*opts) {
841 		case ',':
842 			opts++;
843 			/*
844 			 * Look ahead one character here, because the next time
845 			 * this function is called, it will assume that end of
846 			 * input has been cleanly reached if no input remains,
847 			 * but we have optimistically already consumed the
848 			 * comma if one exists.
849 			 */
850 			if (*opts == '\0') {
851 				malloc_write("<jemalloc>: Conf string ends "
852 				    "with comma\n");
853 			}
854 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
855 			accept = true;
856 			break;
857 		case '\0':
858 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
859 			accept = true;
860 			break;
861 		default:
862 			opts++;
863 			break;
864 		}
865 	}
866 
867 	*opts_p = opts;
868 	return false;
869 }
870 
871 static JEMALLOC_NORETURN void
872 malloc_abort_invalid_conf(void) {
873 	assert(opt_abort_conf);
874 	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
875 	    "value (see above).\n");
876 	abort();
877 }
878 
879 static void
880 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
881     size_t vlen) {
882 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
883 	    (int)vlen, v);
884 	/* If abort_conf is set, error out after processing all options. */
885 	had_conf_error = true;
886 }
887 
888 static void
889 malloc_slow_flag_init(void) {
890 	/*
891 	 * Combine the runtime options into malloc_slow for the fast path.  Called
892 	 * after processing all the options.
893 	 */
894 	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
895 	    | (opt_junk_free ? flag_opt_junk_free : 0)
896 	    | (opt_zero ? flag_opt_zero : 0)
897 	    | (opt_utrace ? flag_opt_utrace : 0)
898 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
899 
900 	malloc_slow = (malloc_slow_flags != 0);
901 }
902 
903 static void
904 malloc_conf_init(void) {
905 	unsigned i;
906 	char buf[PATH_MAX + 1];
907 	const char *opts, *k, *v;
908 	size_t klen, vlen;
909 
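	/*
	 * Sources are processed in order, so options from later sources take
	 * precedence: the compile-time config_malloc_conf string, the
	 * application's malloc_conf symbol, the /etc/malloc.conf symlink
	 * target, and finally the MALLOC_CONF environment variable.
	 */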
910 	for (i = 0; i < 4; i++) {
911 		/* Get runtime configuration. */
912 		switch (i) {
913 		case 0:
914 			opts = config_malloc_conf;
915 			break;
916 		case 1:
917 			if (je_malloc_conf != NULL) {
918 				/*
919 				 * Use options that were compiled into the
920 				 * program.
921 				 */
922 				opts = je_malloc_conf;
923 			} else {
924 				/* No configuration specified. */
925 				buf[0] = '\0';
926 				opts = buf;
927 			}
928 			break;
929 		case 2: {
930 			ssize_t linklen = 0;
931 #ifndef _WIN32
932 			int saved_errno = errno;
933 			const char *linkname =
934 #  ifdef JEMALLOC_PREFIX
935 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
936 #  else
937 			    "/etc/malloc.conf"
938 #  endif
939 			    ;
940 
941 			/*
942 			 * Try to use the contents of the "/etc/malloc.conf"
943 			 * symbolic link's name.
944 			 */
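			/*
			 * E.g. "ln -s 'abort:true,narenas:4' /etc/malloc.conf"
			 * embeds the conf string in the link target itself.
			 */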
945 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
946 			if (linklen == -1) {
947 				/* No configuration specified. */
948 				linklen = 0;
949 				/* Restore errno. */
950 				set_errno(saved_errno);
951 			}
952 #endif
953 			buf[linklen] = '\0';
954 			opts = buf;
955 			break;
956 		} case 3: {
957 			const char *envname =
958 #ifdef JEMALLOC_PREFIX
959 			    JEMALLOC_CPREFIX"MALLOC_CONF"
960 #else
961 			    "MALLOC_CONF"
962 #endif
963 			    ;
964 
965 			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
966 				/*
967 				 * Do nothing; opts is already initialized to
968 				 * the value of the MALLOC_CONF environment
969 				 * variable.
970 				 */
971 			} else {
972 				/* No configuration specified. */
973 				buf[0] = '\0';
974 				opts = buf;
975 			}
976 			break;
977 		} default:
978 			not_reached();
979 			buf[0] = '\0';
980 			opts = buf;
981 		}
982 
983 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
984 		    &vlen)) {
985 #define CONF_MATCH(n)							\
986 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
987 #define CONF_MATCH_VALUE(n)						\
988 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
989 #define CONF_HANDLE_BOOL(o, n)						\
990 			if (CONF_MATCH(n)) {				\
991 				if (CONF_MATCH_VALUE("true")) {		\
992 					o = true;			\
993 				} else if (CONF_MATCH_VALUE("false")) {	\
994 					o = false;			\
995 				} else {				\
996 					malloc_conf_error(		\
997 					    "Invalid conf value",	\
998 					    k, klen, v, vlen);		\
999 				}					\
1000 				continue;				\
1001 			}
1002 #define CONF_MIN_no(um, min)	false
1003 #define CONF_MIN_yes(um, min)	((um) < (min))
1004 #define CONF_MAX_no(um, max)	false
1005 #define CONF_MAX_yes(um, max)	((um) > (max))
1006 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
1007 			if (CONF_MATCH(n)) {				\
1008 				uintmax_t um;				\
1009 				const char *end;			\
1010 									\
1011 				set_errno(0);				\
1012 				um = malloc_strtoumax(v, &end, 0);	\
1013 				if (get_errno() != 0 || (uintptr_t)end -\
1014 				    (uintptr_t)v != vlen) {		\
1015 					malloc_conf_error(		\
1016 					    "Invalid conf value",	\
1017 					    k, klen, v, vlen);		\
1018 				} else if (clip) {			\
1019 					if (CONF_MIN_##check_min(um,	\
1020 					    (t)(min))) {		\
1021 						o = (t)(min);		\
1022 					} else if (			\
1023 					    CONF_MAX_##check_max(um,	\
1024 					    (t)(max))) {		\
1025 						o = (t)(max);		\
1026 					} else {			\
1027 						o = (t)um;		\
1028 					}				\
1029 				} else {				\
1030 					if (CONF_MIN_##check_min(um,	\
1031 					    (t)(min)) ||		\
1032 					    CONF_MAX_##check_max(um,	\
1033 					    (t)(max))) {		\
1034 						malloc_conf_error(	\
1035 						    "Out-of-range "	\
1036 						    "conf value",	\
1037 						    k, klen, v, vlen);	\
1038 					} else {			\
1039 						o = (t)um;		\
1040 					}				\
1041 				}					\
1042 				continue;				\
1043 			}
1044 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
1045     clip)								\
1046 			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
1047 			    check_min, check_max, clip)
1048 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
1049 			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
1050 			    check_min, check_max, clip)
1051 #define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1052 			if (CONF_MATCH(n)) {				\
1053 				long l;					\
1054 				char *end;				\
1055 									\
1056 				set_errno(0);				\
1057 				l = strtol(v, &end, 0);			\
1058 				if (get_errno() != 0 || (uintptr_t)end -\
1059 				    (uintptr_t)v != vlen) {		\
1060 					malloc_conf_error(		\
1061 					    "Invalid conf value",	\
1062 					    k, klen, v, vlen);		\
1063 				} else if (l < (ssize_t)(min) || l >	\
1064 				    (ssize_t)(max)) {			\
1065 					malloc_conf_error(		\
1066 					    "Out-of-range conf value",	\
1067 					    k, klen, v, vlen);		\
1068 				} else {				\
1069 					o = l;				\
1070 				}					\
1071 				continue;				\
1072 			}
1073 #define CONF_HANDLE_CHAR_P(o, n, d)					\
1074 			if (CONF_MATCH(n)) {				\
1075 				size_t cpylen = (vlen <=		\
1076 				    sizeof(o)-1) ? vlen :		\
1077 				    sizeof(o)-1;			\
1078 				strncpy(o, v, cpylen);			\
1079 				o[cpylen] = '\0';			\
1080 				continue;				\
1081 			}
1082 
1083 			CONF_HANDLE_BOOL(opt_abort, "abort")
1084 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1085 			if (strncmp("metadata_thp", k, klen) == 0) {
1086 				int ii;
1087 				bool match = false;
1088 				for (ii = 0; ii < metadata_thp_mode_limit; ii++) {
1089 					if (strncmp(metadata_thp_mode_names[ii],
1090 					    v, vlen) == 0) {
1091 						opt_metadata_thp = ii;
1092 						match = true;
1093 						break;
1094 					}
1095 				}
1096 				if (!match) {
1097 					malloc_conf_error("Invalid conf value",
1098 					    k, klen, v, vlen);
1099 				}
1100 				continue;
1101 			}
1102 			CONF_HANDLE_BOOL(opt_retain, "retain")
1103 			if (strncmp("dss", k, klen) == 0) {
1104 				int ii;
1105 				bool match = false;
1106 				for (ii = 0; ii < dss_prec_limit; ii++) {
1107 					if (strncmp(dss_prec_names[ii], v, vlen)
1108 					    == 0) {
1109 						if (extent_dss_prec_set(ii)) {
1110 							malloc_conf_error(
1111 							    "Error setting dss",
1112 							    k, klen, v, vlen);
1113 						} else {
1114 							opt_dss =
1115 							    dss_prec_names[ii];
1116 							match = true;
1117 							break;
1118 						}
1119 					}
1120 				}
1121 				if (!match) {
1122 					malloc_conf_error("Invalid conf value",
1123 					    k, klen, v, vlen);
1124 				}
1125 				continue;
1126 			}
1127 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1128 			    UINT_MAX, yes, no, false)
1129 			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1130 			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1131 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1132 			    SSIZE_MAX);
1133 			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1134 			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1135 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1136 			    SSIZE_MAX);
1137 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1138 			if (CONF_MATCH("stats_print_opts")) {
1139 				init_opt_stats_print_opts(v, vlen);
1140 				continue;
1141 			}
1142 			if (config_fill) {
1143 				if (CONF_MATCH("junk")) {
1144 					if (CONF_MATCH_VALUE("true")) {
1145 						opt_junk = "true";
1146 						opt_junk_alloc = opt_junk_free =
1147 						    true;
1148 					} else if (CONF_MATCH_VALUE("false")) {
1149 						opt_junk = "false";
1150 						opt_junk_alloc = opt_junk_free =
1151 						    false;
1152 					} else if (CONF_MATCH_VALUE("alloc")) {
1153 						opt_junk = "alloc";
1154 						opt_junk_alloc = true;
1155 						opt_junk_free = false;
1156 					} else if (CONF_MATCH_VALUE("free")) {
1157 						opt_junk = "free";
1158 						opt_junk_alloc = false;
1159 						opt_junk_free = true;
1160 					} else {
1161 						malloc_conf_error(
1162 						    "Invalid conf value", k,
1163 						    klen, v, vlen);
1164 					}
1165 					continue;
1166 				}
1167 				CONF_HANDLE_BOOL(opt_zero, "zero")
1168 			}
1169 			if (config_utrace) {
1170 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
1171 			}
1172 			if (config_xmalloc) {
1173 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1174 			}
1175 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
1176 			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1177 			    "lg_extent_max_active_fit", 0,
1178 			    (sizeof(size_t) << 3), no, yes, false)
1179 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1180 			    -1, (sizeof(size_t) << 3) - 1)
1181 			if (strncmp("percpu_arena", k, klen) == 0) {
1182 				bool match = false;
1183 				for (int ii = percpu_arena_mode_names_base; ii <
1184 				    percpu_arena_mode_names_limit; ii++) {
1185 					if (strncmp(percpu_arena_mode_names[ii],
1186 					    v, vlen) == 0) {
1187 						if (!have_percpu_arena) {
1188 							malloc_conf_error(
1189 							    "No getcpu support",
1190 							    k, klen, v, vlen);
1191 						}
1192 						opt_percpu_arena = ii;
1193 						match = true;
1194 						break;
1195 					}
1196 				}
1197 				if (!match) {
1198 					malloc_conf_error("Invalid conf value",
1199 					    k, klen, v, vlen);
1200 				}
1201 				continue;
1202 			}
1203 			CONF_HANDLE_BOOL(opt_background_thread,
1204 			    "background_thread");
1205 			CONF_HANDLE_SIZE_T(opt_max_background_threads,
1206 					   "max_background_threads", 1,
1207 					   opt_max_background_threads, yes, yes,
1208 					   true);
1209 			if (config_prof) {
1210 				CONF_HANDLE_BOOL(opt_prof, "prof")
1211 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1212 				    "prof_prefix", "jeprof")
1213 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1214 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1215 				    "prof_thread_active_init")
1216 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1217 				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1218 				    - 1, no, yes, true)
1219 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1220 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1221 				    "lg_prof_interval", -1,
1222 				    (sizeof(uint64_t) << 3) - 1)
1223 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1224 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1225 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1226 			}
1227 			if (config_log) {
1228 				if (CONF_MATCH("log")) {
1229 					size_t cpylen = (
1230 					    vlen <= sizeof(log_var_names) - 1 ?
1231 					    vlen : sizeof(log_var_names) - 1);
1232 					strncpy(log_var_names, v, cpylen);
1233 					log_var_names[cpylen] = '\0';
1234 					continue;
1235 				}
1236 			}
1237 			if (CONF_MATCH("thp")) {
1238 				bool match = false;
1239 				for (int ii = 0; ii < thp_mode_names_limit; ii++) {
1240 					if (strncmp(thp_mode_names[ii], v, vlen)
1241 					    == 0) {
1242 						if (!have_madvise_huge) {
1243 							malloc_conf_error(
1244 							    "No THP support",
1245 							    k, klen, v, vlen);
1246 						}
1247 						opt_thp = ii;
1248 						match = true;
1249 						break;
1250 					}
1251 				}
1252 				if (!match) {
1253 					malloc_conf_error("Invalid conf value",
1254 					    k, klen, v, vlen);
1255 				}
1256 				continue;
1257 			}
1258 			malloc_conf_error("Invalid conf pair", k, klen, v,
1259 			    vlen);
1260 #undef CONF_MATCH
1261 #undef CONF_MATCH_VALUE
1262 #undef CONF_HANDLE_BOOL
1263 #undef CONF_MIN_no
1264 #undef CONF_MIN_yes
1265 #undef CONF_MAX_no
1266 #undef CONF_MAX_yes
1267 #undef CONF_HANDLE_T_U
1268 #undef CONF_HANDLE_UNSIGNED
1269 #undef CONF_HANDLE_SIZE_T
1270 #undef CONF_HANDLE_SSIZE_T
1271 #undef CONF_HANDLE_CHAR_P
1272 		}
1273 		if (opt_abort_conf && had_conf_error) {
1274 			malloc_abort_invalid_conf();
1275 		}
1276 	}
1277 	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1278 }
1279 
1280 static bool
1281 malloc_init_hard_needed(void) {
1282 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1283 	    malloc_init_recursible)) {
1284 		/*
1285 		 * Another thread initialized the allocator before this one
1286 		 * acquired init_lock, or this thread is the initializing
1287 		 * thread, and it is recursively allocating.
1288 		 */
1289 		return false;
1290 	}
1291 #ifdef JEMALLOC_THREADED_INIT
1292 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1293 		/* Busy-wait until the initializing thread completes. */
1294 		spin_t spinner = SPIN_INITIALIZER;
1295 		do {
1296 			malloc_mutex_unlock(TSDN_NULL, &init_lock);
1297 			spin_adaptive(&spinner);
1298 			malloc_mutex_lock(TSDN_NULL, &init_lock);
1299 		} while (!malloc_initialized());
1300 		return false;
1301 	}
1302 #endif
1303 	return true;
1304 }
1305 
1306 static bool
1307 malloc_init_hard_a0_locked(void) {
1308 	malloc_initializer = INITIALIZER;
1309 
1310 	if (config_prof) {
1311 		prof_boot0();
1312 	}
1313 	malloc_conf_init();
1314 	if (opt_stats_print) {
1315 		/* Print statistics at exit. */
1316 		if (atexit(stats_print_atexit) != 0) {
1317 			malloc_write("<jemalloc>: Error in atexit()\n");
1318 			if (opt_abort) {
1319 				abort();
1320 			}
1321 		}
1322 	}
1323 	if (pages_boot()) {
1324 		return true;
1325 	}
1326 	if (base_boot(TSDN_NULL)) {
1327 		return true;
1328 	}
1329 	if (extent_boot()) {
1330 		return true;
1331 	}
1332 	if (ctl_boot()) {
1333 		return true;
1334 	}
1335 	if (config_prof) {
1336 		prof_boot1();
1337 	}
1338 	arena_boot();
1339 	if (tcache_boot(TSDN_NULL)) {
1340 		return true;
1341 	}
1342 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1343 	    malloc_mutex_rank_exclusive)) {
1344 		return true;
1345 	}
1346 	/*
1347 	 * Create enough scaffolding to allow recursive allocation in
1348 	 * malloc_ncpus().
1349 	 */
1350 	narenas_auto = 1;
1351 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1352 	/*
1353 	 * Initialize one arena here.  The rest are lazily created in
1354 	 * arena_choose_hard().
1355 	 */
1356 	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)__UNCONST(&extent_hooks_default))
1357 	    == NULL) {
1358 		return true;
1359 	}
1360 	a0 = arena_get(TSDN_NULL, 0, false);
1361 	malloc_init_state = malloc_init_a0_initialized;
1362 
1363 	return false;
1364 }
1365 
1366 static bool
1367 malloc_init_hard_a0(void) {
1368 	bool ret;
1369 
1370 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1371 	ret = malloc_init_hard_a0_locked();
1372 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1373 	return ret;
1374 }
1375 
1376 /* Initialize data structures which may trigger recursive allocation. */
1377 static bool
1378 malloc_init_hard_recursible(void) {
1379 	malloc_init_state = malloc_init_recursible;
1380 
1381 	ncpus = malloc_ncpus();
1382 
1383 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1384     && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1385     !defined(__native_client__) && !defined(__NetBSD__))
1386 	/* LinuxThreads' pthread_atfork() allocates. */
1387 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1388 	    jemalloc_postfork_child) != 0) {
1389 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1390 		if (opt_abort) {
1391 			abort();
1392 		}
1393 		return true;
1394 	}
1395 #endif
1396 
1397 	if (background_thread_boot0()) {
1398 		return true;
1399 	}
1400 
1401 	return false;
1402 }
1403 
1404 static unsigned
1405 malloc_narenas_default(void) {
1406 	assert(ncpus > 0);
1407 	/*
1408 	 * For SMP systems, create more than one arena per CPU by
1409 	 * default.
1410 	 */
1411 	if (ncpus > 1) {
1412 		return ncpus << 2;
1413 	} else {
1414 		return 1;
1415 	}
1416 }
1417 
1418 static percpu_arena_mode_t
1419 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1420 	assert(!malloc_initialized());
1421 	assert(mode <= percpu_arena_disabled);
1422 
1423 	if (mode != percpu_arena_disabled) {
1424 		mode += percpu_arena_mode_enabled_base;
1425 	}
1426 
1427 	return mode;
1428 }
1429 
1430 static bool
1431 malloc_init_narenas(void) {
1432 	assert(ncpus > 0);
1433 
1434 	if (opt_percpu_arena != percpu_arena_disabled) {
1435 		if (!have_percpu_arena || malloc_getcpu() < 0) {
1436 			opt_percpu_arena = percpu_arena_disabled;
1437 			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1438 			    "available. Setting narenas to %u.\n", opt_narenas ?
1439 			    opt_narenas : malloc_narenas_default());
1440 			if (opt_abort) {
1441 				abort();
1442 			}
1443 		} else {
1444 			if (ncpus >= MALLOCX_ARENA_LIMIT) {
1445 				malloc_printf("<jemalloc>: narenas w/ percpu "
1446 				    "arena beyond limit (%d)\n", ncpus);
1447 				if (opt_abort) {
1448 					abort();
1449 				}
1450 				return true;
1451 			}
1452 			/* NB: opt_percpu_arena isn't fully initialized yet. */
1453 			if (percpu_arena_as_initialized(opt_percpu_arena) ==
1454 			    per_phycpu_arena && ncpus % 2 != 0) {
1455 				malloc_printf("<jemalloc>: invalid "
1456 				    "configuration -- per physical CPU arena "
1457 				    "with odd number (%u) of CPUs (no hyper "
1458 				    "threading?).\n", ncpus);
1459 				if (opt_abort)
1460 					abort();
1461 			}
1462 			unsigned n = percpu_arena_ind_limit(
1463 			    percpu_arena_as_initialized(opt_percpu_arena));
1464 			if (opt_narenas < n) {
1465 				/*
1466 				 * If narenas is specified with percpu_arena
1467 				 * enabled, actual narenas is set as the greater
1468 				 * of the two. percpu_arena_choose will be free
1469 				 * to use any of the arenas based on CPU
1470 				 * id. This is conservative (at a small cost)
1471 				 * but ensures correctness.
1472 				 *
1473 				 * If for some reason the ncpus determined at
1474 				 * boot is not the actual number (e.g. because
1475 				 * of affinity setting from numactl), reserving
1476 				 * narenas this way provides a workaround for
1477 				 * percpu_arena.
1478 				 */
1479 				opt_narenas = n;
1480 			}
1481 		}
1482 	}
1483 	if (opt_narenas == 0) {
1484 		opt_narenas = malloc_narenas_default();
1485 	}
1486 	assert(opt_narenas > 0);
1487 
1488 	narenas_auto = opt_narenas;
1489 	/*
1490 	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1491 	 */
1492 	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1493 		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1494 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1495 		    narenas_auto);
1496 	}
1497 	narenas_total_set(narenas_auto);
1498 
1499 	return false;
1500 }
1501 
1502 static void
1503 malloc_init_percpu(void) {
1504 	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1505 }
1506 
1507 static bool
1508 malloc_init_hard_finish(void) {
1509 	if (malloc_mutex_boot()) {
1510 		return true;
1511 	}
1512 
1513 	malloc_init_state = malloc_init_initialized;
1514 	malloc_slow_flag_init();
1515 
1516 	return false;
1517 }
1518 
1519 static void
1520 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1521 	malloc_mutex_assert_owner(tsdn, &init_lock);
1522 	malloc_mutex_unlock(tsdn, &init_lock);
1523 	if (reentrancy_set) {
1524 		assert(!tsdn_null(tsdn));
1525 		tsd_t *tsd = tsdn_tsd(tsdn);
1526 		assert(tsd_reentrancy_level_get(tsd) > 0);
1527 		post_reentrancy(tsd);
1528 	}
1529 }
1530 
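/*
 * Full initialization, in phases: bootstrap a0 under init_lock, boot tsd,
 * run the recursion-capable phase (ncpus, pthread_atfork), initialize
 * narenas/prof with the reentrancy level raised, then finish and, if
 * requested, start the background thread after init_lock is dropped.
 */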
1531 static bool
1532 malloc_init_hard(void) {
1533 	tsd_t *tsd;
1534 
1535 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1536 	_init_init_lock();
1537 #endif
1538 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1539 
1540 #define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
1541 	malloc_init_hard_cleanup(tsdn, reentrancy);	\
1542 	return ret;
1543 
1544 	if (!malloc_init_hard_needed()) {
1545 		UNLOCK_RETURN(TSDN_NULL, false, false)
1546 	}
1547 
1548 	if (malloc_init_state != malloc_init_a0_initialized &&
1549 	    malloc_init_hard_a0_locked()) {
1550 		UNLOCK_RETURN(TSDN_NULL, true, false)
1551 	}
1552 
1553 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1554 	/* Recursive allocation relies on functional tsd. */
1555 	tsd = malloc_tsd_boot0();
1556 	if (tsd == NULL) {
1557 		return true;
1558 	}
1559 	if (malloc_init_hard_recursible()) {
1560 		return true;
1561 	}
1562 
1563 	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1564 	/* Set reentrancy level to 1 during init. */
1565 	pre_reentrancy(tsd, NULL);
1566 	/* Initialize narenas before prof_boot2 (for allocation). */
1567 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1568 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1569 	}
1570 	if (config_prof && prof_boot2(tsd)) {
1571 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1572 	}
1573 
1574 	malloc_init_percpu();
1575 
1576 	if (malloc_init_hard_finish()) {
1577 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1578 	}
1579 	post_reentrancy(tsd);
1580 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1581 
1582 	witness_assert_lockless(witness_tsd_tsdn(
1583 	    tsd_witness_tsdp_get_unsafe(tsd)));
1584 	malloc_tsd_boot1();
1585 	/* Update TSD after tsd_boot1. */
1586 	tsd = tsd_fetch();
1587 	if (opt_background_thread) {
1588 		assert(have_background_thread);
1589 		/*
1590 		 * Need to finish init & unlock first before creating background
1591 		 * threads (pthread_create depends on malloc).  ctl_init (which
1592 		 * sets isthreaded) needs to be called without holding any lock.
1593 		 */
1594 		background_thread_ctl_init(tsd_tsdn(tsd));
1595 
1596 		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1597 		bool err = background_thread_create(tsd, 0);
1598 		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1599 		if (err) {
1600 			return true;
1601 		}
1602 	}
1603 #undef UNLOCK_RETURN
1604 	return false;
1605 }
1606 
1607 /*
1608  * End initialization functions.
1609  */
1610 /******************************************************************************/
1611 /*
1612  * Begin allocation-path internal functions and data structures.
1613  */
1614 
1615 /*
1616  * Settings determined by the documented behavior of the allocation functions.
1617  */
1618 typedef struct static_opts_s static_opts_t;
1619 struct static_opts_s {
1620 	/* Whether or not allocation size may overflow. */
1621 	bool may_overflow;
1622 	/* Whether or not allocations of size 0 should be treated as size 1. */
1623 	bool bump_empty_alloc;
1624 	/*
1625 	 * Whether to assert that allocations are not of size 0 (after any
1626 	 * bumping).
1627 	 */
1628 	bool assert_nonempty_alloc;
1629 
1630 	/*
1631 	 * Whether or not to modify the 'result' argument to malloc in case of
1632 	 * error.
1633 	 */
1634 	bool null_out_result_on_error;
1635 	/* Whether to set errno when we encounter an error condition. */
1636 	bool set_errno_on_error;
1637 
1638 	/*
1639 	 * The minimum valid alignment for functions requesting aligned storage.
1640 	 */
1641 	size_t min_alignment;
1642 
1643 	/* The error string to use if we oom. */
1644 	const char *oom_string;
1645 	/* The error string to use if the passed-in alignment is invalid. */
1646 	const char *invalid_alignment_string;
1647 
1648 	/*
1649 	 * False if we're configured to skip some time-consuming operations.
1650 	 *
1651 	 * This isn't really a malloc "behavior", but it acts as a useful
1652 	 * summary of several other static (or at least, static after program
1653 	 * initialization) options.
1654 	 */
1655 	bool slow;
1656 };
1657 
1658 JEMALLOC_ALWAYS_INLINE void
1659 static_opts_init(static_opts_t *static_opts) {
1660 	static_opts->may_overflow = false;
1661 	static_opts->bump_empty_alloc = false;
1662 	static_opts->assert_nonempty_alloc = false;
1663 	static_opts->null_out_result_on_error = false;
1664 	static_opts->set_errno_on_error = false;
1665 	static_opts->min_alignment = 0;
1666 	static_opts->oom_string = "";
1667 	static_opts->invalid_alignment_string = "";
1668 	static_opts->slow = false;
1669 }
1670 
1671 /*
1672  * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
1673  * should have one constant here per magic value there.  Note however that the
1674  * representations need not be related.
1675  */
1676 #define TCACHE_IND_NONE ((unsigned)-1)
1677 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1678 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1679 
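/*
 * Per-call ("dynamic") options for the shared allocation path: where to store
 * the result, the request as num_items * item_size, the required alignment
 * (0 for none), whether to zero the memory, and which tcache/arena to use
 * (the *_AUTOMATIC/*_NONE sentinels above select the defaults).
 */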
1680 typedef struct dynamic_opts_s dynamic_opts_t;
1681 struct dynamic_opts_s {
1682 	void **result;
1683 	size_t num_items;
1684 	size_t item_size;
1685 	size_t alignment;
1686 	bool zero;
1687 	unsigned tcache_ind;
1688 	unsigned arena_ind;
1689 };
1690 
1691 JEMALLOC_ALWAYS_INLINE void
1692 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1693 	dynamic_opts->result = NULL;
1694 	dynamic_opts->num_items = 0;
1695 	dynamic_opts->item_size = 0;
1696 	dynamic_opts->alignment = 0;
1697 	dynamic_opts->zero = false;
1698 	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1699 	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1700 }
1701 
1702 /* ind is ignored if dopts->alignment > 0. */
1703 JEMALLOC_ALWAYS_INLINE void *
1704 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1705     size_t size, size_t usize, szind_t ind) {
1706 	tcache_t *tcache;
1707 	arena_t *arena;
1708 
1709 	/* Fill in the tcache. */
1710 	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1711 		if (likely(!sopts->slow)) {
1712 			/* Getting tcache ptr unconditionally. */
1713 			tcache = tsd_tcachep_get(tsd);
1714 			assert(tcache == tcache_get(tsd));
1715 		} else {
1716 			tcache = tcache_get(tsd);
1717 		}
1718 	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1719 		tcache = NULL;
1720 	} else {
1721 		tcache = tcaches_get(tsd, dopts->tcache_ind);
1722 	}
1723 
1724 	/* Fill in the arena. */
1725 	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1726 		/*
1727 		 * In case of automatic arena management, we defer arena
1728 		 * computation until as late as we can, hoping to fill the
1729 		 * allocation out of the tcache.
1730 		 */
1731 		arena = NULL;
1732 	} else {
1733 		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1734 	}
1735 
1736 	if (unlikely(dopts->alignment != 0)) {
1737 		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1738 		    dopts->zero, tcache, arena);
1739 	}
1740 
1741 	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1742 	    arena, sopts->slow);
1743 }
1744 
1745 JEMALLOC_ALWAYS_INLINE void *
1746 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1747     size_t usize, szind_t ind) {
1748 	void *ret;
1749 
1750 	/*
1751 	 * For small allocations, sampling bumps the usize.  If so, we allocate
1752 	 * from the ind_large bucket.
1753 	 */
1754 	szind_t ind_large;
1755 	size_t bumped_usize = usize;
1756 
1757 	if (usize <= SMALL_MAXCLASS) {
1758 		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1759 		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1760 		    == LARGE_MINCLASS);
1761 		ind_large = sz_size2index(LARGE_MINCLASS);
1762 		bumped_usize = sz_s2u(LARGE_MINCLASS);
1763 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1764 		    bumped_usize, ind_large);
1765 		if (unlikely(ret == NULL)) {
1766 			return NULL;
1767 		}
1768 		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1769 	} else {
1770 		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1771 	}
1772 
1773 	return ret;
1774 }
1775 
1776 /*
1777  * Returns true if the allocation will overflow, and false otherwise.  Sets
1778  * *size to the product either way.
1779  */
1780 JEMALLOC_ALWAYS_INLINE bool
1781 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1782     size_t *size) {
1783 	/*
1784 	 * This function is just num_items * item_size, except that we may have
1785 	 * to check for overflow.
1786 	 */
1787 
1788 	if (!may_overflow) {
1789 		assert(dopts->num_items == 1);
1790 		*size = dopts->item_size;
1791 		return false;
1792 	}
1793 
1794 	/* A size_t with its high-half bits all set to 1. */
1795 	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1796 
1797 	*size = dopts->item_size * dopts->num_items;
1798 
1799 	if (unlikely(*size == 0)) {
1800 		return (dopts->num_items != 0 && dopts->item_size != 0);
1801 	}
1802 
1803 	/*
1804 	 * We got a non-zero size, but we don't know if we overflowed to get
1805 	 * there.  To avoid having to do a divide, we'll be clever and note that
1806 	 * if both A and B can be represented in N/2 bits, then their product
1807 	 * can be represented in N bits (without the possibility of overflow).
1808 	 */
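	/*
	 * For example, with an 8-byte size_t, high_bits is
	 * 0xffffffff00000000; if neither operand has any of those bits set,
	 * both fit in 32 bits, the 64-bit product cannot wrap, and the
	 * division check below is skipped.
	 */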
1809 	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1810 		return false;
1811 	}
1812 	if (likely(*size / dopts->item_size == dopts->num_items)) {
1813 		return false;
1814 	}
1815 	return true;
1816 }
1817 
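/*
 * Core allocation body, parameterized by per-entry-point static_opts and
 * per-call dynamic_opts: computes the request size, handles alignment and
 * profiling, stores the result through dopts->result, and returns 0 on
 * success or an errno value on failure.
 */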
1818 JEMALLOC_ALWAYS_INLINE int
1819 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1820 	/* Where the actual allocated memory will live. */
1821 	void *allocation = NULL;
1822 	/* Filled in by compute_size_with_overflow below. */
1823 	size_t size = 0;
1824 	/*
1825 	 * For unaligned allocations, we need only ind.  For aligned
1826 	 * allocations, or in case of stats or profiling, we need usize.
1827 	 *
1828 	 * These are actually dead stores, in that their values are reset before
1829 	 * any branch on their value is taken.  Sometimes though, it's
1830 	 * convenient to pass them as arguments before this point.  To avoid
1831 	 * undefined behavior then, we initialize them with dummy stores.
1832 	 */
1833 	szind_t ind = 0;
1834 	size_t usize = 0;
1835 
1836 	/* Reentrancy is only checked on slow path. */
1837 	int8_t reentrancy_level;
1838 
1839 	/* Compute the amount of memory the user wants. */
1840 	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1841 	    &size))) {
1842 		goto label_oom;
1843 	}
1844 
1845 	/* Validate the user input. */
1846 	if (sopts->bump_empty_alloc) {
1847 		if (unlikely(size == 0)) {
1848 			size = 1;
1849 		}
1850 	}
1851 
1852 	if (sopts->assert_nonempty_alloc) {
1853 		assert (size != 0);
1854 	}
1855 
1856 	if (unlikely(dopts->alignment < sopts->min_alignment
1857 	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1858 		goto label_invalid_alignment;
1859 	}
1860 
1861 	/* This is the beginning of the "core" algorithm. */
1862 
1863 	if (dopts->alignment == 0) {
1864 		ind = sz_size2index(size);
1865 		if (unlikely(ind >= NSIZES)) {
1866 			goto label_oom;
1867 		}
1868 		if (config_stats || (config_prof && opt_prof)) {
1869 			usize = sz_index2size(ind);
1870 			assert(usize > 0 && usize <= LARGE_MAXCLASS);
1871 		}
1872 	} else {
1873 		usize = sz_sa2u(size, dopts->alignment);
1874 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1875 			goto label_oom;
1876 		}
1877 	}
1878 
1879 	check_entry_exit_locking(tsd_tsdn(tsd));
1880 
1881 	/*
1882 	 * If we need to handle reentrancy, we can do it out of a
1883 	 * known-initialized arena (i.e. arena 0).
1884 	 */
1885 	reentrancy_level = tsd_reentrancy_level_get(tsd);
1886 	if (sopts->slow && unlikely(reentrancy_level > 0)) {
1887 		/*
1888 		 * We should never specify particular arenas or tcaches from
1889 		 * within our internal allocations.
1890 		 */
1891 		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1892 		    dopts->tcache_ind == TCACHE_IND_NONE);
1893 		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1894 		dopts->tcache_ind = TCACHE_IND_NONE;
1895 		/* We know that arena 0 has already been initialized. */
1896 		dopts->arena_ind = 0;
1897 	}
1898 
1899 	/* If profiling is on, get our profiling context. */
1900 	if (config_prof && opt_prof) {
1901 		/*
1902 		 * Note that if we're going down this path, usize must have been
1903 		 * initialized in the previous if statement.
1904 		 */
1905 		prof_tctx_t *tctx = prof_alloc_prep(
1906 		    tsd, usize, prof_active_get_unlocked(), true);
1907 
1908 		alloc_ctx_t alloc_ctx;
1909 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1910 			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1911 			allocation = imalloc_no_sample(
1912 			    sopts, dopts, tsd, usize, usize, ind);
1913 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1914 			/*
1915 			 * Note that ind might still be 0 here.  This is fine;
1916 			 * imalloc_sample ignores ind if dopts->alignment > 0.
1917 			 */
1918 			allocation = imalloc_sample(
1919 			    sopts, dopts, tsd, usize, ind);
1920 			alloc_ctx.slab = false;
1921 		} else {
1922 			allocation = NULL;
1923 		}
1924 
1925 		if (unlikely(allocation == NULL)) {
1926 			prof_alloc_rollback(tsd, tctx, true);
1927 			goto label_oom;
1928 		}
1929 		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1930 	} else {
1931 		/*
1932 		 * If dopts->alignment > 0, then ind is still 0, but usize was
1933 		 * computed in the previous if statement.  Down the positive
1934 		 * alignment path, imalloc_no_sample ignores ind and size
1935 		 * (relying only on usize).
1936 		 */
1937 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1938 		    ind);
1939 		if (unlikely(allocation == NULL)) {
1940 			goto label_oom;
1941 		}
1942 	}
1943 
1944 	/*
1945 	 * Allocation has been done at this point.  We still have some
1946 	 * post-allocation work to do though.
1947 	 */
1948 	assert(dopts->alignment == 0
1949 	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1950 
1951 	if (config_stats) {
1952 		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1953 		*tsd_thread_allocatedp_get(tsd) += usize;
1954 	}
1955 
1956 	if (sopts->slow) {
1957 		UTRACE(0, size, allocation);
1958 	}
1959 
1960 	/* Success! */
1961 	check_entry_exit_locking(tsd_tsdn(tsd));
1962 	*dopts->result = allocation;
1963 	return 0;
1964 
1965 label_oom:
1966 	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1967 		malloc_write(sopts->oom_string);
1968 		abort();
1969 	}
1970 
1971 	if (sopts->slow) {
1972 		UTRACE(NULL, size, NULL);
1973 	}
1974 
1975 	check_entry_exit_locking(tsd_tsdn(tsd));
1976 
1977 	if (sopts->set_errno_on_error) {
1978 		set_errno(ENOMEM);
1979 	}
1980 
1981 	if (sopts->null_out_result_on_error) {
1982 		*dopts->result = NULL;
1983 	}
1984 
1985 	return ENOMEM;
1986 
1987 	/*
1988 	 * This label is only jumped to by one goto; we move it out of line
1989 	 * anyway to avoid obscuring the non-error paths, and for symmetry with
1990 	 * the oom case.
1991 	 */
1992 label_invalid_alignment:
1993 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1994 		malloc_write(sopts->invalid_alignment_string);
1995 		abort();
1996 	}
1997 
1998 	if (sopts->set_errno_on_error) {
1999 		set_errno(EINVAL);
2000 	}
2001 
2002 	if (sopts->slow) {
2003 		UTRACE(NULL, size, NULL);
2004 	}
2005 
2006 	check_entry_exit_locking(tsd_tsdn(tsd));
2007 
2008 	if (sopts->null_out_result_on_error) {
2009 		*dopts->result = NULL;
2010 	}
2011 
2012 	return EINVAL;
2013 }
2014 
2015 /* Returns the errno-style error code of the allocation. */
2016 JEMALLOC_ALWAYS_INLINE int
2017 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
2018 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2019 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2020 			malloc_write(sopts->oom_string);
2021 			abort();
2022 		}
2023 		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2024 		set_errno(ENOMEM);
2025 		*dopts->result = NULL;
2026 
2027 		return ENOMEM;
2028 	}
2029 
2030 	/* We always need the tsd.  Let's grab it right away. */
2031 	tsd_t *tsd = tsd_fetch();
2032 	assert(tsd);
2033 	if (likely(tsd_fast(tsd))) {
2034 		/* Fast and common path. */
2035 		tsd_assert_fast(tsd);
2036 		sopts->slow = false;
2037 		return imalloc_body(sopts, dopts, tsd);
2038 	} else {
2039 		sopts->slow = true;
2040 		return imalloc_body(sopts, dopts, tsd);
2041 	}
2042 }
2043 /******************************************************************************/
2044 /*
2045  * Begin malloc(3)-compatible functions.
2046  */
2047 
2048 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2049 void JEMALLOC_NOTHROW *
2050 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2051 je_malloc(size_t size) {
2052 	void *ret;
2053 	static_opts_t sopts;
2054 	dynamic_opts_t dopts;
2055 
2056 	LOG("core.malloc.entry", "size: %zu", size);
2057 
2058 	static_opts_init(&sopts);
2059 	dynamic_opts_init(&dopts);
2060 
2061 	sopts.bump_empty_alloc = true;
2062 	sopts.null_out_result_on_error = true;
2063 	sopts.set_errno_on_error = true;
2064 	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2065 
2066 	dopts.result = &ret;
2067 	dopts.num_items = 1;
2068 	dopts.item_size = size;
2069 
2070 	imalloc(&sopts, &dopts);
2071 
2072 	LOG("core.malloc.exit", "result: %p", ret);
2073 
2074 	return ret;
2075 }
2076 
2077 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2078 JEMALLOC_ATTR(nonnull(1))
2079 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2080 	int ret;
2081 	static_opts_t sopts;
2082 	dynamic_opts_t dopts;
2083 
2084 	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2085 	    "size: %zu", memptr, alignment, size);
2086 
2087 	static_opts_init(&sopts);
2088 	dynamic_opts_init(&dopts);
2089 
2090 	sopts.bump_empty_alloc = true;
2091 	sopts.min_alignment = sizeof(void *);
2092 	sopts.oom_string =
2093 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2094 	sopts.invalid_alignment_string =
2095 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2096 
2097 	dopts.result = memptr;
2098 	dopts.num_items = 1;
2099 	dopts.item_size = size;
2100 	dopts.alignment = alignment;
2101 
2102 	ret = imalloc(&sopts, &dopts);
2103 
2104 	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2105 	    *memptr);
2106 
2107 	return ret;
2108 }
2109 
2110 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2111 void JEMALLOC_NOTHROW *
2112 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2113 je_aligned_alloc(size_t alignment, size_t size) {
2114 	void *ret;
2115 
2116 	static_opts_t sopts;
2117 	dynamic_opts_t dopts;
2118 
2119 	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2120 	    alignment, size);
2121 
2122 	static_opts_init(&sopts);
2123 	dynamic_opts_init(&dopts);
2124 
2125 	sopts.bump_empty_alloc = true;
2126 	sopts.null_out_result_on_error = true;
2127 	sopts.set_errno_on_error = true;
2128 	sopts.min_alignment = 1;
2129 	sopts.oom_string =
2130 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2131 	sopts.invalid_alignment_string =
2132 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2133 
2134 	dopts.result = &ret;
2135 	dopts.num_items = 1;
2136 	dopts.item_size = size;
2137 	dopts.alignment = alignment;
2138 
2139 	imalloc(&sopts, &dopts);
2140 
2141 	LOG("core.aligned_alloc.exit", "result: %p", ret);
2142 
2143 	return ret;
2144 }
2145 
2146 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2147 void JEMALLOC_NOTHROW *
2148 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2149 je_calloc(size_t num, size_t size) {
2150 	void *ret;
2151 	static_opts_t sopts;
2152 	dynamic_opts_t dopts;
2153 
2154 	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2155 
2156 	static_opts_init(&sopts);
2157 	dynamic_opts_init(&dopts);
2158 
2159 	sopts.may_overflow = true;
2160 	sopts.bump_empty_alloc = true;
2161 	sopts.null_out_result_on_error = true;
2162 	sopts.set_errno_on_error = true;
2163 	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2164 
2165 	dopts.result = &ret;
2166 	dopts.num_items = num;
2167 	dopts.item_size = size;
2168 	dopts.zero = true;
2169 
2170 	imalloc(&sopts, &dopts);
2171 
2172 	LOG("core.calloc.exit", "result: %p", ret);
2173 
2174 	return ret;
2175 }
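
/*
 * Editor's sketch: because je_calloc() sets sopts.may_overflow, a request
 * whose element count times element size cannot be represented in a size_t
 * fails cleanly instead of returning a too-small buffer.  Assuming the
 * un-prefixed public name and <stdint.h>/<errno.h>:
 *
 *	void *p = calloc(SIZE_MAX / 2, 4);	// product overflows size_t
 *	assert(p == NULL && errno == ENOMEM);
 */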
2176 
2177 static void *
2178 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2179     prof_tctx_t *tctx) {
2180 	void *p;
2181 
2182 	if (tctx == NULL) {
2183 		return NULL;
2184 	}
2185 	if (usize <= SMALL_MAXCLASS) {
2186 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2187 		if (p == NULL) {
2188 			return NULL;
2189 		}
2190 		arena_prof_promote(tsd_tsdn(tsd), p, usize);
2191 	} else {
2192 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2193 	}
2194 
2195 	return p;
2196 }
2197 
2198 JEMALLOC_ALWAYS_INLINE void *
2199 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2200    alloc_ctx_t *alloc_ctx) {
2201 	void *p;
2202 	bool prof_activex;
2203 	prof_tctx_t *old_tctx, *tctx;
2204 
2205 	prof_activex = prof_active_get_unlocked();
2206 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2207 	tctx = prof_alloc_prep(tsd, usize, prof_activex, true);
2208 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2209 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2210 	} else {
2211 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2212 	}
2213 	if (unlikely(p == NULL)) {
2214 		prof_alloc_rollback(tsd, tctx, true);
2215 		return NULL;
2216 	}
2217 	prof_realloc(tsd, p, usize, tctx, prof_activex, true, old_ptr,
2218 	    old_usize, old_tctx);
2219 
2220 	return p;
2221 }
2222 
2223 JEMALLOC_ALWAYS_INLINE void
2224 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2225 	if (!slow_path) {
2226 		tsd_assert_fast(tsd);
2227 	}
2228 	check_entry_exit_locking(tsd_tsdn(tsd));
2229 	if (tsd_reentrancy_level_get(tsd) != 0) {
2230 		assert(slow_path);
2231 	}
2232 
2233 	assert(ptr != NULL);
2234 	assert(malloc_initialized() || IS_INITIALIZER);
2235 
2236 	alloc_ctx_t alloc_ctx;
2237 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2238 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2239 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2240 	assert(alloc_ctx.szind != NSIZES);
2241 
2242 	size_t usize;
2243 	if (config_prof && opt_prof) {
2244 		usize = sz_index2size(alloc_ctx.szind);
2245 		prof_free(tsd, ptr, usize, &alloc_ctx);
2246 	} else if (config_stats) {
2247 		usize = sz_index2size(alloc_ctx.szind);
2248 	}
2249 	if (config_stats) {
2250 		*tsd_thread_deallocatedp_get(tsd) += usize;
2251 	}
2252 
2253 	if (likely(!slow_path)) {
2254 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2255 		    false);
2256 	} else {
2257 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2258 		    true);
2259 	}
2260 }
2261 
2262 JEMALLOC_ALWAYS_INLINE void
2263 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2264 	if (!slow_path) {
2265 		tsd_assert_fast(tsd);
2266 	}
2267 	check_entry_exit_locking(tsd_tsdn(tsd));
2268 	if (tsd_reentrancy_level_get(tsd) != 0) {
2269 		assert(slow_path);
2270 	}
2271 
2272 	assert(ptr != NULL);
2273 	assert(malloc_initialized() || IS_INITIALIZER);
2274 
2275 	alloc_ctx_t alloc_ctx, *ctx;
2276 	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2277 		/*
2278 		 * When cache_oblivious is disabled and ptr is not page aligned,
2279 		 * the allocation was not sampled -- usize can be used to
2280 		 * determine szind directly.
2281 		 */
2282 		alloc_ctx.szind = sz_size2index(usize);
2283 		alloc_ctx.slab = true;
2284 		ctx = &alloc_ctx;
2285 		if (config_debug) {
2286 			alloc_ctx_t dbg_ctx;
2287 			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2288 			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2289 			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2290 			    &dbg_ctx.slab);
2291 			assert(dbg_ctx.szind == alloc_ctx.szind);
2292 			assert(dbg_ctx.slab == alloc_ctx.slab);
2293 		}
2294 	} else if (config_prof && opt_prof) {
2295 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2296 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2297 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2298 		assert(alloc_ctx.szind == sz_size2index(usize));
2299 		ctx = &alloc_ctx;
2300 	} else {
2301 		ctx = NULL;
2302 	}
2303 
2304 	if (config_prof && opt_prof) {
2305 		prof_free(tsd, ptr, usize, ctx);
2306 	}
2307 	if (config_stats) {
2308 		*tsd_thread_deallocatedp_get(tsd) += usize;
2309 	}
2310 
2311 	if (likely(!slow_path)) {
2312 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2313 	} else {
2314 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2315 	}
2316 }
2317 
2318 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2319 void JEMALLOC_NOTHROW *
2320 JEMALLOC_ALLOC_SIZE(2)
2321 je_realloc(void *ptr, size_t size) {
2322 	void *ret;
2323 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2324 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2325 	size_t old_usize = 0;
2326 
2327 	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2328 
2329 	if (unlikely(size == 0)) {
2330 #if 0
2331 	/* http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_400 */
2332 		if (ptr != NULL) {
2333 			/* realloc(ptr, 0) is equivalent to free(ptr). */
2334 			UTRACE(ptr, 0, 0);
2335 			tcache_t *tcache;
2336 			tsd_t *tsd = tsd_fetch();
2337 			if (tsd_reentrancy_level_get(tsd) == 0) {
2338 				tcache = tcache_get(tsd);
2339 			} else {
2340 				tcache = NULL;
2341 			}
2342 			ifree(tsd, ptr, tcache, true);
2343 
2344 			LOG("core.realloc.exit", "result: %p", NULL);
2345 			return NULL;
2346 		}
2347 #endif
2348 		size = 1;
2349 	}
2350 
2351 	if (likely(ptr != NULL)) {
2352 		assert(malloc_initialized() || IS_INITIALIZER);
2353 		tsd_t *tsd = tsd_fetch();
2354 
2355 		check_entry_exit_locking(tsd_tsdn(tsd));
2356 
2357 		alloc_ctx_t alloc_ctx;
2358 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2359 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2360 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2361 		assert(alloc_ctx.szind != NSIZES);
2362 		old_usize = sz_index2size(alloc_ctx.szind);
2363 		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2364 		if (config_prof && opt_prof) {
2365 			usize = sz_s2u(size);
2366 			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2367 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2368 			    &alloc_ctx);
2369 		} else {
2370 			if (config_stats) {
2371 				usize = sz_s2u(size);
2372 			}
2373 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2374 		}
2375 		tsdn = tsd_tsdn(tsd);
2376 	} else {
2377 		/* realloc(NULL, size) is equivalent to malloc(size). */
2378 		void *ret1 = je_malloc(size);
2379 		LOG("core.realloc.exit", "result: %p", ret1);
2380 		return ret1;
2381 	}
2382 
2383 	if (unlikely(ret == NULL)) {
2384 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2385 			malloc_write("<jemalloc>: Error in realloc(): "
2386 			    "out of memory\n");
2387 			abort();
2388 		}
2389 		set_errno(ENOMEM);
2390 	}
2391 	if (config_stats && likely(ret != NULL)) {
2392 		tsd_t *tsd;
2393 
2394 		assert(usize == isalloc(tsdn, ret));
2395 		tsd = tsdn_tsd(tsdn);
2396 		*tsd_thread_allocatedp_get(tsd) += usize;
2397 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2398 	}
2399 	UTRACE(ptr, size, ret);
2400 	check_entry_exit_locking(tsdn);
2401 
2402 	LOG("core.realloc.exit", "result: %p", ret);
2403 	return ret;
2404 }
2405 
2406 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2407 je_free(void *ptr) {
2408 	LOG("core.free.entry", "ptr: %p", ptr);
2409 
2410 	UTRACE(ptr, 0, 0);
2411 	if (likely(ptr != NULL)) {
2412 		/*
2413 		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2414 		 * based on only free() calls -- other activities trigger the
2415 		 * minimal to full transition.  This is because free() may
2416 		 * happen during thread shutdown after tls deallocation: if a
2417 		 * thread never had any malloc activities until then, a
2418 		 * fully-setup tsd won't be destructed properly.
2419 		 */
2420 		tsd_t *tsd = tsd_fetch_min();
2421 		check_entry_exit_locking(tsd_tsdn(tsd));
2422 
2423 		tcache_t *tcache;
2424 		if (likely(tsd_fast(tsd))) {
2425 			tsd_assert_fast(tsd);
2426 			/* Unconditionally get tcache ptr on fast path. */
2427 			tcache = tsd_tcachep_get(tsd);
2428 			ifree(tsd, ptr, tcache, false);
2429 		} else {
2430 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2431 				tcache = tcache_get(tsd);
2432 			} else {
2433 				tcache = NULL;
2434 			}
2435 			ifree(tsd, ptr, tcache, true);
2436 		}
2437 		check_entry_exit_locking(tsd_tsdn(tsd));
2438 	}
2439 	LOG("core.free.exit", "");
2440 }
2441 
2442 /*
2443  * End malloc(3)-compatible functions.
2444  */
2445 /******************************************************************************/
2446 /*
2447  * Begin non-standard override functions.
2448  */
2449 
2450 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2451 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2452 void JEMALLOC_NOTHROW *
2453 JEMALLOC_ATTR(malloc)
2454 je_memalign(size_t alignment, size_t size) {
2455 	void *ret;
2456 	static_opts_t sopts;
2457 	dynamic_opts_t dopts;
2458 
2459 	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2460 	    size);
2461 
2462 	static_opts_init(&sopts);
2463 	dynamic_opts_init(&dopts);
2464 
2465 	sopts.bump_empty_alloc = true;
2466 	sopts.min_alignment = 1;
2467 	sopts.oom_string =
2468 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2469 	sopts.invalid_alignment_string =
2470 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2471 	sopts.null_out_result_on_error = true;
2472 
2473 	dopts.result = &ret;
2474 	dopts.num_items = 1;
2475 	dopts.item_size = size;
2476 	dopts.alignment = alignment;
2477 
2478 	imalloc(&sopts, &dopts);
2479 
2480 	LOG("core.memalign.exit", "result: %p", ret);
2481 	return ret;
2482 }
2483 #endif
2484 
2485 #ifdef JEMALLOC_OVERRIDE_VALLOC
2486 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2487 void JEMALLOC_NOTHROW *
2488 JEMALLOC_ATTR(malloc)
2489 je_valloc(size_t size) {
2490 	void *ret;
2491 
2492 	static_opts_t sopts;
2493 	dynamic_opts_t dopts;
2494 
2495 	LOG("core.valloc.entry", "size: %zu\n", size);
2496 
2497 	static_opts_init(&sopts);
2498 	dynamic_opts_init(&dopts);
2499 
2500 	sopts.bump_empty_alloc = true;
2501 	sopts.null_out_result_on_error = true;
2502 	sopts.min_alignment = PAGE;
2503 	sopts.oom_string =
2504 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2505 	sopts.invalid_alignment_string =
2506 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2507 
2508 	dopts.result = &ret;
2509 	dopts.num_items = 1;
2510 	dopts.item_size = size;
2511 	dopts.alignment = PAGE;
2512 
2513 	imalloc(&sopts, &dopts);
2514 
2515 	LOG("core.valloc.exit", "result: %p\n", ret);
2516 	return ret;
2517 }
2518 #endif
2519 
2520 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2521 /*
2522  * glibc provides the RTLD_DEEPBIND flag for dlopen, which can make it possible
2523  * to inconsistently reference libc's malloc(3)-compatible functions
2524  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2525  *
2526  * These definitions interpose hooks in glibc.  The functions are actually
2527  * passed an extra argument for the caller return address, which will be
2528  * ignored.
2529  */
2530 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2531 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2532 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2533 #  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2534 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2535     je_memalign;
2536 #  endif
2537 
2538 #  ifdef CPU_COUNT
2539 /*
2540  * To enable static linking with glibc, the libc-specific malloc interface must
2541  * also be implemented, so none of glibc's malloc.o functions are added to the
2542  * link.
2543  */
2544 #    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2545 /* To force macro expansion of je_ prefix before stringification. */
2546 #    define PREALIAS(je_fn)	ALIAS(je_fn)
2547 #    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2548 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2549 #    endif
2550 #    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2551 void __libc_free(void* ptr) PREALIAS(je_free);
2552 #    endif
2553 #    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2554 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2555 #    endif
2556 #    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2557 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2558 #    endif
2559 #    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2560 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2561 #    endif
2562 #    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2563 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2564 #    endif
2565 #    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2566 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2567 #    endif
2568 #    undef PREALIAS
2569 #    undef ALIAS
2570 #  endif
2571 #endif
2572 
2573 /*
2574  * End non-standard override functions.
2575  */
2576 /******************************************************************************/
2577 /*
2578  * Begin non-standard functions.
2579  */
2580 
2581 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2582 void JEMALLOC_NOTHROW *
2583 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2584 je_mallocx(size_t size, int flags) {
2585 	void *ret;
2586 	static_opts_t sopts;
2587 	dynamic_opts_t dopts;
2588 
2589 	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2590 
2591 	static_opts_init(&sopts);
2592 	dynamic_opts_init(&dopts);
2593 
2594 	sopts.assert_nonempty_alloc = true;
2595 	sopts.null_out_result_on_error = true;
2596 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2597 
2598 	dopts.result = &ret;
2599 	dopts.num_items = 1;
2600 	dopts.item_size = size;
2601 	if (unlikely(flags != 0)) {
2602 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2603 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2604 		}
2605 
2606 		dopts.zero = MALLOCX_ZERO_GET(flags);
2607 
2608 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2609 			if ((flags & MALLOCX_TCACHE_MASK)
2610 			    == MALLOCX_TCACHE_NONE) {
2611 				dopts.tcache_ind = TCACHE_IND_NONE;
2612 			} else {
2613 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2614 			}
2615 		} else {
2616 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2617 		}
2618 
2619 		if ((flags & MALLOCX_ARENA_MASK) != 0)
2620 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2621 	}
2622 
2623 	imalloc(&sopts, &dopts);
2624 
2625 	LOG("core.mallocx.exit", "result: %p", ret);
2626 	return ret;
2627 }
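
/*
 * Editor's sketch of the public mallocx() flag encoding decoded above;
 * alignment, zeroing, tcache and arena selection are all packed into the
 * single flags argument via documented MALLOCX_* macros (standalone builds
 * declare these in <jemalloc/jemalloc.h>):
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	if (p != NULL) {
 *		// ... use the 64-byte-aligned, zeroed buffer ...
 *		dallocx(p, 0);
 *	}
 *
 * MALLOCX_TCACHE_NONE or MALLOCX_ARENA(ind) can be OR'ed in to bypass the
 * thread cache or target an explicit arena, matching the decoding above.
 */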
2628 
2629 static void *
2630 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2631     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2632     prof_tctx_t *tctx) {
2633 	void *p;
2634 
2635 	if (tctx == NULL) {
2636 		return NULL;
2637 	}
2638 	if (usize <= SMALL_MAXCLASS) {
2639 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2640 		    alignment, zero, tcache, arena);
2641 		if (p == NULL) {
2642 			return NULL;
2643 		}
2644 		arena_prof_promote(tsdn, p, usize);
2645 	} else {
2646 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2647 		    tcache, arena);
2648 	}
2649 
2650 	return p;
2651 }
2652 
2653 JEMALLOC_ALWAYS_INLINE void *
2654 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2655     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2656     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2657 	void *p;
2658 	bool prof_activex;
2659 	prof_tctx_t *old_tctx, *tctx;
2660 
2661 	prof_activex = prof_active_get_unlocked();
2662 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2663 	tctx = prof_alloc_prep(tsd, *usize, prof_activex, false);
2664 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2665 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2666 		    *usize, alignment, zero, tcache, arena, tctx);
2667 	} else {
2668 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2669 		    zero, tcache, arena);
2670 	}
2671 	if (unlikely(p == NULL)) {
2672 		prof_alloc_rollback(tsd, tctx, false);
2673 		return NULL;
2674 	}
2675 
2676 	if (p == old_ptr && alignment != 0) {
2677 		/*
2678 		 * The allocation did not move, so it is possible that the size
2679 		 * class is smaller than would guarantee the requested
2680 		 * alignment, and that the alignment constraint was
2681 		 * serendipitously satisfied.  Additionally, old_usize may not
2682 		 * be the same as the current usize because of in-place large
2683 		 * reallocation.  Therefore, query the actual value of usize.
2684 		 */
2685 		*usize = isalloc(tsd_tsdn(tsd), p);
2686 	}
2687 	prof_realloc(tsd, p, *usize, tctx, prof_activex, false, old_ptr,
2688 	    old_usize, old_tctx);
2689 
2690 	return p;
2691 }
2692 
2693 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2694 void JEMALLOC_NOTHROW *
2695 JEMALLOC_ALLOC_SIZE(2)
2696 je_rallocx(void *ptr, size_t size, int flags) {
2697 	void *p;
2698 	tsd_t *tsd;
2699 	size_t usize;
2700 	size_t old_usize;
2701 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2702 	bool zero = flags & MALLOCX_ZERO;
2703 	arena_t *arena;
2704 	tcache_t *tcache;
2705 
2706 	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2707 	    size, flags);
2708 
2709 
2710 	assert(ptr != NULL);
2711 	assert(size != 0);
2712 	assert(malloc_initialized() || IS_INITIALIZER);
2713 	tsd = tsd_fetch();
2714 	check_entry_exit_locking(tsd_tsdn(tsd));
2715 
2716 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2717 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2718 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2719 		if (unlikely(arena == NULL)) {
2720 			goto label_oom;
2721 		}
2722 	} else {
2723 		arena = NULL;
2724 	}
2725 
2726 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2727 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2728 			tcache = NULL;
2729 		} else {
2730 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2731 		}
2732 	} else {
2733 		tcache = tcache_get(tsd);
2734 	}
2735 
2736 	alloc_ctx_t alloc_ctx;
2737 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2738 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2739 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2740 	assert(alloc_ctx.szind != NSIZES);
2741 	old_usize = sz_index2size(alloc_ctx.szind);
2742 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2743 	if (config_prof && opt_prof) {
2744 		usize = (alignment == 0) ?
2745 		    sz_s2u(size) : sz_sa2u(size, alignment);
2746 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2747 			goto label_oom;
2748 		}
2749 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2750 		    zero, tcache, arena, &alloc_ctx);
2751 		if (unlikely(p == NULL)) {
2752 			goto label_oom;
2753 		}
2754 	} else {
2755 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2756 		    zero, tcache, arena);
2757 		if (unlikely(p == NULL)) {
2758 			goto label_oom;
2759 		}
2760 		if (config_stats) {
2761 			usize = isalloc(tsd_tsdn(tsd), p);
2762 		}
2763 	}
2764 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2765 
2766 	if (config_stats) {
2767 		*tsd_thread_allocatedp_get(tsd) += usize;
2768 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2769 	}
2770 	UTRACE(ptr, size, p);
2771 	check_entry_exit_locking(tsd_tsdn(tsd));
2772 
2773 	LOG("core.rallocx.exit", "result: %p", p);
2774 	return p;
2775 label_oom:
2776 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2777 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2778 		abort();
2779 	}
2780 	UTRACE(ptr, size, 0);
2781 	check_entry_exit_locking(tsd_tsdn(tsd));
2782 
2783 	LOG("core.rallocx.exit", "result: %p", NULL);
2784 	return NULL;
2785 }
2786 
2787 JEMALLOC_ALWAYS_INLINE size_t
2788 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2789     size_t extra, size_t alignment, bool zero) {
2790 	size_t usize;
2791 
2792 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2793 		return old_usize;
2794 	}
2795 	usize = isalloc(tsdn, ptr);
2796 
2797 	return usize;
2798 }
2799 
2800 static size_t
2801 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2802     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2803 	size_t usize;
2804 
2805 	if (tctx == NULL) {
2806 		return old_usize;
2807 	}
2808 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2809 	    zero);
2810 
2811 	return usize;
2812 }
2813 
2814 JEMALLOC_ALWAYS_INLINE size_t
2815 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2816     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2817 	size_t usize_max, usize;
2818 	bool prof_activex;
2819 	prof_tctx_t *old_tctx, *tctx;
2820 
2821 	prof_activex = prof_active_get_unlocked();
2822 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2823 	/*
2824 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2825 	 * Therefore, compute its maximum possible value and use that in
2826 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2827 	 * prof_realloc() will use the actual usize to decide whether to sample.
2828 	 */
2829 	if (alignment == 0) {
2830 		usize_max = sz_s2u(size+extra);
2831 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2832 	} else {
2833 		usize_max = sz_sa2u(size+extra, alignment);
2834 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2835 			/*
2836 			 * usize_max is out of range, and chances are that
2837 			 * allocation will fail, but use the maximum possible
2838 			 * value and carry on with prof_alloc_prep(), just in
2839 			 * case allocation succeeds.
2840 			 */
2841 			usize_max = LARGE_MAXCLASS;
2842 		}
2843 	}
2844 	tctx = prof_alloc_prep(tsd, usize_max, prof_activex, false);
2845 
2846 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2847 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2848 		    size, extra, alignment, zero, tctx);
2849 	} else {
2850 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2851 		    extra, alignment, zero);
2852 	}
2853 	if (usize == old_usize) {
2854 		prof_alloc_rollback(tsd, tctx, false);
2855 		return usize;
2856 	}
2857 	prof_realloc(tsd, ptr, usize, tctx, prof_activex, false, ptr, old_usize,
2858 	    old_tctx);
2859 
2860 	return usize;
2861 }
2862 
2863 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2864 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2865 	tsd_t *tsd;
2866 	size_t usize, old_usize;
2867 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2868 	bool zero = flags & MALLOCX_ZERO;
2869 
2870 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2871 	    "flags: %d", ptr, size, extra, flags);
2872 
2873 	assert(ptr != NULL);
2874 	assert(size != 0);
2875 	assert(SIZE_T_MAX - size >= extra);
2876 	assert(malloc_initialized() || IS_INITIALIZER);
2877 	tsd = tsd_fetch();
2878 	check_entry_exit_locking(tsd_tsdn(tsd));
2879 
2880 	alloc_ctx_t alloc_ctx;
2881 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2882 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2883 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2884 	assert(alloc_ctx.szind != NSIZES);
2885 	old_usize = sz_index2size(alloc_ctx.szind);
2886 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2887 	/*
2888 	 * The API explicitly absolves itself of protecting against (size +
2889 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2890 	 * exceeding LARGE_MAXCLASS.
2891 	 *
2892 	 * Ordinarily, size limit checking is handled deeper down, but here we
2893 	 * have to check as part of (size + extra) clamping, since we need the
2894 	 * clamped value in the above helper functions.
2895 	 */
2896 	if (unlikely(size > LARGE_MAXCLASS)) {
2897 		usize = old_usize;
2898 		goto label_not_resized;
2899 	}
2900 	if (unlikely(LARGE_MAXCLASS - size < extra)) {
2901 		extra = LARGE_MAXCLASS - size;
2902 	}
2903 
2904 	if (config_prof && opt_prof) {
2905 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2906 		    alignment, zero, &alloc_ctx);
2907 	} else {
2908 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2909 		    extra, alignment, zero);
2910 	}
2911 	if (unlikely(usize == old_usize)) {
2912 		goto label_not_resized;
2913 	}
2914 
2915 	if (config_stats) {
2916 		*tsd_thread_allocatedp_get(tsd) += usize;
2917 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2918 	}
2919 label_not_resized:
2920 	UTRACE(ptr, size, ptr);
2921 	check_entry_exit_locking(tsd_tsdn(tsd));
2922 
2923 	LOG("core.xallocx.exit", "result: %zu", usize);
2924 	return usize;
2925 }
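
/*
 * Editor's sketch: xallocx() never moves an allocation; it returns the real
 * size of the (possibly resized) allocation, so a caller detects a failed
 * in-place grow by comparing against the requested size (p/oldsz are
 * placeholders):
 *
 *	size_t want = 2 * oldsz;
 *	if (xallocx(p, want, 0, 0) < want) {
 *		// could not grow in place; fall back to rallocx() or copy
 *	}
 */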
2926 
2927 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2928 JEMALLOC_ATTR(pure)
2929 je_sallocx(const void *ptr, UNUSED int flags) {
2930 	size_t usize;
2931 	tsdn_t *tsdn;
2932 
2933 	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2934 
2935 	assert(malloc_initialized() || IS_INITIALIZER);
2936 	assert(ptr != NULL);
2937 
2938 	tsdn = tsdn_fetch();
2939 	check_entry_exit_locking(tsdn);
2940 
2941 	if (config_debug || force_ivsalloc) {
2942 		usize = ivsalloc(tsdn, ptr);
2943 		assert(force_ivsalloc || usize != 0);
2944 	} else {
2945 		usize = isalloc(tsdn, ptr);
2946 	}
2947 
2948 	check_entry_exit_locking(tsdn);
2949 
2950 	LOG("core.sallocx.exit", "result: %zu", usize);
2951 	return usize;
2952 }
2953 
2954 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2955 je_dallocx(void *ptr, int flags) {
2956 	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2957 
2958 	assert(ptr != NULL);
2959 	assert(malloc_initialized() || IS_INITIALIZER);
2960 
2961 	tsd_t *tsd = tsd_fetch();
2962 	bool fast = tsd_fast(tsd);
2963 	check_entry_exit_locking(tsd_tsdn(tsd));
2964 
2965 	tcache_t *tcache;
2966 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2967 		/* Not allowed to be reentrant and specify a custom tcache. */
2968 		assert(tsd_reentrancy_level_get(tsd) == 0);
2969 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2970 			tcache = NULL;
2971 		} else {
2972 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2973 		}
2974 	} else {
2975 		if (likely(fast)) {
2976 			tcache = tsd_tcachep_get(tsd);
2977 			assert(tcache == tcache_get(tsd));
2978 		} else {
2979 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2980 				tcache = tcache_get(tsd);
2981 			} else {
2982 				tcache = NULL;
2983 			}
2984 		}
2985 	}
2986 
2987 	UTRACE(ptr, 0, 0);
2988 	if (likely(fast)) {
2989 		tsd_assert_fast(tsd);
2990 		ifree(tsd, ptr, tcache, false);
2991 	} else {
2992 		ifree(tsd, ptr, tcache, true);
2993 	}
2994 	check_entry_exit_locking(tsd_tsdn(tsd));
2995 
2996 	LOG("core.dallocx.exit", "");
2997 }
2998 
2999 JEMALLOC_ALWAYS_INLINE size_t
3000 inallocx(tsdn_t *tsdn, size_t size, int flags) {
3001 	check_entry_exit_locking(tsdn);
3002 
3003 	size_t usize;
3004 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
3005 		usize = sz_s2u(size);
3006 	} else {
3007 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
3008 	}
3009 	check_entry_exit_locking(tsdn);
3010 	return usize;
3011 }
3012 
3013 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3014 je_sdallocx(void *ptr, size_t size, int flags) {
3015 	assert(ptr != NULL);
3016 	assert(malloc_initialized() || IS_INITIALIZER);
3017 
3018 	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3019 	    size, flags);
3020 
3021 	tsd_t *tsd = tsd_fetch();
3022 	bool fast = tsd_fast(tsd);
3023 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3024 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
3025 	check_entry_exit_locking(tsd_tsdn(tsd));
3026 
3027 	tcache_t *tcache;
3028 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
3029 		/* Not allowed to be reentrant and specify a custom tcache. */
3030 		assert(tsd_reentrancy_level_get(tsd) == 0);
3031 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3032 			tcache = NULL;
3033 		} else {
3034 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3035 		}
3036 	} else {
3037 		if (likely(fast)) {
3038 			tcache = tsd_tcachep_get(tsd);
3039 			assert(tcache == tcache_get(tsd));
3040 		} else {
3041 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3042 				tcache = tcache_get(tsd);
3043 			} else {
3044 				tcache = NULL;
3045 			}
3046 		}
3047 	}
3048 
3049 	UTRACE(ptr, 0, 0);
3050 	if (likely(fast)) {
3051 		tsd_assert_fast(tsd);
3052 		isfree(tsd, ptr, usize, tcache, false);
3053 	} else {
3054 		isfree(tsd, ptr, usize, tcache, true);
3055 	}
3056 	check_entry_exit_locking(tsd_tsdn(tsd));
3057 
3058 	LOG("core.sdallocx.exit", "");
3059 }
3060 
3061 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3062 JEMALLOC_ATTR(pure)
3063 je_nallocx(size_t size, int flags) {
3064 	size_t usize;
3065 	tsdn_t *tsdn;
3066 
3067 	assert(size != 0);
3068 
3069 	if (unlikely(malloc_init())) {
3070 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3071 		return 0;
3072 	}
3073 
3074 	tsdn = tsdn_fetch();
3075 	check_entry_exit_locking(tsdn);
3076 
3077 	usize = inallocx(tsdn, size, flags);
3078 	if (unlikely(usize > LARGE_MAXCLASS)) {
3079 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3080 		return 0;
3081 	}
3082 
3083 	check_entry_exit_locking(tsdn);
3084 	LOG("core.nallocx.exit", "result: %zu", usize);
3085 	return usize;
3086 }
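
/*
 * Editor's sketch: nallocx() reports the usable size a request would map to
 * without allocating, and sdallocx() lets the caller pass the size back at
 * deallocation time as an optimization.  The size given to sdallocx() must
 * lie between the original request and its usable size:
 *
 *	size_t usize = nallocx(len, 0);		// 0 if len is invalid
 *	void *p = mallocx(len, 0);
 *	// ...
 *	sdallocx(p, len, 0);			// or sdallocx(p, usize, 0)
 */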
3087 
3088 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3089 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3090     size_t newlen) {
3091 	int ret;
3092 	tsd_t *tsd;
3093 
3094 	LOG("core.mallctl.entry", "name: %s", name);
3095 
3096 	if (unlikely(malloc_init())) {
3097 		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3098 		return EAGAIN;
3099 	}
3100 
3101 	tsd = tsd_fetch();
3102 	check_entry_exit_locking(tsd_tsdn(tsd));
3103 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3104 	check_entry_exit_locking(tsd_tsdn(tsd));
3105 
3106 	LOG("core.mallctl.exit", "result: %d", ret);
3107 	return ret;
3108 }
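
/*
 * Editor's sketch of the documented mallctl() protocol handled above: oldp/
 * oldlenp read the current value, newp/newlen write a new one.  Statistics
 * readers first write to "epoch" to refresh the cached stats; "stats.*"
 * controls require a build with statistics enabled:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
 *		// allocated == bytes currently allocated by the application
 *	}
 */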
3109 
3110 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3111 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3112 	int ret;
3113 
3114 	LOG("core.mallctlnametomib.entry", "name: %s", name);
3115 
3116 	if (unlikely(malloc_init())) {
3117 		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3118 		return EAGAIN;
3119 	}
3120 
3121 	tsd_t *tsd = tsd_fetch();
3122 	check_entry_exit_locking(tsd_tsdn(tsd));
3123 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
3124 	check_entry_exit_locking(tsd_tsdn(tsd));
3125 
3126 	LOG("core.mallctlnametomib.exit", "result: %d", ret);
3127 	return ret;
3128 }
3129 
3130 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3131 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3132   void *newp, size_t newlen) {
3133 	int ret;
3134 	tsd_t *tsd;
3135 
3136 	LOG("core.mallctlbymib.entry", "");
3137 
3138 	if (unlikely(malloc_init())) {
3139 		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3140 		return EAGAIN;
3141 	}
3142 
3143 	tsd = tsd_fetch();
3144 	check_entry_exit_locking(tsd_tsdn(tsd));
3145 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3146 	check_entry_exit_locking(tsd_tsdn(tsd));
3147 	LOG("core.mallctlbymib.exit", "result: %d", ret);
3148 	return ret;
3149 }
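
/*
 * Editor's sketch (adapted from the jemalloc manual): translating a name to
 * a MIB once with mallctlnametomib() and reusing it via mallctlbymib()
 * avoids repeated name parsing.  "nbins" below would be read from the
 * "arenas.nbins" control:
 *
 *	size_t mib[4];
 *	size_t miblen = sizeof(mib) / sizeof(mib[0]);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (unsigned i = 0; i < nbins; i++) {
 *		size_t bin_size, len = sizeof(bin_size);
 *		mib[2] = i;
 *		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *		// ... use bin_size ...
 *	}
 */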
3150 
3151 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3152 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3153     const char *opts) {
3154 	tsdn_t *tsdn;
3155 
3156 	LOG("core.malloc_stats_print.entry", "");
3157 
3158 	tsdn = tsdn_fetch();
3159 	check_entry_exit_locking(tsdn);
3160 	stats_print(write_cb, cbopaque, opts);
3161 	check_entry_exit_locking(tsdn);
3162 	LOG("core.malloc_stats_print.exit", "");
3163 }
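
/*
 * Editor's sketch: passing NULL for write_cb routes the report through
 * je_malloc_message (stderr by default), and the opts string selects output
 * variants -- e.g. "J" requests JSON in recent jemalloc releases (assumption
 * based on the upstream manual; my_write_cb/my_opaque are placeholders):
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 *	malloc_stats_print(my_write_cb, my_opaque, "J");
 */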
3164 
3165 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3166 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3167 	size_t ret;
3168 	tsdn_t *tsdn;
3169 
3170 	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3171 
3172 	assert(malloc_initialized() || IS_INITIALIZER);
3173 
3174 	tsdn = tsdn_fetch();
3175 	check_entry_exit_locking(tsdn);
3176 
3177 	if (unlikely(ptr == NULL)) {
3178 		ret = 0;
3179 	} else {
3180 		if (config_debug || force_ivsalloc) {
3181 			ret = ivsalloc(tsdn, ptr);
3182 			assert(force_ivsalloc || ret != 0);
3183 		} else {
3184 			ret = isalloc(tsdn, ptr);
3185 		}
3186 	}
3187 
3188 	check_entry_exit_locking(tsdn);
3189 	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3190 	return ret;
3191 }
3192 
3193 /*
3194  * End non-standard functions.
3195  */
3196 /******************************************************************************/
3197 /*
3198  * The following functions are used by threading libraries for protection of
3199  * malloc during fork().
3200  */
3201 
3202 /*
3203  * If an application creates a thread before doing any allocation in the main
3204  * thread, then calls fork(2) in the main thread followed by memory allocation
3205  * in the child process, a race can occur that results in deadlock within the
3206  * child: the main thread may have forked while the created thread had
3207  * partially initialized the allocator.  Ordinarily jemalloc prevents
3208  * fork/malloc races via the following functions it registers during
3209  * initialization using pthread_atfork(), but of course that does no good if
3210  * the allocator isn't fully initialized at fork time.  The following library
3211  * constructor is a partial solution to this problem.  It may still be possible
3212  * to trigger the deadlock described above, but doing so would involve forking
3213  * via a library constructor that runs before jemalloc's runs.
3214  */
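
/*
 * Editor's note: on platforms where the threading library does not call
 * _malloc_prefork()/_malloc_postfork() itself, the handlers below are
 * registered during initialization, conceptually:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * The exact registration site and its guards are platform-dependent; this is
 * only a sketch.
 */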
3215 #ifndef JEMALLOC_JET
3216 JEMALLOC_ATTR(constructor)
3217 static void
3218 jemalloc_constructor(void) {
3219 	malloc_init();
3220 }
3221 #endif
3222 
3223 #if !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(__NetBSD__)
3224 void
3225 jemalloc_prefork(void)
3226 #else
3227 JEMALLOC_EXPORT void
3228 _malloc_prefork(void)
3229 #endif
3230 {
3231 	tsd_t *tsd;
3232 	unsigned i, j, narenas;
3233 	arena_t *arena;
3234 
3235 #ifdef JEMALLOC_MUTEX_INIT_CB
3236 	if (!malloc_initialized()) {
3237 		return;
3238 	}
3239 #endif
3240 	assert(malloc_initialized());
3241 
3242 	tsd = tsd_fetch();
3243 
3244 	narenas = narenas_total_get();
3245 
3246 	witness_prefork(tsd_witness_tsdp_get(tsd));
3247 	/* Acquire all mutexes in a safe order. */
3248 	ctl_prefork(tsd_tsdn(tsd));
3249 	tcache_prefork(tsd_tsdn(tsd));
3250 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3251 	if (have_background_thread) {
3252 		background_thread_prefork0(tsd_tsdn(tsd));
3253 	}
3254 	prof_prefork0(tsd_tsdn(tsd));
3255 	if (have_background_thread) {
3256 		background_thread_prefork1(tsd_tsdn(tsd));
3257 	}
3258 	/* Break arena prefork into stages to preserve lock order. */
3259 	for (i = 0; i < 8; i++) {
3260 		for (j = 0; j < narenas; j++) {
3261 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3262 			    NULL) {
3263 				switch (i) {
3264 				case 0:
3265 					arena_prefork0(tsd_tsdn(tsd), arena);
3266 					break;
3267 				case 1:
3268 					arena_prefork1(tsd_tsdn(tsd), arena);
3269 					break;
3270 				case 2:
3271 					arena_prefork2(tsd_tsdn(tsd), arena);
3272 					break;
3273 				case 3:
3274 					arena_prefork3(tsd_tsdn(tsd), arena);
3275 					break;
3276 				case 4:
3277 					arena_prefork4(tsd_tsdn(tsd), arena);
3278 					break;
3279 				case 5:
3280 					arena_prefork5(tsd_tsdn(tsd), arena);
3281 					break;
3282 				case 6:
3283 					arena_prefork6(tsd_tsdn(tsd), arena);
3284 					break;
3285 				case 7:
3286 					arena_prefork7(tsd_tsdn(tsd), arena);
3287 					break;
3288 				default: not_reached();
3289 				}
3290 			}
3291 		}
3292 	}
3293 	prof_prefork1(tsd_tsdn(tsd));
3294 }
3295 
3296 #if !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(__NetBSD__)
3297 void
3298 jemalloc_postfork_parent(void)
3299 #else
3300 JEMALLOC_EXPORT void
3301 _malloc_postfork(void)
3302 #endif
3303 {
3304 	tsd_t *tsd;
3305 	unsigned i, narenas;
3306 
3307 #ifdef JEMALLOC_MUTEX_INIT_CB
3308 	if (!malloc_initialized()) {
3309 		return;
3310 	}
3311 #endif
3312 	assert(malloc_initialized());
3313 
3314 	tsd = tsd_fetch();
3315 
3316 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3317 	/* Release all mutexes, now that fork() has completed. */
3318 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3319 		arena_t *arena;
3320 
3321 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3322 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3323 		}
3324 	}
3325 	prof_postfork_parent(tsd_tsdn(tsd));
3326 	if (have_background_thread) {
3327 		background_thread_postfork_parent(tsd_tsdn(tsd));
3328 	}
3329 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3330 	tcache_postfork_parent(tsd_tsdn(tsd));
3331 	ctl_postfork_parent(tsd_tsdn(tsd));
3332 }
3333 
3334 #if !defined(__NetBSD__)
3335 void
3336 jemalloc_postfork_child(void)
3337 #else
3338 JEMALLOC_EXPORT void
3339 _malloc_postfork_child(void)
3340 #endif
3341 {
3342 	tsd_t *tsd;
3343 	unsigned i, narenas;
3344 
3345 	assert(malloc_initialized());
3346 
3347 	tsd = tsd_fetch();
3348 
3349 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3350 	/* Release all mutexes, now that fork() has completed. */
3351 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3352 		arena_t *arena;
3353 
3354 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3355 			arena_postfork_child(tsd_tsdn(tsd), arena);
3356 		}
3357 	}
3358 	prof_postfork_child(tsd_tsdn(tsd));
3359 	if (have_background_thread) {
3360 		background_thread_postfork_child(tsd_tsdn(tsd));
3361 	}
3362 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3363 	tcache_postfork_child(tsd_tsdn(tsd));
3364 	ctl_postfork_child(tsd_tsdn(tsd));
3365 }
3366 
3367 void (*
3368 je_malloc_message_get(void))(void *, const char *)
3369 {
3370 	return je_malloc_message;
3371 }
3372 
3373 void
3374 je_malloc_message_set(void (*m)(void *, const char *))
3375 {
3376 	je_malloc_message = m;
3377 }
3378 
3379 const char *
3380 je_malloc_conf_get(void)
3381 {
3382 	return je_malloc_conf;
3383 }
3384 
3385 void
3386 je_malloc_conf_set(const char *m)
3387 {
3388 	je_malloc_conf = m;
3389 }
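
/*
 * Editor's sketch of the NetBSD-specific accessors above; they let a program
 * replace the error-message sink or the option string without touching the
 * globals directly (my_message is a placeholder, and the option string is
 * only consulted while the allocator initializes):
 *
 *	static void
 *	my_message(void *opaque, const char *s) {
 *		// e.g. forward s to syslog instead of the default stderr path
 *	}
 *
 *	malloc_message_set(my_message);
 *	malloc_conf_set("junk:true");
 */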
3390 
3391 /******************************************************************************/
3392