xref: /netbsd-src/external/bsd/jemalloc/dist/src/jemalloc.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 #include <sys/cdefs.h>
2 
3 #define JEMALLOC_C_
4 #include "jemalloc/internal/jemalloc_preamble.h"
5 #include "jemalloc/internal/jemalloc_internal_includes.h"
6 
7 #include "jemalloc/internal/assert.h"
8 #include "jemalloc/internal/atomic.h"
9 #include "jemalloc/internal/ctl.h"
10 #include "jemalloc/internal/extent_dss.h"
11 #include "jemalloc/internal/extent_mmap.h"
12 #include "jemalloc/internal/jemalloc_internal_types.h"
13 #include "jemalloc/internal/log.h"
14 #include "jemalloc/internal/malloc_io.h"
15 #include "jemalloc/internal/mutex.h"
16 #include "jemalloc/internal/rtree.h"
17 #include "jemalloc/internal/size_classes.h"
18 #include "jemalloc/internal/spin.h"
19 #include "jemalloc/internal/sz.h"
20 #include "jemalloc/internal/ticker.h"
21 #include "jemalloc/internal/util.h"
22 
23 #ifdef JEMALLOC_WEAK_NOSTD
24 __weak_alias(mallocx, __je_mallocx)
25 __weak_alias(rallocx, __je_rallocx)
26 __weak_alias(xallocx, __je_xallocx)
27 __weak_alias(sallocx, __je_sallocx)
28 __weak_alias(dallocx, __je_dallocx)
29 __weak_alias(sdallocx, __je_sdallocx)
30 __weak_alias(nallocx, __je_nallocx)
31 
32 __weak_alias(mallctl, __je_mallctl)
33 __weak_alias(mallctlnametomib, __je_mallctlnametomib)
34 __weak_alias(mallctlbymib, __je_mallctlbymib)
35 
36 __weak_alias(malloc_stats_print, __je_malloc_stats_print)
37 __weak_alias(malloc_usable_size, __je_malloc_usable_size)
38 
39 __weak_alias(malloc_message, __je_malloc_message)
40 __weak_alias(malloc_conf, __je_malloc_conf)
41 
42 __weak_alias(malloc_message_get, __je_malloc_message_get)
43 __weak_alias(malloc_conf_get, __je_malloc_conf_get)
44 
45 __weak_alias(malloc_message_set, __je_malloc_message_set)
46 __weak_alias(malloc_conf_set, __je_malloc_conf_set)
47 #endif
48 
49 /******************************************************************************/
50 /* Data. */
51 
52 /* Runtime configuration options. */
53 const char	*je_malloc_conf
54 #ifndef _WIN32
55     JEMALLOC_ATTR(weak)
56 #endif
57     ;
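
/*
 * For example (a minimal sketch, not part of this file), an application can
 * bake in defaults by supplying its own definition of this weak symbol:
 *
 *	const char *malloc_conf = "narenas:2,tcache:false";
 *
 * When a JEMALLOC_PREFIX is configured, the public symbol carries that
 * prefix instead.
 */
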
58 bool	opt_abort =
59 #ifdef JEMALLOC_DEBUG
60     true
61 #else
62     false
63 #endif
64     ;
65 bool	opt_abort_conf =
66 #ifdef JEMALLOC_DEBUG
67     true
68 #else
69     false
70 #endif
71     ;
72 const char	*opt_junk =
73 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
74     "true"
75 #else
76     "false"
77 #endif
78     ;
79 bool	opt_junk_alloc =
80 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
81     true
82 #else
83     false
84 #endif
85     ;
86 bool	opt_junk_free =
87 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
88     true
89 #else
90     false
91 #endif
92     ;
93 
94 bool	opt_utrace = false;
95 bool	opt_xmalloc = false;
96 bool	opt_zero = false;
97 unsigned	opt_narenas = 0;
98 
99 unsigned	ncpus;
100 
101 /* Protects arenas initialization. */
102 malloc_mutex_t arenas_lock;
103 /*
104  * Arenas that are used to service external requests.  Not all elements of the
105  * arenas array are necessarily used; arenas are created lazily as needed.
106  *
107  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
108  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
109  * takes some action to create them and allocate from them.
110  *
111  * Points to an arena_t.
112  */
113 JEMALLOC_ALIGNED(CACHELINE)
114 atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
115 static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
116 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
117 unsigned		narenas_auto; /* Read-only after initialization. */
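
/*
 * For example (illustrative sketch), with narenas_auto == 8 application
 * threads are multiplexed across arenas[0..7], while an arena created via
 * the "arenas.create" mallctl lands at index 8 and is used only on request:
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.create", (void *)&ind, &sz, NULL, 0);
 *	void *p = mallocx(64, MALLOCX_ARENA(ind));
 */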
118 
119 typedef enum {
120 	malloc_init_uninitialized	= 3,
121 	malloc_init_a0_initialized	= 2,
122 	malloc_init_recursible		= 1,
123 	malloc_init_initialized		= 0 /* Common case --> jnz. */
124 } malloc_init_t;
125 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
126 
127 /* False should be the common case.  Set to true to trigger initialization. */
128 bool			malloc_slow = true;
129 
130 /* When malloc_slow is true, set the corresponding bits for sanity checking. */
131 enum {
132 	flag_opt_junk_alloc	= (1U),
133 	flag_opt_junk_free	= (1U << 1),
134 	flag_opt_zero		= (1U << 2),
135 	flag_opt_utrace		= (1U << 3),
136 	flag_opt_xmalloc	= (1U << 4)
137 };
138 static uint8_t	malloc_slow_flags;
139 
140 #ifdef JEMALLOC_THREADED_INIT
141 /* Used to let the initializing thread recursively allocate. */
142 #  define NO_INITIALIZER	((unsigned long)0)
143 #  define INITIALIZER		pthread_self()
144 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
145 static pthread_t		malloc_initializer = NO_INITIALIZER;
146 #else
147 #  define NO_INITIALIZER	false
148 #  define INITIALIZER		true
149 #  define IS_INITIALIZER	malloc_initializer
150 static bool			malloc_initializer = NO_INITIALIZER;
151 #endif
152 
153 /* Used to avoid initialization races. */
154 #ifdef _WIN32
155 #if _WIN32_WINNT >= 0x0600
156 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
157 #else
158 static malloc_mutex_t	init_lock;
159 static bool init_lock_initialized = false;
160 
161 JEMALLOC_ATTR(constructor)
162 static void WINAPI
163 _init_init_lock(void) {
164 	/*
165 	 * If another constructor in the same binary is using mallctl to e.g.
166 	 * set up extent hooks, it may end up running before this one, and
167 	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
168 	 * we force an initialization of the lock in malloc_init_hard as well.
169 	 * We don't worry about atomicity of the accesses to the
170 	 * init_lock_initialized boolean, since it really only matters early in
171 	 * the process creation, before any separate thread normally starts
172 	 * doing anything.
173 	 */
174 	if (!init_lock_initialized) {
175 		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
176 		    malloc_mutex_rank_exclusive);
177 	}
178 	init_lock_initialized = true;
179 }
180 
181 #ifdef _MSC_VER
182 #  pragma section(".CRT$XCU", read)
183 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
184 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
185 #endif
186 #endif
187 #else
188 #ifndef __lint__
189 /* Broken lint. */
190 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
191 #else
192 static malloc_mutex_t	init_lock;
193 #endif
194 #endif
195 
196 typedef struct {
197 	void	*p;	/* Input pointer (as in realloc(p, s)). */
198 	size_t	s;	/* Request size. */
199 	void	*r;	/* Result pointer. */
200 } malloc_utrace_t;
201 
202 #ifdef JEMALLOC_UTRACE
203 #  define UTRACE(a, b, c) do {						\
204 	if (unlikely(opt_utrace)) {					\
205 		int utrace_serrno = errno;				\
206 		malloc_utrace_t ut;					\
207 		ut.p = (a);						\
208 		ut.s = (b);						\
209 		ut.r = (c);						\
210 		utrace(&ut, sizeof(ut));				\
211 		errno = utrace_serrno;					\
212 	}								\
213 } while (0)
214 #else
215 #  define UTRACE(a, b, c)
216 #endif
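
/*
 * For example, the allocation paths below emit UTRACE(NULL, size, result)
 * for malloc-style calls, UTRACE(old, size, result) for realloc-style
 * calls, and UTRACE(ptr, 0, NULL) for frees; with opt_utrace enabled the
 * records can typically be captured with ktrace(1) and examined with
 * kdump(1).
 */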
217 
218 /* Whether any invalid config options have been encountered. */
219 static bool had_conf_error = false;
220 
221 /******************************************************************************/
222 /*
223  * Function prototypes for static functions that are referenced prior to
224  * definition.
225  */
226 
227 static bool	malloc_init_hard_a0(void);
228 static bool	malloc_init_hard(void);
229 
230 /******************************************************************************/
231 /*
232  * Begin miscellaneous support functions.
233  */
234 
235 bool
236 malloc_initialized(void) {
237 	return (malloc_init_state == malloc_init_initialized);
238 }
239 
240 JEMALLOC_ALWAYS_INLINE bool
241 malloc_init_a0(void) {
242 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
243 		return malloc_init_hard_a0();
244 	}
245 	return false;
246 }
247 
248 JEMALLOC_ALWAYS_INLINE bool
249 malloc_init(void) {
250 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
251 		return true;
252 	}
253 	return false;
254 }
255 
256 /*
257  * The a0*() functions are used instead of i{d,}alloc() in situations that
258  * cannot tolerate TLS variable access.
259  */
260 
261 static void *
262 a0ialloc(size_t size, bool zero, bool is_internal) {
263 	if (unlikely(malloc_init_a0())) {
264 		return NULL;
265 	}
266 
267 	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
268 	    is_internal, arena_get(TSDN_NULL, 0, true), true);
269 }
270 
271 static void
272 a0idalloc(void *ptr, bool is_internal) {
273 	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
274 }
275 
276 void *
277 a0malloc(size_t size) {
278 	return a0ialloc(size, false, true);
279 }
280 
281 void
282 a0dalloc(void *ptr) {
283 	a0idalloc(ptr, true);
284 }
285 
286 /*
287  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
288  * situations that cannot tolerate TLS variable access (TLS allocation and very
289  * early internal data structure initialization).
290  */
291 
292 void *
293 bootstrap_malloc(size_t size) {
294 	if (unlikely(size == 0)) {
295 		size = 1;
296 	}
297 
298 	return a0ialloc(size, false, false);
299 }
300 
301 void *
302 bootstrap_calloc(size_t num, size_t size) {
303 	size_t num_size;
304 
305 	num_size = num * size;
306 	if (unlikely(num_size == 0)) {
307 		assert(num == 0 || size == 0);
308 		num_size = 1;
309 	}
310 
311 	return a0ialloc(num_size, true, false);
312 }
313 
314 void
315 bootstrap_free(void *ptr) {
316 	if (unlikely(ptr == NULL)) {
317 		return;
318 	}
319 
320 	a0idalloc(ptr, false);
321 }
322 
323 void
324 arena_set(unsigned ind, arena_t *arena) {
325 	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
326 }
327 
328 static void
329 narenas_total_set(unsigned narenas) {
330 	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
331 }
332 
333 static void
334 narenas_total_inc(void) {
335 	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
336 }
337 
338 unsigned
339 narenas_total_get(void) {
340 	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
341 }
342 
343 /* Create a new arena and insert it into the arenas array at index ind. */
344 static arena_t *
345 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
346 	arena_t *arena;
347 
348 	assert(ind <= narenas_total_get());
349 	if (ind >= MALLOCX_ARENA_LIMIT) {
350 		return NULL;
351 	}
352 	if (ind == narenas_total_get()) {
353 		narenas_total_inc();
354 	}
355 
356 	/*
357 	 * Another thread may have already initialized arenas[ind] if it's an
358 	 * auto arena.
359 	 */
360 	arena = arena_get(tsdn, ind, false);
361 	if (arena != NULL) {
362 		assert(ind < narenas_auto);
363 		return arena;
364 	}
365 
366 	/* Actually initialize the arena. */
367 	arena = arena_new(tsdn, ind, extent_hooks);
368 
369 	return arena;
370 }
371 
372 static void
373 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
374 	if (ind == 0) {
375 		return;
376 	}
377 	if (have_background_thread) {
378 		bool err;
379 		malloc_mutex_lock(tsdn, &background_thread_lock);
380 		err = background_thread_create(tsdn_tsd(tsdn), ind);
381 		malloc_mutex_unlock(tsdn, &background_thread_lock);
382 		if (err) {
383 			malloc_printf("<jemalloc>: error in background thread "
384 				      "creation for arena %u. Abort.\n", ind);
385 			abort();
386 		}
387 	}
388 }
389 
390 arena_t *
391 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
392 	arena_t *arena;
393 
394 	malloc_mutex_lock(tsdn, &arenas_lock);
395 	arena = arena_init_locked(tsdn, ind, extent_hooks);
396 	malloc_mutex_unlock(tsdn, &arenas_lock);
397 
398 	arena_new_create_background_thread(tsdn, ind);
399 
400 	return arena;
401 }
402 
403 static void
404 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
405 	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
406 	arena_nthreads_inc(arena, internal);
407 
408 	if (internal) {
409 		tsd_iarena_set(tsd, arena);
410 	} else {
411 		tsd_arena_set(tsd, arena);
412 	}
413 }
414 
415 void
416 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
417 	arena_t *oldarena, *newarena;
418 
419 	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
420 	newarena = arena_get(tsd_tsdn(tsd), newind, false);
421 	arena_nthreads_dec(oldarena, false);
422 	arena_nthreads_inc(newarena, false);
423 	tsd_arena_set(tsd, newarena);
424 }
425 
426 static void
427 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
428 	arena_t *arena;
429 
430 	arena = arena_get(tsd_tsdn(tsd), ind, false);
431 	arena_nthreads_dec(arena, internal);
432 
433 	if (internal) {
434 		tsd_iarena_set(tsd, NULL);
435 	} else {
436 		tsd_arena_set(tsd, NULL);
437 	}
438 }
439 
440 arena_tdata_t *
441 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
442 	arena_tdata_t *tdata, *arenas_tdata_old;
443 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
444 	unsigned narenas_tdata_old, i;
445 	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
446 	unsigned narenas_actual = narenas_total_get();
447 
448 	/*
449 	 * Dissociate old tdata array (and set up for deallocation upon return)
450 	 * if it's too small.
451 	 */
452 	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
453 		arenas_tdata_old = arenas_tdata;
454 		narenas_tdata_old = narenas_tdata;
455 		arenas_tdata = NULL;
456 		narenas_tdata = 0;
457 		tsd_arenas_tdata_set(tsd, arenas_tdata);
458 		tsd_narenas_tdata_set(tsd, narenas_tdata);
459 	} else {
460 		arenas_tdata_old = NULL;
461 		narenas_tdata_old = 0;
462 	}
463 
464 	/* Allocate tdata array if it's missing. */
465 	if (arenas_tdata == NULL) {
466 		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
467 		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
468 
469 		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
470 			*arenas_tdata_bypassp = true;
471 			arenas_tdata = (arena_tdata_t *)a0malloc(
472 			    sizeof(arena_tdata_t) * narenas_tdata);
473 			*arenas_tdata_bypassp = false;
474 		}
475 		if (arenas_tdata == NULL) {
476 			tdata = NULL;
477 			goto label_return;
478 		}
479 		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
480 		tsd_arenas_tdata_set(tsd, arenas_tdata);
481 		tsd_narenas_tdata_set(tsd, narenas_tdata);
482 	}
483 
484 	/*
485 	 * Copy to tdata array.  It's possible that the actual number of arenas
486 	 * has increased since narenas_total_get() was called above, but that
487 	 * causes no correctness issues unless two threads concurrently execute
488 	 * the arenas.create mallctl, which we trust mallctl synchronization to
489 	 * prevent.
490 	 */
491 
492 	/* Copy/initialize tickers. */
493 	for (i = 0; i < narenas_actual; i++) {
494 		if (i < narenas_tdata_old) {
495 			ticker_copy(&arenas_tdata[i].decay_ticker,
496 			    &arenas_tdata_old[i].decay_ticker);
497 		} else {
498 			ticker_init(&arenas_tdata[i].decay_ticker,
499 			    DECAY_NTICKS_PER_UPDATE);
500 		}
501 	}
502 	if (narenas_tdata > narenas_actual) {
503 		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
504 		    * (narenas_tdata - narenas_actual));
505 	}
506 
507 	/* Read the refreshed tdata array. */
508 	tdata = &arenas_tdata[ind];
509 label_return:
510 	if (arenas_tdata_old != NULL) {
511 		a0dalloc(arenas_tdata_old);
512 	}
513 	return tdata;
514 }
515 
516 /* Slow path, called only by arena_choose(). */
517 arena_t *
518 arena_choose_hard(tsd_t *tsd, bool internal) {
519 	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
520 
521 	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
522 		unsigned choose = percpu_arena_choose();
523 		ret = arena_get(tsd_tsdn(tsd), choose, true);
524 		assert(ret != NULL);
525 		arena_bind(tsd, arena_ind_get(ret), false);
526 		arena_bind(tsd, arena_ind_get(ret), true);
527 
528 		return ret;
529 	}
530 
531 	if (narenas_auto > 1) {
532 		unsigned i, j, choose[2], first_null;
533 		bool is_new_arena[2];
534 
535 		/*
536 		 * Determine binding for both non-internal and internal
537 		 * allocation.
538 		 *
539 		 *   choose[0]: For application allocation.
540 		 *   choose[1]: For internal metadata allocation.
541 		 */
542 
543 		for (j = 0; j < 2; j++) {
544 			choose[j] = 0;
545 			is_new_arena[j] = false;
546 		}
547 
548 		first_null = narenas_auto;
549 		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
550 		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
551 		for (i = 1; i < narenas_auto; i++) {
552 			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
553 				/*
554 				 * Choose the first arena that has the lowest
555 				 * number of threads assigned to it.
556 				 */
557 				for (j = 0; j < 2; j++) {
558 					if (arena_nthreads_get(arena_get(
559 					    tsd_tsdn(tsd), i, false), !!j) <
560 					    arena_nthreads_get(arena_get(
561 					    tsd_tsdn(tsd), choose[j], false),
562 					    !!j)) {
563 						choose[j] = i;
564 					}
565 				}
566 			} else if (first_null == narenas_auto) {
567 				/*
568 				 * Record the index of the first uninitialized
569 				 * arena, in case all extant arenas are in use.
570 				 *
571 				 * NB: It is possible for there to be
572 				 * discontinuities in terms of initialized
573 				 * versus uninitialized arenas, due to the
574 				 * "thread.arena" mallctl.
575 				 */
576 				first_null = i;
577 			}
578 		}
579 
580 		for (j = 0; j < 2; j++) {
581 			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
582 			    choose[j], false), !!j) == 0 || first_null ==
583 			    narenas_auto) {
584 				/*
585 				 * Use an unloaded arena, or the least loaded
586 				 * arena if all arenas are already initialized.
587 				 */
588 				if (!!j == internal) {
589 					ret = arena_get(tsd_tsdn(tsd),
590 					    choose[j], false);
591 				}
592 			} else {
593 				arena_t *arena;
594 
595 				/* Initialize a new arena. */
596 				choose[j] = first_null;
597 				arena = arena_init_locked(tsd_tsdn(tsd),
598 				    choose[j], (extent_hooks_t *)
599 				    __UNCONST(&extent_hooks_default));
600 				if (arena == NULL) {
601 					malloc_mutex_unlock(tsd_tsdn(tsd),
602 					    &arenas_lock);
603 					return NULL;
604 				}
605 				is_new_arena[j] = true;
606 				if (!!j == internal) {
607 					ret = arena;
608 				}
609 			}
610 			arena_bind(tsd, choose[j], !!j);
611 		}
612 		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
613 
614 		for (j = 0; j < 2; j++) {
615 			if (is_new_arena[j]) {
616 				assert(choose[j] > 0);
617 				arena_new_create_background_thread(
618 				    tsd_tsdn(tsd), choose[j]);
619 			}
620 		}
621 
622 	} else {
623 		ret = arena_get(tsd_tsdn(tsd), 0, false);
624 		arena_bind(tsd, 0, false);
625 		arena_bind(tsd, 0, true);
626 	}
627 
628 	return ret;
629 }
630 
631 void
632 iarena_cleanup(tsd_t *tsd) {
633 	arena_t *iarena;
634 
635 	iarena = tsd_iarena_get(tsd);
636 	if (iarena != NULL) {
637 		arena_unbind(tsd, arena_ind_get(iarena), true);
638 	}
639 }
640 
641 void
642 arena_cleanup(tsd_t *tsd) {
643 	arena_t *arena;
644 
645 	arena = tsd_arena_get(tsd);
646 	if (arena != NULL) {
647 		arena_unbind(tsd, arena_ind_get(arena), false);
648 	}
649 }
650 
651 void
652 arenas_tdata_cleanup(tsd_t *tsd) {
653 	arena_tdata_t *arenas_tdata;
654 
655 	/* Prevent tsd->arenas_tdata from being (re)created. */
656 	*tsd_arenas_tdata_bypassp_get(tsd) = true;
657 
658 	arenas_tdata = tsd_arenas_tdata_get(tsd);
659 	if (arenas_tdata != NULL) {
660 		tsd_arenas_tdata_set(tsd, NULL);
661 		a0dalloc(arenas_tdata);
662 	}
663 }
664 
665 static void
666 stats_print_atexit(void) {
667 	if (config_stats) {
668 		tsdn_t *tsdn;
669 		unsigned narenas, i;
670 
671 		tsdn = tsdn_fetch();
672 
673 		/*
674 		 * Merge stats from extant threads.  This is racy, since
675 		 * individual threads do not lock when recording tcache stats
676 		 * events.  As a consequence, the final stats may be slightly
677 		 * out of date by the time they are reported, if other threads
678 		 * continue to allocate.
679 		 */
680 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
681 			arena_t *arena = arena_get(tsdn, i, false);
682 			if (arena != NULL) {
683 				tcache_t *tcache;
684 
685 				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
686 				ql_foreach(tcache, &arena->tcache_ql, link) {
687 					tcache_stats_merge(tsdn, tcache, arena);
688 				}
689 				malloc_mutex_unlock(tsdn,
690 				    &arena->tcache_ql_mtx);
691 			}
692 		}
693 	}
694 	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
695 }
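
/*
 * For example, running with "stats_print:true" causes this handler to be
 * registered via atexit(3) in malloc_init_hard_a0_locked(), so a full
 * statistics dump is written when the process exits.
 */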
696 
697 /*
698  * Ensure that we don't hold any locks upon entry to or exit from allocator
699  * code (in a "broad" sense that doesn't count a reentrant allocation as an
700  * entrance or exit).
701  */
702 JEMALLOC_ALWAYS_INLINE void
703 check_entry_exit_locking(tsdn_t *tsdn) {
704 	if (!config_debug) {
705 		return;
706 	}
707 	if (tsdn_null(tsdn)) {
708 		return;
709 	}
710 	tsd_t *tsd = tsdn_tsd(tsdn);
711 	/*
712 	 * It's possible we hold locks at entry/exit if we're in a nested
713 	 * allocation.
714 	 */
715 	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
716 	if (reentrancy_level != 0) {
717 		return;
718 	}
719 	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
720 }
721 
722 /*
723  * End miscellaneous support functions.
724  */
725 /******************************************************************************/
726 /*
727  * Begin initialization functions.
728  */
729 
730 static char *
731 jemalloc_secure_getenv(const char *name) {
732 #ifdef JEMALLOC_HAVE_SECURE_GETENV
733 	return secure_getenv(name);
734 #else
735 #  ifdef JEMALLOC_HAVE_ISSETUGID
736 	if (issetugid() != 0) {
737 		return NULL;
738 	}
739 #  endif
740 	return getenv(name);
741 #endif
742 }
743 
744 static unsigned
745 malloc_ncpus(void) {
746 	long result;
747 
748 #ifdef _WIN32
749 	SYSTEM_INFO si;
750 	GetSystemInfo(&si);
751 	result = si.dwNumberOfProcessors;
752 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
753 	/*
754 	 * glibc >= 2.6 has the CPU_COUNT macro.
755 	 *
756 	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
757 	 * *before* setting up the isspace tables.  Therefore we need a
758 	 * different method to get the number of CPUs.
759 	 */
760 	{
761 		cpu_set_t set;
762 
763 		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
764 		result = CPU_COUNT(&set);
765 	}
766 #else
767 	result = sysconf(_SC_NPROCESSORS_ONLN);
768 #endif
769 	return ((result == -1) ? 1 : (unsigned)result);
770 }
771 
772 static void
773 init_opt_stats_print_opts(const char *v, size_t vlen) {
774 	size_t opts_len = strlen(opt_stats_print_opts);
775 	assert(opts_len <= stats_print_tot_num_options);
776 
777 	for (size_t i = 0; i < vlen; i++) {
778 		switch (v[i]) {
779 #define OPTION(o, v, d, s) case o: break;
780 			STATS_PRINT_OPTIONS
781 #undef OPTION
782 		default: continue;
783 		}
784 
785 		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
786 			/* Ignore repeated. */
787 			continue;
788 		}
789 
790 		opt_stats_print_opts[opts_len++] = v[i];
791 		opt_stats_print_opts[opts_len] = '\0';
792 		assert(opts_len <= stats_print_tot_num_options);
793 	}
794 	assert(opts_len == strlen(opt_stats_print_opts));
795 }
796 
797 static bool
798 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
799     char const **v_p, size_t *vlen_p) {
800 	bool accept;
801 	const char *opts = *opts_p;
802 
803 	*k_p = opts;
804 
805 	for (accept = false; !accept;) {
806 		switch (*opts) {
807 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
808 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
809 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
810 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
811 		case 'Y': case 'Z':
812 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
813 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
814 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
815 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
816 		case 'y': case 'z':
817 		case '0': case '1': case '2': case '3': case '4': case '5':
818 		case '6': case '7': case '8': case '9':
819 		case '_':
820 			opts++;
821 			break;
822 		case ':':
823 			opts++;
824 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
825 			*v_p = opts;
826 			accept = true;
827 			break;
828 		case '\0':
829 			if (opts != *opts_p) {
830 				malloc_write("<jemalloc>: Conf string ends "
831 				    "with key\n");
832 			}
833 			return true;
834 		default:
835 			malloc_write("<jemalloc>: Malformed conf string\n");
836 			return true;
837 		}
838 	}
839 
840 	for (accept = false; !accept;) {
841 		switch (*opts) {
842 		case ',':
843 			opts++;
844 			/*
845 			 * Look ahead one character here, because the next time
846 			 * this function is called, it will assume that end of
847 			 * input has been cleanly reached if no input remains,
848 			 * but we have optimistically already consumed the
849 			 * comma if one exists.
850 			 */
851 			if (*opts == '\0') {
852 				malloc_write("<jemalloc>: Conf string ends "
853 				    "with comma\n");
854 			}
855 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
856 			accept = true;
857 			break;
858 		case '\0':
859 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
860 			accept = true;
861 			break;
862 		default:
863 			opts++;
864 			break;
865 		}
866 	}
867 
868 	*opts_p = opts;
869 	return false;
870 }
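
/*
 * For example, parsing "abort:true,narenas:4" yields ("abort", "true") and
 * then ("narenas", "4") on successive calls, each returning false; a true
 * return indicates a malformed string, e.g. one that ends in a bare key.
 */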
871 
872 static JEMALLOC_NORETURN void
873 malloc_abort_invalid_conf(void) {
874 	assert(opt_abort_conf);
875 	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
876 	    "value (see above).\n");
877 	abort();
878 }
879 
880 static void
881 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
882     size_t vlen) {
883 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
884 	    (int)vlen, v);
885 	/* If abort_conf is set, error out after processing all options. */
886 	had_conf_error = true;
887 }
888 
889 static void
890 malloc_slow_flag_init(void) {
891 	/*
892 	 * Combine the runtime options into malloc_slow for fast path.  Called
893 	 * after processing all the options.
894 	 */
895 	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
896 	    | (opt_junk_free ? flag_opt_junk_free : 0)
897 	    | (opt_zero ? flag_opt_zero : 0)
898 	    | (opt_utrace ? flag_opt_utrace : 0)
899 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
900 
901 	malloc_slow = (malloc_slow_flags != 0);
902 }
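
/*
 * For example, enabling junk filling ("junk:true") sets both
 * flag_opt_junk_alloc and flag_opt_junk_free, so malloc_slow becomes true
 * and the allocation fast paths are bypassed.
 */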
903 
904 static void
905 malloc_conf_init(void) {
906 	unsigned i;
907 	char buf[PATH_MAX + 1];
908 	const char *opts, *k, *v;
909 	size_t klen, vlen;
910 
911 	for (i = 0; i < 4; i++) {
912 		/* Get runtime configuration. */
913 		switch (i) {
914 		case 0:
915 			opts = config_malloc_conf;
916 			break;
917 		case 1:
918 			if (je_malloc_conf != NULL) {
919 				/*
920 				 * Use options that were compiled into the
921 				 * program.
922 				 */
923 				opts = je_malloc_conf;
924 			} else {
925 				/* No configuration specified. */
926 				buf[0] = '\0';
927 				opts = buf;
928 			}
929 			break;
930 		case 2: {
931 			ssize_t linklen = 0;
932 #ifndef _WIN32
933 			int saved_errno = errno;
934 			const char *linkname =
935 #  ifdef JEMALLOC_PREFIX
936 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
937 #  else
938 			    "/etc/malloc.conf"
939 #  endif
940 			    ;
941 
942 			/*
943 			 * Try to use the contents of the "/etc/malloc.conf"
944 			 * symbolic link's name.
945 			 */
946 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
947 			if (linklen == -1) {
948 				/* No configuration specified. */
949 				linklen = 0;
950 				/* Restore errno. */
951 				set_errno(saved_errno);
952 			}
953 #endif
954 			buf[linklen] = '\0';
955 			opts = buf;
956 			break;
957 		} case 3: {
958 			const char *envname =
959 #ifdef JEMALLOC_PREFIX
960 			    JEMALLOC_CPREFIX"MALLOC_CONF"
961 #else
962 			    "MALLOC_CONF"
963 #endif
964 			    ;
965 
966 			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
967 				/*
968 				 * Do nothing; opts is already initialized to
969 				 * the value of the MALLOC_CONF environment
970 				 * variable.
971 				 */
972 			} else {
973 				/* No configuration specified. */
974 				buf[0] = '\0';
975 				opts = buf;
976 			}
977 			break;
978 		} default:
979 			not_reached();
980 			buf[0] = '\0';
981 			opts = buf;
982 		}
983 
984 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
985 		    &vlen)) {
986 #define CONF_MATCH(n)							\
987 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
988 #define CONF_MATCH_VALUE(n)						\
989 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
990 #define CONF_HANDLE_BOOL(o, n)						\
991 			if (CONF_MATCH(n)) {				\
992 				if (CONF_MATCH_VALUE("true")) {		\
993 					o = true;			\
994 				} else if (CONF_MATCH_VALUE("false")) {	\
995 					o = false;			\
996 				} else {				\
997 					malloc_conf_error(		\
998 					    "Invalid conf value",	\
999 					    k, klen, v, vlen);		\
1000 				}					\
1001 				continue;				\
1002 			}
1003 #define CONF_MIN_no(um, min)	false
1004 #define CONF_MIN_yes(um, min)	((um) < (min))
1005 #define CONF_MAX_no(um, max)	false
1006 #define CONF_MAX_yes(um, max)	((um) > (max))
1007 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
1008 			if (CONF_MATCH(n)) {				\
1009 				uintmax_t um;				\
1010 				const char *end;			\
1011 									\
1012 				set_errno(0);				\
1013 				um = malloc_strtoumax(v, &end, 0);	\
1014 				if (get_errno() != 0 || (uintptr_t)end -\
1015 				    (uintptr_t)v != vlen) {		\
1016 					malloc_conf_error(		\
1017 					    "Invalid conf value",	\
1018 					    k, klen, v, vlen);		\
1019 				} else if (clip) {			\
1020 					if (CONF_MIN_##check_min(um,	\
1021 					    (t)(min))) {		\
1022 						o = (t)(min);		\
1023 					} else if (			\
1024 					    CONF_MAX_##check_max(um,	\
1025 					    (t)(max))) {		\
1026 						o = (t)(max);		\
1027 					} else {			\
1028 						o = (t)um;		\
1029 					}				\
1030 				} else {				\
1031 					if (CONF_MIN_##check_min(um,	\
1032 					    (t)(min)) ||		\
1033 					    CONF_MAX_##check_max(um,	\
1034 					    (t)(max))) {		\
1035 						malloc_conf_error(	\
1036 						    "Out-of-range "	\
1037 						    "conf value",	\
1038 						    k, klen, v, vlen);	\
1039 					} else {			\
1040 						o = (t)um;		\
1041 					}				\
1042 				}					\
1043 				continue;				\
1044 			}
1045 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
1046     clip)								\
1047 			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
1048 			    check_min, check_max, clip)
1049 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
1050 			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
1051 			    check_min, check_max, clip)
1052 #define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1053 			if (CONF_MATCH(n)) {				\
1054 				long l;					\
1055 				char *end;				\
1056 									\
1057 				set_errno(0);				\
1058 				l = strtol(v, &end, 0);			\
1059 				if (get_errno() != 0 || (uintptr_t)end -\
1060 				    (uintptr_t)v != vlen) {		\
1061 					malloc_conf_error(		\
1062 					    "Invalid conf value",	\
1063 					    k, klen, v, vlen);		\
1064 				} else if (l < (ssize_t)(min) || l >	\
1065 				    (ssize_t)(max)) {			\
1066 					malloc_conf_error(		\
1067 					    "Out-of-range conf value",	\
1068 					    k, klen, v, vlen);		\
1069 				} else {				\
1070 					o = l;				\
1071 				}					\
1072 				continue;				\
1073 			}
1074 #define CONF_HANDLE_CHAR_P(o, n, d)					\
1075 			if (CONF_MATCH(n)) {				\
1076 				size_t cpylen = (vlen <=		\
1077 				    sizeof(o)-1) ? vlen :		\
1078 				    sizeof(o)-1;			\
1079 				strncpy(o, v, cpylen);			\
1080 				o[cpylen] = '\0';			\
1081 				continue;				\
1082 			}
1083 
1084 			CONF_HANDLE_BOOL(opt_abort, "abort")
1085 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1086 			if (strncmp("metadata_thp", k, klen) == 0) {
1087 				int ii;
1088 				bool match = false;
1089 				for (ii = 0; ii < metadata_thp_mode_limit; ii++) {
1090 					if (strncmp(metadata_thp_mode_names[ii],
1091 					    v, vlen) == 0) {
1092 						opt_metadata_thp = ii;
1093 						match = true;
1094 						break;
1095 					}
1096 				}
1097 				if (!match) {
1098 					malloc_conf_error("Invalid conf value",
1099 					    k, klen, v, vlen);
1100 				}
1101 				continue;
1102 			}
1103 			CONF_HANDLE_BOOL(opt_retain, "retain")
1104 			if (strncmp("dss", k, klen) == 0) {
1105 				int ii;
1106 				bool match = false;
1107 				for (ii = 0; ii < dss_prec_limit; ii++) {
1108 					if (strncmp(dss_prec_names[ii], v, vlen)
1109 					    == 0) {
1110 						if (extent_dss_prec_set(ii)) {
1111 							malloc_conf_error(
1112 							    "Error setting dss",
1113 							    k, klen, v, vlen);
1114 						} else {
1115 							opt_dss =
1116 							    dss_prec_names[ii];
1117 							match = true;
1118 							break;
1119 						}
1120 					}
1121 				}
1122 				if (!match) {
1123 					malloc_conf_error("Invalid conf value",
1124 					    k, klen, v, vlen);
1125 				}
1126 				continue;
1127 			}
1128 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1129 			    UINT_MAX, yes, no, false)
1130 			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1131 			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1132 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1133 			    SSIZE_MAX);
1134 			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1135 			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1136 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1137 			    SSIZE_MAX);
1138 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1139 			if (CONF_MATCH("stats_print_opts")) {
1140 				init_opt_stats_print_opts(v, vlen);
1141 				continue;
1142 			}
1143 			if (config_fill) {
1144 				if (CONF_MATCH("junk")) {
1145 					if (CONF_MATCH_VALUE("true")) {
1146 						opt_junk = "true";
1147 						opt_junk_alloc = opt_junk_free =
1148 						    true;
1149 					} else if (CONF_MATCH_VALUE("false")) {
1150 						opt_junk = "false";
1151 						opt_junk_alloc = opt_junk_free =
1152 						    false;
1153 					} else if (CONF_MATCH_VALUE("alloc")) {
1154 						opt_junk = "alloc";
1155 						opt_junk_alloc = true;
1156 						opt_junk_free = false;
1157 					} else if (CONF_MATCH_VALUE("free")) {
1158 						opt_junk = "free";
1159 						opt_junk_alloc = false;
1160 						opt_junk_free = true;
1161 					} else {
1162 						malloc_conf_error(
1163 						    "Invalid conf value", k,
1164 						    klen, v, vlen);
1165 					}
1166 					continue;
1167 				}
1168 				CONF_HANDLE_BOOL(opt_zero, "zero")
1169 			}
1170 			if (config_utrace) {
1171 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
1172 			}
1173 			if (config_xmalloc) {
1174 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1175 			}
1176 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
1177 			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1178 			    "lg_extent_max_active_fit", 0,
1179 			    (sizeof(size_t) << 3), no, yes, false)
1180 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1181 			    -1, (sizeof(size_t) << 3) - 1)
1182 			if (strncmp("percpu_arena", k, klen) == 0) {
1183 				bool match = false;
1184 				for (int ii = percpu_arena_mode_names_base; ii <
1185 				    percpu_arena_mode_names_limit; ii++) {
1186 					if (strncmp(percpu_arena_mode_names[ii],
1187 					    v, vlen) == 0) {
1188 						if (!have_percpu_arena) {
1189 							malloc_conf_error(
1190 							    "No getcpu support",
1191 							    k, klen, v, vlen);
1192 						}
1193 						opt_percpu_arena = ii;
1194 						match = true;
1195 						break;
1196 					}
1197 				}
1198 				if (!match) {
1199 					malloc_conf_error("Invalid conf value",
1200 					    k, klen, v, vlen);
1201 				}
1202 				continue;
1203 			}
1204 			CONF_HANDLE_BOOL(opt_background_thread,
1205 			    "background_thread");
1206 			CONF_HANDLE_SIZE_T(opt_max_background_threads,
1207 					   "max_background_threads", 1,
1208 					   opt_max_background_threads, yes, yes,
1209 					   true);
1210 			if (config_prof) {
1211 				CONF_HANDLE_BOOL(opt_prof, "prof")
1212 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1213 				    "prof_prefix", "jeprof")
1214 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1215 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1216 				    "prof_thread_active_init")
1217 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1218 				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1219 				    - 1, no, yes, true)
1220 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1221 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1222 				    "lg_prof_interval", -1,
1223 				    (sizeof(uint64_t) << 3) - 1)
1224 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1225 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1226 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1227 			}
1228 			if (config_log) {
1229 				if (CONF_MATCH("log")) {
1230 					size_t cpylen = (
1231 					    vlen <= sizeof(log_var_names) - 1 ?
1232 					    vlen : sizeof(log_var_names) - 1);
1233 					strncpy(log_var_names, v, cpylen);
1234 					log_var_names[cpylen] = '\0';
1235 					continue;
1236 				}
1237 			}
1238 			if (CONF_MATCH("thp")) {
1239 				bool match = false;
1240 				for (int ii = 0; ii < thp_mode_names_limit; ii++) {
1241 					if (strncmp(thp_mode_names[ii], v, vlen)
1242 					    == 0) {
1243 						if (!have_madvise_huge) {
1244 							malloc_conf_error(
1245 							    "No THP support",
1246 							    k, klen, v, vlen);
1247 						}
1248 						opt_thp = ii;
1249 						match = true;
1250 						break;
1251 					}
1252 				}
1253 				if (!match) {
1254 					malloc_conf_error("Invalid conf value",
1255 					    k, klen, v, vlen);
1256 				}
1257 				continue;
1258 			}
1259 			malloc_conf_error("Invalid conf pair", k, klen, v,
1260 			    vlen);
1261 #undef CONF_MATCH
1262 #undef CONF_MATCH_VALUE
1263 #undef CONF_HANDLE_BOOL
1264 #undef CONF_MIN_no
1265 #undef CONF_MIN_yes
1266 #undef CONF_MAX_no
1267 #undef CONF_MAX_yes
1268 #undef CONF_HANDLE_T_U
1269 #undef CONF_HANDLE_UNSIGNED
1270 #undef CONF_HANDLE_SIZE_T
1271 #undef CONF_HANDLE_SSIZE_T
1272 #undef CONF_HANDLE_CHAR_P
1273 		}
1274 		if (opt_abort_conf && had_conf_error) {
1275 			malloc_abort_invalid_conf();
1276 		}
1277 	}
1278 	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1279 }
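
/*
 * For example (sketch), options can also be injected at run time through
 * the MALLOC_CONF environment variable probed above:
 *
 *	MALLOC_CONF="abort_conf:true,dirty_decay_ms:5000" ./prog
 *
 * The sources are processed in the order listed (compiled-in string, the
 * application's malloc_conf, the /etc/malloc.conf symlink, then the
 * environment), so a later source overrides an earlier one for any option
 * that appears in both.
 */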
1280 
1281 static bool
1282 malloc_init_hard_needed(void) {
1283 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1284 	    malloc_init_recursible)) {
1285 		/*
1286 		 * Another thread initialized the allocator before this one
1287 		 * acquired init_lock, or this thread is the initializing
1288 		 * thread, and it is recursively allocating.
1289 		 */
1290 		return false;
1291 	}
1292 #ifdef JEMALLOC_THREADED_INIT
1293 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1294 		/* Busy-wait until the initializing thread completes. */
1295 		spin_t spinner = SPIN_INITIALIZER;
1296 		do {
1297 			malloc_mutex_unlock(TSDN_NULL, &init_lock);
1298 			spin_adaptive(&spinner);
1299 			malloc_mutex_lock(TSDN_NULL, &init_lock);
1300 		} while (!malloc_initialized());
1301 		return false;
1302 	}
1303 #endif
1304 	return true;
1305 }
1306 
1307 static bool
1308 malloc_init_hard_a0_locked(void) {
1309 	malloc_initializer = INITIALIZER;
1310 
1311 	if (config_prof) {
1312 		prof_boot0();
1313 	}
1314 	malloc_conf_init();
1315 	if (opt_stats_print) {
1316 		/* Print statistics at exit. */
1317 		if (atexit(stats_print_atexit) != 0) {
1318 			malloc_write("<jemalloc>: Error in atexit()\n");
1319 			if (opt_abort) {
1320 				abort();
1321 			}
1322 		}
1323 	}
1324 	if (pages_boot()) {
1325 		return true;
1326 	}
1327 	if (base_boot(TSDN_NULL)) {
1328 		return true;
1329 	}
1330 	if (extent_boot()) {
1331 		return true;
1332 	}
1333 	if (ctl_boot()) {
1334 		return true;
1335 	}
1336 	if (config_prof) {
1337 		prof_boot1();
1338 	}
1339 	arena_boot();
1340 	if (tcache_boot(TSDN_NULL)) {
1341 		return true;
1342 	}
1343 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1344 	    malloc_mutex_rank_exclusive)) {
1345 		return true;
1346 	}
1347 	/*
1348 	 * Create enough scaffolding to allow recursive allocation in
1349 	 * malloc_ncpus().
1350 	 */
1351 	narenas_auto = 1;
1352 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1353 	/*
1354 	 * Initialize one arena here.  The rest are lazily created in
1355 	 * arena_choose_hard().
1356 	 */
1357 	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)__UNCONST(&extent_hooks_default))
1358 	    == NULL) {
1359 		return true;
1360 	}
1361 	a0 = arena_get(TSDN_NULL, 0, false);
1362 	malloc_init_state = malloc_init_a0_initialized;
1363 
1364 	return false;
1365 }
1366 
1367 static bool
1368 malloc_init_hard_a0(void) {
1369 	bool ret;
1370 
1371 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1372 	ret = malloc_init_hard_a0_locked();
1373 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1374 	return ret;
1375 }
1376 
1377 /* Initialize data structures which may trigger recursive allocation. */
1378 static bool
1379 malloc_init_hard_recursible(void) {
1380 	malloc_init_state = malloc_init_recursible;
1381 
1382 	ncpus = malloc_ncpus();
1383 
1384 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1385     && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1386     !defined(__native_client__))
1387 	/* LinuxThreads' pthread_atfork() allocates. */
1388 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1389 	    jemalloc_postfork_child) != 0) {
1390 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1391 		if (opt_abort) {
1392 			abort();
1393 		}
1394 		return true;
1395 	}
1396 #endif
1397 
1398 	if (background_thread_boot0()) {
1399 		return true;
1400 	}
1401 
1402 	return false;
1403 }
1404 
1405 static unsigned
1406 malloc_narenas_default(void) {
1407 	assert(ncpus > 0);
1408 	/*
1409 	 * For SMP systems, create more than one arena per CPU by
1410 	 * default.
1411 	 */
1412 	if (ncpus > 1) {
1413 		return ncpus << 2;
1414 	} else {
1415 		return 1;
1416 	}
1417 }
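
/* For example, a 16-CPU system gets 16 << 2 == 64 arenas by default. */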
1418 
1419 static percpu_arena_mode_t
1420 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1421 	assert(!malloc_initialized());
1422 	assert(mode <= percpu_arena_disabled);
1423 
1424 	if (mode != percpu_arena_disabled) {
1425 		mode += percpu_arena_mode_enabled_base;
1426 	}
1427 
1428 	return mode;
1429 }
1430 
1431 static bool
1432 malloc_init_narenas(void) {
1433 	assert(ncpus > 0);
1434 
1435 	if (opt_percpu_arena != percpu_arena_disabled) {
1436 		if (!have_percpu_arena || malloc_getcpu() < 0) {
1437 			opt_percpu_arena = percpu_arena_disabled;
1438 			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1439 			    "available. Setting narenas to %u.\n", opt_narenas ?
1440 			    opt_narenas : malloc_narenas_default());
1441 			if (opt_abort) {
1442 				abort();
1443 			}
1444 		} else {
1445 			if (ncpus >= MALLOCX_ARENA_LIMIT) {
1446 				malloc_printf("<jemalloc>: narenas w/ percpu "
1447 				    "arena beyond limit (%u)\n", ncpus);
1448 				if (opt_abort) {
1449 					abort();
1450 				}
1451 				return true;
1452 			}
1453 			/* NB: opt_percpu_arena isn't fully initialized yet. */
1454 			if (percpu_arena_as_initialized(opt_percpu_arena) ==
1455 			    per_phycpu_arena && ncpus % 2 != 0) {
1456 				malloc_printf("<jemalloc>: invalid "
1457 				    "configuration -- per physical CPU arena "
1458 				    "with odd number (%u) of CPUs (no hyper "
1459 				    "threading?).\n", ncpus);
1460 				if (opt_abort)
1461 					abort();
1462 			}
1463 			unsigned n = percpu_arena_ind_limit(
1464 			    percpu_arena_as_initialized(opt_percpu_arena));
1465 			if (opt_narenas < n) {
1466 				/*
1467 				 * If narenas is specified with percpu_arena
1468 				 * enabled, actual narenas is set as the greater
1469 				 * of the two. percpu_arena_choose will be free
1470 				 * to use any of the arenas based on CPU
1471 				 * id. This is conservative (at a small cost)
1472 				 * but ensures correctness.
1473 				 *
1474 				 * If for some reason the ncpus determined at
1475 				 * boot is not the actual number (e.g. because
1476 				 * of affinity setting from numactl), reserving
1477 				 * narenas this way provides a workaround for
1478 				 * percpu_arena.
1479 				 */
1480 				opt_narenas = n;
1481 			}
1482 		}
1483 	}
1484 	if (opt_narenas == 0) {
1485 		opt_narenas = malloc_narenas_default();
1486 	}
1487 	assert(opt_narenas > 0);
1488 
1489 	narenas_auto = opt_narenas;
1490 	/*
1491 	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1492 	 */
1493 	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1494 		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1495 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1496 		    narenas_auto);
1497 	}
1498 	narenas_total_set(narenas_auto);
1499 
1500 	return false;
1501 }
1502 
1503 static void
1504 malloc_init_percpu(void) {
1505 	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1506 }
1507 
1508 static bool
1509 malloc_init_hard_finish(void) {
1510 	if (malloc_mutex_boot()) {
1511 		return true;
1512 	}
1513 
1514 	malloc_init_state = malloc_init_initialized;
1515 	malloc_slow_flag_init();
1516 
1517 	return false;
1518 }
1519 
1520 static void
1521 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1522 	malloc_mutex_assert_owner(tsdn, &init_lock);
1523 	malloc_mutex_unlock(tsdn, &init_lock);
1524 	if (reentrancy_set) {
1525 		assert(!tsdn_null(tsdn));
1526 		tsd_t *tsd = tsdn_tsd(tsdn);
1527 		assert(tsd_reentrancy_level_get(tsd) > 0);
1528 		post_reentrancy(tsd);
1529 	}
1530 }
1531 
1532 static bool
1533 malloc_init_hard(void) {
1534 	tsd_t *tsd;
1535 
1536 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1537 	_init_init_lock();
1538 #endif
1539 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1540 
1541 #define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
1542 	malloc_init_hard_cleanup(tsdn, reentrancy);	\
1543 	return ret;
1544 
1545 	if (!malloc_init_hard_needed()) {
1546 		UNLOCK_RETURN(TSDN_NULL, false, false)
1547 	}
1548 
1549 	if (malloc_init_state != malloc_init_a0_initialized &&
1550 	    malloc_init_hard_a0_locked()) {
1551 		UNLOCK_RETURN(TSDN_NULL, true, false)
1552 	}
1553 
1554 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1555 	/* Recursive allocation relies on functional tsd. */
1556 	tsd = malloc_tsd_boot0();
1557 	if (tsd == NULL) {
1558 		return true;
1559 	}
1560 	if (malloc_init_hard_recursible()) {
1561 		return true;
1562 	}
1563 
1564 	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1565 	/* Set reentrancy level to 1 during init. */
1566 	pre_reentrancy(tsd, NULL);
1567 	/* Initialize narenas before prof_boot2 (for allocation). */
1568 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1569 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1570 	}
1571 	if (config_prof && prof_boot2(tsd)) {
1572 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1573 	}
1574 
1575 	malloc_init_percpu();
1576 
1577 	if (malloc_init_hard_finish()) {
1578 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1579 	}
1580 	post_reentrancy(tsd);
1581 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1582 
1583 	witness_assert_lockless(witness_tsd_tsdn(
1584 	    tsd_witness_tsdp_get_unsafe(tsd)));
1585 	malloc_tsd_boot1();
1586 	/* Update TSD after tsd_boot1. */
1587 	tsd = tsd_fetch();
1588 	if (opt_background_thread) {
1589 		assert(have_background_thread);
1590 		/*
1591 		 * Need to finish init & unlock first before creating background
1592 		 * threads (pthread_create depends on malloc).  ctl_init (which
1593 		 * sets isthreaded) needs to be called without holding any lock.
1594 		 */
1595 		background_thread_ctl_init(tsd_tsdn(tsd));
1596 
1597 		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1598 		bool err = background_thread_create(tsd, 0);
1599 		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1600 		if (err) {
1601 			return true;
1602 		}
1603 	}
1604 #undef UNLOCK_RETURN
1605 	return false;
1606 }
1607 
1608 /*
1609  * End initialization functions.
1610  */
1611 /******************************************************************************/
1612 /*
1613  * Begin allocation-path internal functions and data structures.
1614  */
1615 
1616 /*
1617  * Settings determined by the documented behavior of the allocation functions.
1618  */
1619 typedef struct static_opts_s static_opts_t;
1620 struct static_opts_s {
1621 	/* Whether or not allocation size may overflow. */
1622 	bool may_overflow;
1623 	/* Whether or not allocations of size 0 should be treated as size 1. */
1624 	bool bump_empty_alloc;
1625 	/*
1626 	 * Whether to assert that allocations are not of size 0 (after any
1627 	 * bumping).
1628 	 */
1629 	bool assert_nonempty_alloc;
1630 
1631 	/*
1632 	 * Whether or not to modify the 'result' argument to malloc in case of
1633 	 * error.
1634 	 */
1635 	bool null_out_result_on_error;
1636 	/* Whether to set errno when we encounter an error condition. */
1637 	bool set_errno_on_error;
1638 
1639 	/*
1640 	 * The minimum valid alignment for functions requesting aligned storage.
1641 	 */
1642 	size_t min_alignment;
1643 
1644 	/* The error string to use if we oom. */
1645 	const char *oom_string;
1646 	/* The error string to use if the passed-in alignment is invalid. */
1647 	const char *invalid_alignment_string;
1648 
1649 	/*
1650 	 * False if we're configured to skip some time-consuming operations.
1651 	 *
1652 	 * This isn't really a malloc "behavior", but it acts as a useful
1653 	 * summary of several other static (or at least, static after program
1654 	 * initialization) options.
1655 	 */
1656 	bool slow;
1657 };
1658 
1659 JEMALLOC_ALWAYS_INLINE void
1660 static_opts_init(static_opts_t *static_opts) {
1661 	static_opts->may_overflow = false;
1662 	static_opts->bump_empty_alloc = false;
1663 	static_opts->assert_nonempty_alloc = false;
1664 	static_opts->null_out_result_on_error = false;
1665 	static_opts->set_errno_on_error = false;
1666 	static_opts->min_alignment = 0;
1667 	static_opts->oom_string = "";
1668 	static_opts->invalid_alignment_string = "";
1669 	static_opts->slow = false;
1670 }
1671 
1672 /*
1673  * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
1674  * should have one constant here per magic value there.  Note however that the
1675  * representations need not be related.
1676  */
1677 #define TCACHE_IND_NONE ((unsigned)-1)
1678 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1679 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1680 
1681 typedef struct dynamic_opts_s dynamic_opts_t;
1682 struct dynamic_opts_s {
1683 	void **result;
1684 	size_t num_items;
1685 	size_t item_size;
1686 	size_t alignment;
1687 	bool zero;
1688 	unsigned tcache_ind;
1689 	unsigned arena_ind;
1690 };
1691 
1692 JEMALLOC_ALWAYS_INLINE void
1693 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1694 	dynamic_opts->result = NULL;
1695 	dynamic_opts->num_items = 0;
1696 	dynamic_opts->item_size = 0;
1697 	dynamic_opts->alignment = 0;
1698 	dynamic_opts->zero = false;
1699 	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1700 	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1701 }
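
/*
 * For example (a sketch; the real entry points appear later in this file),
 * a malloc-style wrapper fills these structs in roughly as follows before
 * handing off to the shared path that ends in imalloc_body():
 *
 *	void *ret;
 *	static_opts_t sopts;
 *	dynamic_opts_t dopts;
 *
 *	static_opts_init(&sopts);
 *	dynamic_opts_init(&dopts);
 *	sopts.bump_empty_alloc = true;
 *	sopts.null_out_result_on_error = true;
 *	sopts.set_errno_on_error = true;
 *	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
 *	dopts.result = &ret;
 *	dopts.num_items = 1;
 *	dopts.item_size = size;
 */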
1702 
1703 /* ind is ignored if dopts->alignment > 0. */
1704 JEMALLOC_ALWAYS_INLINE void *
1705 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1706     size_t size, size_t usize, szind_t ind) {
1707 	tcache_t *tcache;
1708 	arena_t *arena;
1709 
1710 	/* Fill in the tcache. */
1711 	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1712 		if (likely(!sopts->slow)) {
1713 			/* Getting tcache ptr unconditionally. */
1714 			tcache = tsd_tcachep_get(tsd);
1715 			assert(tcache == tcache_get(tsd));
1716 		} else {
1717 			tcache = tcache_get(tsd);
1718 		}
1719 	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1720 		tcache = NULL;
1721 	} else {
1722 		tcache = tcaches_get(tsd, dopts->tcache_ind);
1723 	}
1724 
1725 	/* Fill in the arena. */
1726 	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1727 		/*
1728 		 * In case of automatic arena management, we defer arena
1729 		 * computation until as late as we can, hoping to fill the
1730 		 * allocation out of the tcache.
1731 		 */
1732 		arena = NULL;
1733 	} else {
1734 		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1735 	}
1736 
1737 	if (unlikely(dopts->alignment != 0)) {
1738 		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1739 		    dopts->zero, tcache, arena);
1740 	}
1741 
1742 	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1743 	    arena, sopts->slow);
1744 }
1745 
1746 JEMALLOC_ALWAYS_INLINE void *
1747 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1748     size_t usize, szind_t ind) {
1749 	void *ret;
1750 
1751 	/*
1752 	 * For small allocations, sampling bumps the usize.  If so, we allocate
1753 	 * from the ind_large bucket.
1754 	 */
1755 	szind_t ind_large;
1756 	size_t bumped_usize = usize;
1757 
1758 	if (usize <= SMALL_MAXCLASS) {
1759 		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1760 		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1761 		    == LARGE_MINCLASS);
1762 		ind_large = sz_size2index(LARGE_MINCLASS);
1763 		bumped_usize = sz_s2u(LARGE_MINCLASS);
1764 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1765 		    bumped_usize, ind_large);
1766 		if (unlikely(ret == NULL)) {
1767 			return NULL;
1768 		}
1769 		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1770 	} else {
1771 		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1772 	}
1773 
1774 	return ret;
1775 }
1776 
1777 /*
1778  * Returns true if the allocation will overflow, and false otherwise.  Sets
1779  * *size to the product either way.
1780  */
1781 JEMALLOC_ALWAYS_INLINE bool
1782 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1783     size_t *size) {
1784 	/*
1785 	 * This function is just num_items * item_size, except that we may have
1786 	 * to check for overflow.
1787 	 */
1788 
1789 	if (!may_overflow) {
1790 		assert(dopts->num_items == 1);
1791 		*size = dopts->item_size;
1792 		return false;
1793 	}
1794 
1795 	/* A size_t with its high-half bits all set to 1. */
1796 	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1797 
1798 	*size = dopts->item_size * dopts->num_items;
1799 
1800 	if (unlikely(*size == 0)) {
1801 		return (dopts->num_items != 0 && dopts->item_size != 0);
1802 	}
1803 
1804 	/*
1805 	 * We got a non-zero size, but we don't know if we overflowed to get
1806 	 * there.  To avoid having to do a divide, we'll be clever and note that
1807 	 * if both A and B can be represented in N/2 bits, then their product
1808 	 * can be represented in N bits (without the possibility of overflow).
1809 	 */
1810 	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1811 		return false;
1812 	}
1813 	if (likely(*size / dopts->item_size == dopts->num_items)) {
1814 		return false;
1815 	}
1816 	return true;
1817 }
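
/*
 * Worked example with a 64-bit size_t: high_bits == 0xffffffff00000000.
 * For num_items == 1000 and item_size == 1000 both factors fit in the low
 * 32 bits, so the mask test alone proves the product cannot overflow.  For
 * num_items == 0x100000001 and item_size == 0x100000000 the product wraps
 * to 0x100000000; the mask test is inconclusive, and the division check
 * (*size / item_size == 1 != num_items) reports the overflow.
 */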
1818 
1819 JEMALLOC_ALWAYS_INLINE int
1820 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1821 	/* Where the actual allocated memory will live. */
1822 	void *allocation = NULL;
1823 	/* Filled in by compute_size_with_overflow below. */
1824 	size_t size = 0;
1825 	/*
1826 	 * For unaligned allocations, we need only ind.  For aligned
1827 	 * allocations, or in case of stats or profiling we need usize.
1828 	 * allocations, or in case of stats or profiling, we need usize.
1829 	 * These are actually dead stores, in that their values are reset before
1830 	 * any branch on their value is taken.  Sometimes though, it's
1831 	 * convenient to pass them as arguments before this point.  To avoid
1832 	 * undefined behavior then, we initialize them with dummy stores.
1833 	 */
1834 	szind_t ind = 0;
1835 	size_t usize = 0;
1836 
1837 	/* Reentrancy is only checked on slow path. */
1838 	int8_t reentrancy_level;
1839 
1840 	/* Compute the amount of memory the user wants. */
1841 	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1842 	    &size))) {
1843 		goto label_oom;
1844 	}
1845 
1846 	/* Validate the user input. */
1847 	if (sopts->bump_empty_alloc) {
1848 		if (unlikely(size == 0)) {
1849 			size = 1;
1850 		}
1851 	}
1852 
1853 	if (sopts->assert_nonempty_alloc) {
1854 		assert (size != 0);
1855 	}
1856 
1857 	if (unlikely(dopts->alignment < sopts->min_alignment
1858 	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1859 		goto label_invalid_alignment;
1860 	}
1861 
1862 	/* This is the beginning of the "core" algorithm. */
1863 
1864 	if (dopts->alignment == 0) {
1865 		ind = sz_size2index(size);
1866 		if (unlikely(ind >= NSIZES)) {
1867 			goto label_oom;
1868 		}
1869 		if (config_stats || (config_prof && opt_prof)) {
1870 			usize = sz_index2size(ind);
1871 			assert(usize > 0 && usize <= LARGE_MAXCLASS);
1872 		}
1873 	} else {
1874 		usize = sz_sa2u(size, dopts->alignment);
1875 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1876 			goto label_oom;
1877 		}
1878 	}
1879 
1880 	check_entry_exit_locking(tsd_tsdn(tsd));
1881 
1882 	/*
1883 	 * If we need to handle reentrancy, we can do it out of a
1884 	 * known-initialized arena (i.e. arena 0).
1885 	 */
1886 	reentrancy_level = tsd_reentrancy_level_get(tsd);
1887 	if (sopts->slow && unlikely(reentrancy_level > 0)) {
1888 		/*
1889 		 * We should never specify particular arenas or tcaches from
1890 		 * within our internal allocations.
1891 		 */
1892 		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1893 		    dopts->tcache_ind == TCACHE_IND_NONE);
1894 		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1895 		dopts->tcache_ind = TCACHE_IND_NONE;
1896 		/* We know that arena 0 has already been initialized. */
1897 		dopts->arena_ind = 0;
1898 	}
1899 
1900 	/* If profiling is on, get our profiling context. */
1901 	if (config_prof && opt_prof) {
1902 		/*
1903 		 * Note that if we're going down this path, usize must have been
1904 		 * initialized in the previous if statement.
1905 		 */
1906 		prof_tctx_t *tctx = prof_alloc_prep(
1907 		    tsd, usize, prof_active_get_unlocked(), true);
1908 
1909 		alloc_ctx_t alloc_ctx;
1910 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1911 			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1912 			allocation = imalloc_no_sample(
1913 			    sopts, dopts, tsd, usize, usize, ind);
1914 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1915 			/*
1916 			 * Note that ind might still be 0 here.  This is fine;
1917 			 * imalloc_sample ignores ind if dopts->alignment > 0.
1918 			 */
1919 			allocation = imalloc_sample(
1920 			    sopts, dopts, tsd, usize, ind);
1921 			alloc_ctx.slab = false;
1922 		} else {
1923 			allocation = NULL;
1924 		}
1925 
1926 		if (unlikely(allocation == NULL)) {
1927 			prof_alloc_rollback(tsd, tctx, true);
1928 			goto label_oom;
1929 		}
1930 		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1931 	} else {
1932 		/*
1933 		 * If dopts->alignment > 0, then ind is still 0, but usize was
1934 		 * computed in the previous if statement.  Down the positive
1935 		 * alignment path, imalloc_no_sample ignores ind and size
1936 		 * (relying only on usize).
1937 		 */
1938 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1939 		    ind);
1940 		if (unlikely(allocation == NULL)) {
1941 			goto label_oom;
1942 		}
1943 	}
1944 
1945 	/*
1946 	 * Allocation has been done at this point.  We still have some
1947 	 * post-allocation work to do though.
1948 	 */
1949 	assert(dopts->alignment == 0
1950 	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1951 
1952 	if (config_stats) {
1953 		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1954 		*tsd_thread_allocatedp_get(tsd) += usize;
1955 	}
1956 
1957 	if (sopts->slow) {
1958 		UTRACE(0, size, allocation);
1959 	}
1960 
1961 	/* Success! */
1962 	check_entry_exit_locking(tsd_tsdn(tsd));
1963 	*dopts->result = allocation;
1964 	return 0;
1965 
1966 label_oom:
1967 	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1968 		malloc_write(sopts->oom_string);
1969 		abort();
1970 	}
1971 
1972 	if (sopts->slow) {
1973 		UTRACE(NULL, size, NULL);
1974 	}
1975 
1976 	check_entry_exit_locking(tsd_tsdn(tsd));
1977 
1978 	if (sopts->set_errno_on_error) {
1979 		set_errno(ENOMEM);
1980 	}
1981 
1982 	if (sopts->null_out_result_on_error) {
1983 		*dopts->result = NULL;
1984 	}
1985 
1986 	return ENOMEM;
1987 
1988 	/*
1989 	 * This label is only jumped to by one goto; we move it out of line
1990 	 * anyway to avoid obscuring the non-error paths, and for symmetry with
1991 	 * the oom case.
1992 	 */
1993 label_invalid_alignment:
1994 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1995 		malloc_write(sopts->invalid_alignment_string);
1996 		abort();
1997 	}
1998 
1999 	if (sopts->set_errno_on_error) {
2000 		set_errno(EINVAL);
2001 	}
2002 
2003 	if (sopts->slow) {
2004 		UTRACE(NULL, size, NULL);
2005 	}
2006 
2007 	check_entry_exit_locking(tsd_tsdn(tsd));
2008 
2009 	if (sopts->null_out_result_on_error) {
2010 		*dopts->result = NULL;
2011 	}
2012 
2013 	return EINVAL;
2014 }
2015 
2016 /* Returns the errno-style error code of the allocation. */
2017 JEMALLOC_ALWAYS_INLINE int
2018 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
2019 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2020 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2021 			malloc_write(sopts->oom_string);
2022 			abort();
2023 		}
2024 		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2025 		set_errno(ENOMEM);
2026 		*dopts->result = NULL;
2027 
2028 		return ENOMEM;
2029 	}
2030 
2031 	/* We always need the tsd.  Let's grab it right away. */
2032 	tsd_t *tsd = tsd_fetch();
2033 	assert(tsd);
2034 	if (likely(tsd_fast(tsd))) {
2035 		/* Fast and common path. */
2036 		tsd_assert_fast(tsd);
2037 		sopts->slow = false;
2038 		return imalloc_body(sopts, dopts, tsd);
2039 	} else {
2040 		sopts->slow = true;
2041 		return imalloc_body(sopts, dopts, tsd);
2042 	}
2043 }
2044 /******************************************************************************/
2045 /*
2046  * Begin malloc(3)-compatible functions.
2047  */
2048 
2049 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2050 void JEMALLOC_NOTHROW *
2051 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2052 je_malloc(size_t size) {
2053 	void *ret;
2054 	static_opts_t sopts;
2055 	dynamic_opts_t dopts;
2056 
2057 	LOG("core.malloc.entry", "size: %zu", size);
2058 
2059 	static_opts_init(&sopts);
2060 	dynamic_opts_init(&dopts);
2061 
2062 	sopts.bump_empty_alloc = true;
2063 	sopts.null_out_result_on_error = true;
2064 	sopts.set_errno_on_error = true;
2065 	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2066 
2067 	dopts.result = &ret;
2068 	dopts.num_items = 1;
2069 	dopts.item_size = size;
2070 
2071 	imalloc(&sopts, &dopts);
2072 
2073 	LOG("core.malloc.exit", "result: %p", ret);
2074 
2075 	return ret;
2076 }
2077 
2078 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2079 JEMALLOC_ATTR(nonnull(1))
2080 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2081 	int ret;
2082 	static_opts_t sopts;
2083 	dynamic_opts_t dopts;
2084 
2085 	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2086 	    "size: %zu", memptr, alignment, size);
2087 
2088 	static_opts_init(&sopts);
2089 	dynamic_opts_init(&dopts);
2090 
2091 	sopts.bump_empty_alloc = true;
2092 	sopts.min_alignment = sizeof(void *);
2093 	sopts.oom_string =
2094 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2095 	sopts.invalid_alignment_string =
2096 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2097 
2098 	dopts.result = memptr;
2099 	dopts.num_items = 1;
2100 	dopts.item_size = size;
2101 	dopts.alignment = alignment;
2102 
2103 	ret = imalloc(&sopts, &dopts);
2104 
2105 	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2106 	    *memptr);
2107 
2108 	return ret;
2109 }
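
/*
 * Standalone usage sketch for the posix_memalign() entry point above: the
 * alignment must be a power of two no smaller than sizeof(void *)
 * (sopts.min_alignment), and failure is reported through the return value
 * rather than errno.  Not part of the library.
 */
#include <stdio.h>
#include <stdlib.h>

static int
example_posix_memalign(void) {
	void *p;
	int err = posix_memalign(&p, 64, 1024);	/* 64-byte aligned, 1 KiB */

	if (err != 0) {
		/* EINVAL: bad alignment; ENOMEM: allocation failed. */
		fprintf(stderr, "posix_memalign failed: %d\n", err);
		return 1;
	}
	free(p);
	return 0;
}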
2110 
2111 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2112 void JEMALLOC_NOTHROW *
2113 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2114 je_aligned_alloc(size_t alignment, size_t size) {
2115 	void *ret;
2116 
2117 	static_opts_t sopts;
2118 	dynamic_opts_t dopts;
2119 
2120 	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2121 	    alignment, size);
2122 
2123 	static_opts_init(&sopts);
2124 	dynamic_opts_init(&dopts);
2125 
2126 	sopts.bump_empty_alloc = true;
2127 	sopts.null_out_result_on_error = true;
2128 	sopts.set_errno_on_error = true;
2129 	sopts.min_alignment = 1;
2130 	sopts.oom_string =
2131 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2132 	sopts.invalid_alignment_string =
2133 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2134 
2135 	dopts.result = &ret;
2136 	dopts.num_items = 1;
2137 	dopts.item_size = size;
2138 	dopts.alignment = alignment;
2139 
2140 	imalloc(&sopts, &dopts);
2141 
2142 	LOG("core.aligned_alloc.exit", "result: %p", ret);
2143 
2144 	return ret;
2145 }
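
/*
 * Standalone usage sketch for aligned_alloc() above: unlike posix_memalign(),
 * failure is reported by a NULL return with errno set
 * (sopts.set_errno_on_error).  C11 additionally requires the size to be a
 * multiple of the alignment, so the example keeps to that.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *
example_aligned_alloc(void) {
	void *p = aligned_alloc(4096, 2 * 4096);	/* two aligned pages */

	if (p == NULL) {
		perror("aligned_alloc");	/* EINVAL or ENOMEM */
	}
	return p;
}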
2146 
2147 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2148 void JEMALLOC_NOTHROW *
2149 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2150 je_calloc(size_t num, size_t size) {
2151 	void *ret;
2152 	static_opts_t sopts;
2153 	dynamic_opts_t dopts;
2154 
2155 	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2156 
2157 	static_opts_init(&sopts);
2158 	dynamic_opts_init(&dopts);
2159 
2160 	sopts.may_overflow = true;
2161 	sopts.bump_empty_alloc = true;
2162 	sopts.null_out_result_on_error = true;
2163 	sopts.set_errno_on_error = true;
2164 	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2165 
2166 	dopts.result = &ret;
2167 	dopts.num_items = num;
2168 	dopts.item_size = size;
2169 	dopts.zero = true;
2170 
2171 	imalloc(&sopts, &dopts);
2172 
2173 	LOG("core.calloc.exit", "result: %p", ret);
2174 
2175 	return ret;
2176 }
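
/*
 * Standalone usage sketch for calloc() above: because sopts.may_overflow is
 * set, compute_size_with_overflow() checks num * size, so an overflowing
 * request fails with ENOMEM instead of silently allocating a short, zeroed
 * block.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void
example_calloc_overflow(void) {
	/* (SIZE_MAX / 2) * 4 cannot be represented in a size_t. */
	void *p = calloc(SIZE_MAX / 2, 4);

	if (p == NULL && errno == ENOMEM) {
		puts("overflowing calloc request rejected");
	}
	free(p);	/* free(NULL) is a no-op. */
}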
2177 
2178 static void *
2179 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2180     prof_tctx_t *tctx) {
2181 	void *p;
2182 
2183 	if (tctx == NULL) {
2184 		return NULL;
2185 	}
2186 	if (usize <= SMALL_MAXCLASS) {
2187 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2188 		if (p == NULL) {
2189 			return NULL;
2190 		}
2191 		arena_prof_promote(tsd_tsdn(tsd), p, usize);
2192 	} else {
2193 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2194 	}
2195 
2196 	return p;
2197 }
2198 
2199 JEMALLOC_ALWAYS_INLINE void *
2200 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2201    alloc_ctx_t *alloc_ctx) {
2202 	void *p;
2203 	bool prof_activex;
2204 	prof_tctx_t *old_tctx, *tctx;
2205 
2206 	prof_activex = prof_active_get_unlocked();
2207 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2208 	tctx = prof_alloc_prep(tsd, usize, prof_activex, true);
2209 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2210 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2211 	} else {
2212 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2213 	}
2214 	if (unlikely(p == NULL)) {
2215 		prof_alloc_rollback(tsd, tctx, true);
2216 		return NULL;
2217 	}
2218 	prof_realloc(tsd, p, usize, tctx, prof_activex, true, old_ptr,
2219 	    old_usize, old_tctx);
2220 
2221 	return p;
2222 }
2223 
2224 JEMALLOC_ALWAYS_INLINE void
2225 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2226 	if (!slow_path) {
2227 		tsd_assert_fast(tsd);
2228 	}
2229 	check_entry_exit_locking(tsd_tsdn(tsd));
2230 	if (tsd_reentrancy_level_get(tsd) != 0) {
2231 		assert(slow_path);
2232 	}
2233 
2234 	assert(ptr != NULL);
2235 	assert(malloc_initialized() || IS_INITIALIZER);
2236 
2237 	alloc_ctx_t alloc_ctx;
2238 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2239 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2240 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2241 	assert(alloc_ctx.szind != NSIZES);
2242 
2243 	size_t usize;
2244 	if (config_prof && opt_prof) {
2245 		usize = sz_index2size(alloc_ctx.szind);
2246 		prof_free(tsd, ptr, usize, &alloc_ctx);
2247 	} else if (config_stats) {
2248 		usize = sz_index2size(alloc_ctx.szind);
2249 	}
2250 	if (config_stats) {
2251 		*tsd_thread_deallocatedp_get(tsd) += usize;
2252 	}
2253 
2254 	if (likely(!slow_path)) {
2255 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2256 		    false);
2257 	} else {
2258 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2259 		    true);
2260 	}
2261 }
2262 
2263 JEMALLOC_ALWAYS_INLINE void
2264 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2265 	if (!slow_path) {
2266 		tsd_assert_fast(tsd);
2267 	}
2268 	check_entry_exit_locking(tsd_tsdn(tsd));
2269 	if (tsd_reentrancy_level_get(tsd) != 0) {
2270 		assert(slow_path);
2271 	}
2272 
2273 	assert(ptr != NULL);
2274 	assert(malloc_initialized() || IS_INITIALIZER);
2275 
2276 	alloc_ctx_t alloc_ctx, *ctx;
2277 	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2278 		/*
2279 		 * When cache_oblivious is disabled and ptr is not page aligned,
2280 		 * the allocation was not sampled -- usize can be used to
2281 		 * determine szind directly.
2282 		 */
2283 		alloc_ctx.szind = sz_size2index(usize);
2284 		alloc_ctx.slab = true;
2285 		ctx = &alloc_ctx;
2286 		if (config_debug) {
2287 			alloc_ctx_t dbg_ctx;
2288 			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2289 			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2290 			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2291 			    &dbg_ctx.slab);
2292 			assert(dbg_ctx.szind == alloc_ctx.szind);
2293 			assert(dbg_ctx.slab == alloc_ctx.slab);
2294 		}
2295 	} else if (config_prof && opt_prof) {
2296 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2297 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2298 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2299 		assert(alloc_ctx.szind == sz_size2index(usize));
2300 		ctx = &alloc_ctx;
2301 	} else {
2302 		ctx = NULL;
2303 	}
2304 
2305 	if (config_prof && opt_prof) {
2306 		prof_free(tsd, ptr, usize, ctx);
2307 	}
2308 	if (config_stats) {
2309 		*tsd_thread_deallocatedp_get(tsd) += usize;
2310 	}
2311 
2312 	if (likely(!slow_path)) {
2313 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2314 	} else {
2315 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2316 	}
2317 }
2318 
2319 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2320 void JEMALLOC_NOTHROW *
2321 JEMALLOC_ALLOC_SIZE(2)
2322 je_realloc(void *ptr, size_t size) {
2323 	void *ret;
2324 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2325 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2326 	size_t old_usize = 0;
2327 
2328 	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2329 
2330 	if (unlikely(size == 0)) {
2331 		if (ptr != NULL) {
2332 			/* realloc(ptr, 0) is equivalent to free(ptr). */
2333 			UTRACE(ptr, 0, 0);
2334 			tcache_t *tcache;
2335 			tsd_t *tsd = tsd_fetch();
2336 			if (tsd_reentrancy_level_get(tsd) == 0) {
2337 				tcache = tcache_get(tsd);
2338 			} else {
2339 				tcache = NULL;
2340 			}
2341 			ifree(tsd, ptr, tcache, true);
2342 
2343 			LOG("core.realloc.exit", "result: %p", NULL);
2344 			return NULL;
2345 		}
2346 		size = 1;
2347 	}
2348 
2349 	if (likely(ptr != NULL)) {
2350 		assert(malloc_initialized() || IS_INITIALIZER);
2351 		tsd_t *tsd = tsd_fetch();
2352 
2353 		check_entry_exit_locking(tsd_tsdn(tsd));
2354 
2355 		alloc_ctx_t alloc_ctx;
2356 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2357 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2358 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2359 		assert(alloc_ctx.szind != NSIZES);
2360 		old_usize = sz_index2size(alloc_ctx.szind);
2361 		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2362 		if (config_prof && opt_prof) {
2363 			usize = sz_s2u(size);
2364 			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2365 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2366 			    &alloc_ctx);
2367 		} else {
2368 			if (config_stats) {
2369 				usize = sz_s2u(size);
2370 			}
2371 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2372 		}
2373 		tsdn = tsd_tsdn(tsd);
2374 	} else {
2375 		/* realloc(NULL, size) is equivalent to malloc(size). */
2376 		void *ret1 = je_malloc(size);
2377 		LOG("core.realloc.exit", "result: %p", ret1);
2378 		return ret1;
2379 	}
2380 
2381 	if (unlikely(ret == NULL)) {
2382 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2383 			malloc_write("<jemalloc>: Error in realloc(): "
2384 			    "out of memory\n");
2385 			abort();
2386 		}
2387 		set_errno(ENOMEM);
2388 	}
2389 	if (config_stats && likely(ret != NULL)) {
2390 		tsd_t *tsd;
2391 
2392 		assert(usize == isalloc(tsdn, ret));
2393 		tsd = tsdn_tsd(tsdn);
2394 		*tsd_thread_allocatedp_get(tsd) += usize;
2395 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2396 	}
2397 	UTRACE(ptr, size, ret);
2398 	check_entry_exit_locking(tsdn);
2399 
2400 	LOG("core.realloc.exit", "result: %p", ret);
2401 	return ret;
2402 }
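
/*
 * Standalone usage sketch for realloc() above, exercising the two special
 * cases handled at the top of je_realloc(): realloc(NULL, n) behaves like
 * malloc(n), and realloc(p, 0) behaves like free(p) and returns NULL.
 */
#include <stdlib.h>

static void
example_realloc(void) {
	char *buf = realloc(NULL, 16);		/* same as malloc(16) */
	if (buf == NULL) {
		return;
	}

	char *bigger = realloc(buf, 64);	/* grow; contents preserved */
	if (bigger == NULL) {
		free(buf);			/* the old block is still valid */
		return;
	}
	buf = bigger;

	buf = realloc(buf, 0);			/* same as free(); yields NULL */
}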
2403 
2404 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2405 je_free(void *ptr) {
2406 	LOG("core.free.entry", "ptr: %p", ptr);
2407 
2408 	UTRACE(ptr, 0, 0);
2409 	if (likely(ptr != NULL)) {
2410 		/*
2411 		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2412 		 * based on only free() calls -- other activities trigger the
2413 		 * minimal to full transition.  This is because free() may
2414 		 * happen during thread shutdown after TLS deallocation: if a
2415 		 * thread never performed any malloc activity until then, a
2416 		 * fully-set-up tsd would not be destroyed properly.
2417 		 */
2418 		tsd_t *tsd = tsd_fetch_min();
2419 		check_entry_exit_locking(tsd_tsdn(tsd));
2420 
2421 		tcache_t *tcache;
2422 		if (likely(tsd_fast(tsd))) {
2423 			tsd_assert_fast(tsd);
2424 			/* Unconditionally get tcache ptr on fast path. */
2425 			tcache = tsd_tcachep_get(tsd);
2426 			ifree(tsd, ptr, tcache, false);
2427 		} else {
2428 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2429 				tcache = tcache_get(tsd);
2430 			} else {
2431 				tcache = NULL;
2432 			}
2433 			ifree(tsd, ptr, tcache, true);
2434 		}
2435 		check_entry_exit_locking(tsd_tsdn(tsd));
2436 	}
2437 	LOG("core.free.exit", "");
2438 }
2439 
2440 /*
2441  * End malloc(3)-compatible functions.
2442  */
2443 /******************************************************************************/
2444 /*
2445  * Begin non-standard override functions.
2446  */
2447 
2448 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2449 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2450 void JEMALLOC_NOTHROW *
2451 JEMALLOC_ATTR(malloc)
2452 je_memalign(size_t alignment, size_t size) {
2453 	void *ret;
2454 	static_opts_t sopts;
2455 	dynamic_opts_t dopts;
2456 
2457 	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2458 	    size);
2459 
2460 	static_opts_init(&sopts);
2461 	dynamic_opts_init(&dopts);
2462 
2463 	sopts.bump_empty_alloc = true;
2464 	sopts.min_alignment = 1;
2465 	sopts.oom_string =
2466 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2467 	sopts.invalid_alignment_string =
2468 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2469 	sopts.null_out_result_on_error = true;
2470 
2471 	dopts.result = &ret;
2472 	dopts.num_items = 1;
2473 	dopts.item_size = size;
2474 	dopts.alignment = alignment;
2475 
2476 	imalloc(&sopts, &dopts);
2477 
2478 	LOG("core.memalign.exit", "result: %p", ret);
2479 	return ret;
2480 }
2481 #endif
2482 
2483 #ifdef JEMALLOC_OVERRIDE_VALLOC
2484 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2485 void JEMALLOC_NOTHROW *
2486 JEMALLOC_ATTR(malloc)
2487 je_valloc(size_t size) {
2488 	void *ret;
2489 
2490 	static_opts_t sopts;
2491 	dynamic_opts_t dopts;
2492 
2493 	LOG("core.valloc.entry", "size: %zu\n", size);
2494 
2495 	static_opts_init(&sopts);
2496 	dynamic_opts_init(&dopts);
2497 
2498 	sopts.bump_empty_alloc = true;
2499 	sopts.null_out_result_on_error = true;
2500 	sopts.min_alignment = PAGE;
2501 	sopts.oom_string =
2502 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2503 	sopts.invalid_alignment_string =
2504 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2505 
2506 	dopts.result = &ret;
2507 	dopts.num_items = 1;
2508 	dopts.item_size = size;
2509 	dopts.alignment = PAGE;
2510 
2511 	imalloc(&sopts, &dopts);
2512 
2513 	LOG("core.valloc.exit", "result: %p\n", ret);
2514 	return ret;
2515 }
2516 #endif
2517 
2518 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2519 /*
2520  * glibc provides the RTLD_DEEPBIND flag for dlopen(), which can cause libc's
2521  * malloc(3)-compatible functions to be referenced inconsistently
2522  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2523  *
2524  * These definitions interpose hooks in glibc.  The functions are actually
2525  * passed an extra argument for the caller return address, which will be
2526  * ignored.
2527  */
2528 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2529 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2530 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2531 #  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2532 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2533     je_memalign;
2534 #  endif
2535 
2536 #  ifdef CPU_COUNT
2537 /*
2538  * To enable static linking with glibc, the libc-specific malloc interface must
2539  * also be implemented, so that none of glibc's malloc.o functions are added to
2540  * the link.
2541  */
2542 #    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2543 /* To force macro expansion of je_ prefix before stringification. */
2544 #    define PREALIAS(je_fn)	ALIAS(je_fn)
2545 #    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2546 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2547 #    endif
2548 #    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2549 void __libc_free(void* ptr) PREALIAS(je_free);
2550 #    endif
2551 #    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2552 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2553 #    endif
2554 #    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2555 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2556 #    endif
2557 #    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2558 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2559 #    endif
2560 #    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2561 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2562 #    endif
2563 #    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2564 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2565 #    endif
2566 #    undef PREALIAS
2567 #    undef ALIAS
2568 #  endif
2569 #endif
2570 
2571 /*
2572  * End non-standard override functions.
2573  */
2574 /******************************************************************************/
2575 /*
2576  * Begin non-standard functions.
2577  */
2578 
2579 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2580 void JEMALLOC_NOTHROW *
2581 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2582 je_mallocx(size_t size, int flags) {
2583 	void *ret;
2584 	static_opts_t sopts;
2585 	dynamic_opts_t dopts;
2586 
2587 	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2588 
2589 	static_opts_init(&sopts);
2590 	dynamic_opts_init(&dopts);
2591 
2592 	sopts.assert_nonempty_alloc = true;
2593 	sopts.null_out_result_on_error = true;
2594 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2595 
2596 	dopts.result = &ret;
2597 	dopts.num_items = 1;
2598 	dopts.item_size = size;
2599 	if (unlikely(flags != 0)) {
2600 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2601 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2602 		}
2603 
2604 		dopts.zero = MALLOCX_ZERO_GET(flags);
2605 
2606 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2607 			if ((flags & MALLOCX_TCACHE_MASK)
2608 			    == MALLOCX_TCACHE_NONE) {
2609 				dopts.tcache_ind = TCACHE_IND_NONE;
2610 			} else {
2611 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2612 			}
2613 		} else {
2614 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2615 		}
2616 
2617 		if ((flags & MALLOCX_ARENA_MASK) != 0)
2618 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2619 	}
2620 
2621 	imalloc(&sopts, &dopts);
2622 
2623 	LOG("core.mallocx.exit", "result: %p", ret);
2624 	return ret;
2625 }
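
/*
 * Standalone usage sketch for mallocx() above: the MALLOCX_* macros from
 * <jemalloc/jemalloc.h> encode alignment, zeroing, tcache and arena selection
 * into the flags argument decoded above.  Assumes the unprefixed public entry
 * points are in use.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_mallocx(void) {
	/* 4 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
	void *p = mallocx(4096,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);

	if (p == NULL) {
		return;
	}
	printf("usable size: %zu\n", sallocx(p, 0));
	dallocx(p, MALLOCX_TCACHE_NONE);
}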
2626 
2627 static void *
2628 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2629     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2630     prof_tctx_t *tctx) {
2631 	void *p;
2632 
2633 	if (tctx == NULL) {
2634 		return NULL;
2635 	}
2636 	if (usize <= SMALL_MAXCLASS) {
2637 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2638 		    alignment, zero, tcache, arena);
2639 		if (p == NULL) {
2640 			return NULL;
2641 		}
2642 		arena_prof_promote(tsdn, p, usize);
2643 	} else {
2644 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2645 		    tcache, arena);
2646 	}
2647 
2648 	return p;
2649 }
2650 
2651 JEMALLOC_ALWAYS_INLINE void *
2652 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2653     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2654     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2655 	void *p;
2656 	bool prof_activex;
2657 	prof_tctx_t *old_tctx, *tctx;
2658 
2659 	prof_activex = prof_active_get_unlocked();
2660 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2661 	tctx = prof_alloc_prep(tsd, *usize, prof_activex, false);
2662 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2663 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2664 		    *usize, alignment, zero, tcache, arena, tctx);
2665 	} else {
2666 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2667 		    zero, tcache, arena);
2668 	}
2669 	if (unlikely(p == NULL)) {
2670 		prof_alloc_rollback(tsd, tctx, false);
2671 		return NULL;
2672 	}
2673 
2674 	if (p == old_ptr && alignment != 0) {
2675 		/*
2676 		 * The allocation did not move, so it is possible that the size
2677 		 * class is smaller than would guarantee the requested
2678 		 * alignment, and that the alignment constraint was
2679 		 * serendipitously satisfied.  Additionally, old_usize may not
2680 		 * be the same as the current usize because of in-place large
2681 		 * reallocation.  Therefore, query the actual value of usize.
2682 		 */
2683 		*usize = isalloc(tsd_tsdn(tsd), p);
2684 	}
2685 	prof_realloc(tsd, p, *usize, tctx, prof_activex, false, old_ptr,
2686 	    old_usize, old_tctx);
2687 
2688 	return p;
2689 }
2690 
2691 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2692 void JEMALLOC_NOTHROW *
2693 JEMALLOC_ALLOC_SIZE(2)
2694 je_rallocx(void *ptr, size_t size, int flags) {
2695 	void *p;
2696 	tsd_t *tsd;
2697 	size_t usize;
2698 	size_t old_usize;
2699 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2700 	bool zero = flags & MALLOCX_ZERO;
2701 	arena_t *arena;
2702 	tcache_t *tcache;
2703 
2704 	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2705 	    size, flags);
2706 
2708 	assert(ptr != NULL);
2709 	assert(size != 0);
2710 	assert(malloc_initialized() || IS_INITIALIZER);
2711 	tsd = tsd_fetch();
2712 	check_entry_exit_locking(tsd_tsdn(tsd));
2713 
2714 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2715 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2716 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2717 		if (unlikely(arena == NULL)) {
2718 			goto label_oom;
2719 		}
2720 	} else {
2721 		arena = NULL;
2722 	}
2723 
2724 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2725 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2726 			tcache = NULL;
2727 		} else {
2728 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2729 		}
2730 	} else {
2731 		tcache = tcache_get(tsd);
2732 	}
2733 
2734 	alloc_ctx_t alloc_ctx;
2735 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2736 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2737 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2738 	assert(alloc_ctx.szind != NSIZES);
2739 	old_usize = sz_index2size(alloc_ctx.szind);
2740 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2741 	if (config_prof && opt_prof) {
2742 		usize = (alignment == 0) ?
2743 		    sz_s2u(size) : sz_sa2u(size, alignment);
2744 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2745 			goto label_oom;
2746 		}
2747 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2748 		    zero, tcache, arena, &alloc_ctx);
2749 		if (unlikely(p == NULL)) {
2750 			goto label_oom;
2751 		}
2752 	} else {
2753 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2754 		    zero, tcache, arena);
2755 		if (unlikely(p == NULL)) {
2756 			goto label_oom;
2757 		}
2758 		if (config_stats) {
2759 			usize = isalloc(tsd_tsdn(tsd), p);
2760 		}
2761 	}
2762 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2763 
2764 	if (config_stats) {
2765 		*tsd_thread_allocatedp_get(tsd) += usize;
2766 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2767 	}
2768 	UTRACE(ptr, size, p);
2769 	check_entry_exit_locking(tsd_tsdn(tsd));
2770 
2771 	LOG("core.rallocx.exit", "result: %p", p);
2772 	return p;
2773 label_oom:
2774 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2775 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2776 		abort();
2777 	}
2778 	UTRACE(ptr, size, 0);
2779 	check_entry_exit_locking(tsd_tsdn(tsd));
2780 
2781 	LOG("core.rallocx.exit", "result: %p", NULL);
2782 	return NULL;
2783 }
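
/*
 * Standalone usage sketch for rallocx() above: a flag-aware realloc().  On
 * failure the original allocation is left untouched, so the caller can keep
 * using it.  Assumes the unprefixed public entry points.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
example_rallocx(void *p, size_t new_size) {
	void *q = rallocx(p, new_size, MALLOCX_ZERO);	/* zero any grown tail */

	if (q == NULL) {
		return p;	/* p is still valid; nothing was freed. */
	}
	return q;
}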
2784 
2785 JEMALLOC_ALWAYS_INLINE size_t
2786 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2787     size_t extra, size_t alignment, bool zero) {
2788 	size_t usize;
2789 
2790 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2791 		return old_usize;
2792 	}
2793 	usize = isalloc(tsdn, ptr);
2794 
2795 	return usize;
2796 }
2797 
2798 static size_t
2799 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2800     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2801 	size_t usize;
2802 
2803 	if (tctx == NULL) {
2804 		return old_usize;
2805 	}
2806 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2807 	    zero);
2808 
2809 	return usize;
2810 }
2811 
2812 JEMALLOC_ALWAYS_INLINE size_t
2813 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2814     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2815 	size_t usize_max, usize;
2816 	bool prof_activex;
2817 	prof_tctx_t *old_tctx, *tctx;
2818 
2819 	prof_activex = prof_active_get_unlocked();
2820 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2821 	/*
2822 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2823 	 * Therefore, compute its maximum possible value and use that in
2824 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2825 	 * prof_realloc() will use the actual usize to decide whether to sample.
2826 	 */
2827 	if (alignment == 0) {
2828 		usize_max = sz_s2u(size+extra);
2829 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2830 	} else {
2831 		usize_max = sz_sa2u(size+extra, alignment);
2832 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2833 			/*
2834 			 * usize_max is out of range, and chances are that
2835 			 * allocation will fail, but use the maximum possible
2836 			 * value and carry on with prof_alloc_prep(), just in
2837 			 * case allocation succeeds.
2838 			 */
2839 			usize_max = LARGE_MAXCLASS;
2840 		}
2841 	}
2842 	tctx = prof_alloc_prep(tsd, usize_max, prof_activex, false);
2843 
2844 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2845 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2846 		    size, extra, alignment, zero, tctx);
2847 	} else {
2848 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2849 		    extra, alignment, zero);
2850 	}
2851 	if (usize == old_usize) {
2852 		prof_alloc_rollback(tsd, tctx, false);
2853 		return usize;
2854 	}
2855 	prof_realloc(tsd, ptr, usize, tctx, prof_activex, false, ptr, old_usize,
2856 	    old_tctx);
2857 
2858 	return usize;
2859 }
2860 
2861 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2862 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2863 	tsd_t *tsd;
2864 	size_t usize, old_usize;
2865 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2866 	bool zero = flags & MALLOCX_ZERO;
2867 
2868 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2869 	    "flags: %d", ptr, size, extra, flags);
2870 
2871 	assert(ptr != NULL);
2872 	assert(size != 0);
2873 	assert(SIZE_T_MAX - size >= extra);
2874 	assert(malloc_initialized() || IS_INITIALIZER);
2875 	tsd = tsd_fetch();
2876 	check_entry_exit_locking(tsd_tsdn(tsd));
2877 
2878 	alloc_ctx_t alloc_ctx;
2879 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2880 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2881 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2882 	assert(alloc_ctx.szind != NSIZES);
2883 	old_usize = sz_index2size(alloc_ctx.szind);
2884 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2885 	/*
2886 	 * The API explicitly absolves itself of protecting against (size +
2887 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2888 	 * exceeding LARGE_MAXCLASS.
2889 	 *
2890 	 * Ordinarily, size limit checking is handled deeper down, but here we
2891 	 * have to check as part of (size + extra) clamping, since we need the
2892 	 * clamped value in the above helper functions.
2893 	 */
2894 	if (unlikely(size > LARGE_MAXCLASS)) {
2895 		usize = old_usize;
2896 		goto label_not_resized;
2897 	}
2898 	if (unlikely(LARGE_MAXCLASS - size < extra)) {
2899 		extra = LARGE_MAXCLASS - size;
2900 	}
2901 
2902 	if (config_prof && opt_prof) {
2903 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2904 		    alignment, zero, &alloc_ctx);
2905 	} else {
2906 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2907 		    extra, alignment, zero);
2908 	}
2909 	if (unlikely(usize == old_usize)) {
2910 		goto label_not_resized;
2911 	}
2912 
2913 	if (config_stats) {
2914 		*tsd_thread_allocatedp_get(tsd) += usize;
2915 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2916 	}
2917 label_not_resized:
2918 	UTRACE(ptr, size, ptr);
2919 	check_entry_exit_locking(tsd_tsdn(tsd));
2920 
2921 	LOG("core.xallocx.exit", "result: %zu", usize);
2922 	return usize;
2923 }
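
/*
 * Standalone usage sketch for xallocx() above: it resizes strictly in place
 * and returns the resulting usable size, so a caller that needs the larger
 * size falls back to rallocx() (which may move the block) when the returned
 * value falls short.  Assumes the unprefixed public entry points.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
example_try_grow_in_place(void *p, size_t want) {
	size_t got = xallocx(p, want, 0, 0);

	if (got >= want) {
		return p;		/* grown (or already large enough) in place */
	}
	return rallocx(p, want, 0);	/* may move; NULL on failure, p stays valid */
}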
2924 
2925 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2926 JEMALLOC_ATTR(pure)
2927 je_sallocx(const void *ptr, UNUSED int flags) {
2928 	size_t usize;
2929 	tsdn_t *tsdn;
2930 
2931 	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2932 
2933 	assert(malloc_initialized() || IS_INITIALIZER);
2934 	assert(ptr != NULL);
2935 
2936 	tsdn = tsdn_fetch();
2937 	check_entry_exit_locking(tsdn);
2938 
2939 	if (config_debug || force_ivsalloc) {
2940 		usize = ivsalloc(tsdn, ptr);
2941 		assert(force_ivsalloc || usize != 0);
2942 	} else {
2943 		usize = isalloc(tsdn, ptr);
2944 	}
2945 
2946 	check_entry_exit_locking(tsdn);
2947 
2948 	LOG("core.sallocx.exit", "result: %zu", usize);
2949 	return usize;
2950 }
2951 
2952 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2953 je_dallocx(void *ptr, int flags) {
2954 	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2955 
2956 	assert(ptr != NULL);
2957 	assert(malloc_initialized() || IS_INITIALIZER);
2958 
2959 	tsd_t *tsd = tsd_fetch();
2960 	bool fast = tsd_fast(tsd);
2961 	check_entry_exit_locking(tsd_tsdn(tsd));
2962 
2963 	tcache_t *tcache;
2964 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2965 		/* Not allowed to be reentrant and specify a custom tcache. */
2966 		assert(tsd_reentrancy_level_get(tsd) == 0);
2967 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2968 			tcache = NULL;
2969 		} else {
2970 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2971 		}
2972 	} else {
2973 		if (likely(fast)) {
2974 			tcache = tsd_tcachep_get(tsd);
2975 			assert(tcache == tcache_get(tsd));
2976 		} else {
2977 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2978 				tcache = tcache_get(tsd);
2979 			} else {
2980 				tcache = NULL;
2981 			}
2982 		}
2983 	}
2984 
2985 	UTRACE(ptr, 0, 0);
2986 	if (likely(fast)) {
2987 		tsd_assert_fast(tsd);
2988 		ifree(tsd, ptr, tcache, false);
2989 	} else {
2990 		ifree(tsd, ptr, tcache, true);
2991 	}
2992 	check_entry_exit_locking(tsd_tsdn(tsd));
2993 
2994 	LOG("core.dallocx.exit", "");
2995 }
2996 
2997 JEMALLOC_ALWAYS_INLINE size_t
2998 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2999 	check_entry_exit_locking(tsdn);
3000 
3001 	size_t usize;
3002 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
3003 		usize = sz_s2u(size);
3004 	} else {
3005 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
3006 	}
3007 	check_entry_exit_locking(tsdn);
3008 	return usize;
3009 }
3010 
3011 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3012 je_sdallocx(void *ptr, size_t size, int flags) {
3013 	assert(ptr != NULL);
3014 	assert(malloc_initialized() || IS_INITIALIZER);
3015 
3016 	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3017 	    size, flags);
3018 
3019 	tsd_t *tsd = tsd_fetch();
3020 	bool fast = tsd_fast(tsd);
3021 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3022 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
3023 	check_entry_exit_locking(tsd_tsdn(tsd));
3024 
3025 	tcache_t *tcache;
3026 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
3027 		/* Not allowed to be reentrant and specify a custom tcache. */
3028 		assert(tsd_reentrancy_level_get(tsd) == 0);
3029 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3030 			tcache = NULL;
3031 		} else {
3032 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3033 		}
3034 	} else {
3035 		if (likely(fast)) {
3036 			tcache = tsd_tcachep_get(tsd);
3037 			assert(tcache == tcache_get(tsd));
3038 		} else {
3039 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3040 				tcache = tcache_get(tsd);
3041 			} else {
3042 				tcache = NULL;
3043 			}
3044 		}
3045 	}
3046 
3047 	UTRACE(ptr, 0, 0);
3048 	if (likely(fast)) {
3049 		tsd_assert_fast(tsd);
3050 		isfree(tsd, ptr, usize, tcache, false);
3051 	} else {
3052 		isfree(tsd, ptr, usize, tcache, true);
3053 	}
3054 	check_entry_exit_locking(tsd_tsdn(tsd));
3055 
3056 	LOG("core.sdallocx.exit", "");
3057 }
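
/*
 * Standalone usage sketch for sdallocx() above: the caller supplies the
 * original request size and flags, which inallocx() converts back to usize
 * without consulting allocation metadata.  Size and flags must match what was
 * passed at allocation time.  Assumes the unprefixed public entry points.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
example_sized_delete(void) {
	size_t sz = 1000;
	int flags = MALLOCX_ALIGN(64);
	void *p = mallocx(sz, flags);

	if (p != NULL) {
		sdallocx(p, sz, flags);		/* same size and flags as above */
	}
}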
3058 
3059 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3060 JEMALLOC_ATTR(pure)
3061 je_nallocx(size_t size, int flags) {
3062 	size_t usize;
3063 	tsdn_t *tsdn;
3064 
3065 	assert(size != 0);
3066 
3067 	if (unlikely(malloc_init())) {
3068 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3069 		return 0;
3070 	}
3071 
3072 	tsdn = tsdn_fetch();
3073 	check_entry_exit_locking(tsdn);
3074 
3075 	usize = inallocx(tsdn, size, flags);
3076 	if (unlikely(usize > LARGE_MAXCLASS)) {
3077 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3078 		return 0;
3079 	}
3080 
3081 	check_entry_exit_locking(tsdn);
3082 	LOG("core.nallocx.exit", "result: %zu", usize);
3083 	return usize;
3084 }
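
/*
 * Standalone usage sketch for nallocx() above: it reports the usable size a
 * request would round up to, without allocating, and returns 0 when the
 * request cannot be satisfied at all.  Assumes the unprefixed public entry
 * points.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_nallocx(void) {
	size_t want = 1000;
	size_t real = nallocx(want, 0);

	if (real != 0) {
		printf("a %zu-byte request occupies %zu bytes\n", want, real);
	}
}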
3085 
3086 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3087 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3088     size_t newlen) {
3089 	int ret;
3090 	tsd_t *tsd;
3091 
3092 	LOG("core.mallctl.entry", "name: %s", name);
3093 
3094 	if (unlikely(malloc_init())) {
3095 		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3096 		return EAGAIN;
3097 	}
3098 
3099 	tsd = tsd_fetch();
3100 	check_entry_exit_locking(tsd_tsdn(tsd));
3101 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3102 	check_entry_exit_locking(tsd_tsdn(tsd));
3103 
3104 	LOG("core.mallctl.exit", "result: %d", ret);
3105 	return ret;
3106 }
3107 
3108 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3109 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3110 	int ret;
3111 
3112 	LOG("core.mallctlnametomib.entry", "name: %s", name);
3113 
3114 	if (unlikely(malloc_init())) {
3115 		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3116 		return EAGAIN;
3117 	}
3118 
3119 	tsd_t *tsd = tsd_fetch();
3120 	check_entry_exit_locking(tsd_tsdn(tsd));
3121 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
3122 	check_entry_exit_locking(tsd_tsdn(tsd));
3123 
3124 	LOG("core.mallctlnametomib.exit", "result: %d", ret);
3125 	return ret;
3126 }
3127 
3128 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3129 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3130   void *newp, size_t newlen) {
3131 	int ret;
3132 	tsd_t *tsd;
3133 
3134 	LOG("core.mallctlbymib.entry", "");
3135 
3136 	if (unlikely(malloc_init())) {
3137 		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3138 		return EAGAIN;
3139 	}
3140 
3141 	tsd = tsd_fetch();
3142 	check_entry_exit_locking(tsd_tsdn(tsd));
3143 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3144 	check_entry_exit_locking(tsd_tsdn(tsd));
3145 	LOG("core.mallctlbymib.exit", "result: %d", ret);
3146 	return ret;
3147 }
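
/*
 * Standalone usage sketch for the mallctl*() entry points above: read a value
 * by name, refresh the stats snapshot via "epoch", then translate a name to a
 * MIB once and reuse it.  "version", "epoch" and "stats.allocated" are
 * standard mallctl names.  Assumes the unprefixed public entry points.
 */
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_mallctl(void) {
	const char *ver;
	size_t verlen = sizeof(ver);
	if (mallctl("version", &ver, &verlen, NULL, 0) == 0) {
		printf("jemalloc %s\n", ver);
	}

	/* Advance the epoch so the stats.* values are current. */
	uint64_t epoch = 1;
	size_t epochlen = sizeof(epoch);
	mallctl("epoch", &epoch, &epochlen, &epoch, epochlen);

	size_t mib[2];
	size_t miblen = 2;
	if (mallctlnametomib("stats.allocated", mib, &miblen) == 0) {
		size_t allocated;
		size_t alen = sizeof(allocated);
		if (mallctlbymib(mib, miblen, &allocated, &alen, NULL, 0) == 0) {
			printf("allocated: %zu bytes\n", allocated);
		}
	}
}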
3148 
3149 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3150 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3151     const char *opts) {
3152 	tsdn_t *tsdn;
3153 
3154 	LOG("core.malloc_stats_print.entry", "");
3155 
3156 	tsdn = tsdn_fetch();
3157 	check_entry_exit_locking(tsdn);
3158 	stats_print(write_cb, cbopaque, opts);
3159 	check_entry_exit_locking(tsdn);
3160 	LOG("core.malloc_stats_print.exit", "");
3161 }
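
/*
 * Standalone usage sketch for malloc_stats_print() above: with a NULL write_cb
 * the output goes through malloc_message (stderr by default); a custom
 * callback can redirect it.  The opts string selects output variants, e.g.
 * "J" for JSON.  Assumes the unprefixed public entry point.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_stats_sink(void *opaque, const char *s) {
	fputs(s, (FILE *)opaque);
}

static void
example_stats_print(void) {
	malloc_stats_print(NULL, NULL, NULL);			/* default sink */
	malloc_stats_print(example_stats_sink, stdout, "J");	/* JSON to stdout */
}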
3162 
3163 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3164 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3165 	size_t ret;
3166 	tsdn_t *tsdn;
3167 
3168 	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3169 
3170 	assert(malloc_initialized() || IS_INITIALIZER);
3171 
3172 	tsdn = tsdn_fetch();
3173 	check_entry_exit_locking(tsdn);
3174 
3175 	if (unlikely(ptr == NULL)) {
3176 		ret = 0;
3177 	} else {
3178 		if (config_debug || force_ivsalloc) {
3179 			ret = ivsalloc(tsdn, ptr);
3180 			assert(force_ivsalloc || ret != 0);
3181 		} else {
3182 			ret = isalloc(tsdn, ptr);
3183 		}
3184 	}
3185 
3186 	check_entry_exit_locking(tsdn);
3187 	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3188 	return ret;
3189 }
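
/*
 * Standalone usage sketch for malloc_usable_size() above: the reported value
 * is the size class actually backing the allocation, so it is at least the
 * requested size, and a NULL pointer yields 0.  Assumes the unprefixed public
 * entry point.
 */
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

static void
example_usable_size(void) {
	void *p = malloc(100);

	if (p != NULL) {
		/* e.g. 112, depending on the size class spacing. */
		printf("usable: %zu\n", malloc_usable_size(p));
		free(p);
	}
	printf("usable(NULL): %zu\n", malloc_usable_size(NULL));	/* 0 */
}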
3190 
3191 /*
3192  * End non-standard functions.
3193  */
3194 /******************************************************************************/
3195 /*
3196  * The following functions are used by threading libraries for protection of
3197  * malloc during fork().
3198  */
3199 
3200 /*
3201  * If an application creates a thread before doing any allocation in the main
3202  * thread, then calls fork(2) in the main thread followed by memory allocation
3203  * in the child process, a race can occur that results in deadlock within the
3204  * child: the main thread may have forked while the created thread had
3205  * partially initialized the allocator.  Ordinarily jemalloc prevents
3206  * fork/malloc races via the following functions it registers during
3207  * initialization using pthread_atfork(), but of course that does no good if
3208  * the allocator isn't fully initialized at fork time.  The following library
3209  * constructor is a partial solution to this problem.  It may still be possible
3210  * to trigger the deadlock described above, but doing so would involve forking
3211  * via a library constructor that runs before jemalloc's constructor runs.
3212  */
3213 #ifndef JEMALLOC_JET
3214 JEMALLOC_ATTR(constructor)
3215 static void
3216 jemalloc_constructor(void) {
3217 	malloc_init();
3218 }
3219 #endif
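
/*
 * Sketch of the pthread_atfork() wiring the comment above refers to; the
 * actual registration is performed during initialization (elsewhere).
 * prepare runs in the parent before fork(), the other two handlers run after
 * fork() in the parent and child respectively.
 */
#include <pthread.h>

static void
example_register_fork_handlers(void) {
	(void)pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}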
3220 
3221 #ifndef JEMALLOC_MUTEX_INIT_CB
3222 void
3223 jemalloc_prefork(void)
3224 #else
3225 JEMALLOC_EXPORT void
3226 _malloc_prefork(void)
3227 #endif
3228 {
3229 	tsd_t *tsd;
3230 	unsigned i, j, narenas;
3231 	arena_t *arena;
3232 
3233 #ifdef JEMALLOC_MUTEX_INIT_CB
3234 	if (!malloc_initialized()) {
3235 		return;
3236 	}
3237 #endif
3238 	assert(malloc_initialized());
3239 
3240 	tsd = tsd_fetch();
3241 
3242 	narenas = narenas_total_get();
3243 
3244 	witness_prefork(tsd_witness_tsdp_get(tsd));
3245 	/* Acquire all mutexes in a safe order. */
3246 	ctl_prefork(tsd_tsdn(tsd));
3247 	tcache_prefork(tsd_tsdn(tsd));
3248 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3249 	if (have_background_thread) {
3250 		background_thread_prefork0(tsd_tsdn(tsd));
3251 	}
3252 	prof_prefork0(tsd_tsdn(tsd));
3253 	if (have_background_thread) {
3254 		background_thread_prefork1(tsd_tsdn(tsd));
3255 	}
3256 	/* Break arena prefork into stages to preserve lock order. */
3257 	for (i = 0; i < 8; i++) {
3258 		for (j = 0; j < narenas; j++) {
3259 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3260 			    NULL) {
3261 				switch (i) {
3262 				case 0:
3263 					arena_prefork0(tsd_tsdn(tsd), arena);
3264 					break;
3265 				case 1:
3266 					arena_prefork1(tsd_tsdn(tsd), arena);
3267 					break;
3268 				case 2:
3269 					arena_prefork2(tsd_tsdn(tsd), arena);
3270 					break;
3271 				case 3:
3272 					arena_prefork3(tsd_tsdn(tsd), arena);
3273 					break;
3274 				case 4:
3275 					arena_prefork4(tsd_tsdn(tsd), arena);
3276 					break;
3277 				case 5:
3278 					arena_prefork5(tsd_tsdn(tsd), arena);
3279 					break;
3280 				case 6:
3281 					arena_prefork6(tsd_tsdn(tsd), arena);
3282 					break;
3283 				case 7:
3284 					arena_prefork7(tsd_tsdn(tsd), arena);
3285 					break;
3286 				default: not_reached();
3287 				}
3288 			}
3289 		}
3290 	}
3291 	prof_prefork1(tsd_tsdn(tsd));
3292 }
3293 
3294 #ifndef JEMALLOC_MUTEX_INIT_CB
3295 void
3296 jemalloc_postfork_parent(void)
3297 #else
3298 JEMALLOC_EXPORT void
3299 _malloc_postfork(void)
3300 #endif
3301 {
3302 	tsd_t *tsd;
3303 	unsigned i, narenas;
3304 
3305 #ifdef JEMALLOC_MUTEX_INIT_CB
3306 	if (!malloc_initialized()) {
3307 		return;
3308 	}
3309 #endif
3310 	assert(malloc_initialized());
3311 
3312 	tsd = tsd_fetch();
3313 
3314 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3315 	/* Release all mutexes, now that fork() has completed. */
3316 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3317 		arena_t *arena;
3318 
3319 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3320 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3321 		}
3322 	}
3323 	prof_postfork_parent(tsd_tsdn(tsd));
3324 	if (have_background_thread) {
3325 		background_thread_postfork_parent(tsd_tsdn(tsd));
3326 	}
3327 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3328 	tcache_postfork_parent(tsd_tsdn(tsd));
3329 	ctl_postfork_parent(tsd_tsdn(tsd));
3330 }
3331 
3332 void
3333 jemalloc_postfork_child(void) {
3334 	tsd_t *tsd;
3335 	unsigned i, narenas;
3336 
3337 	assert(malloc_initialized());
3338 
3339 	tsd = tsd_fetch();
3340 
3341 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3342 	/* Release all mutexes, now that fork() has completed. */
3343 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3344 		arena_t *arena;
3345 
3346 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3347 			arena_postfork_child(tsd_tsdn(tsd), arena);
3348 		}
3349 	}
3350 	prof_postfork_child(tsd_tsdn(tsd));
3351 	if (have_background_thread) {
3352 		background_thread_postfork_child(tsd_tsdn(tsd));
3353 	}
3354 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3355 	tcache_postfork_child(tsd_tsdn(tsd));
3356 	ctl_postfork_child(tsd_tsdn(tsd));
3357 }
3358 
3359 void (*
3360 je_malloc_message_get(void))(void *, const char *)
3361 {
3362 	return je_malloc_message;
3363 }
3364 
3365 void
3366 je_malloc_message_set(void (*m)(void *, const char *))
3367 {
3368 	je_malloc_message = m;
3369 }
3370 
3371 const char *
3372 je_malloc_conf_get(void)
3373 {
3374 	return je_malloc_conf;
3375 }
3376 
3377 void
3378 je_malloc_conf_set(const char *m)
3379 {
3380 	je_malloc_conf = m;
3381 }
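
/*
 * Standalone usage sketch for the accessors above: swap in a custom
 * malloc_message sink and read back the configuration string.  je_malloc_conf
 * is consulted only while the allocator initializes, so je_malloc_conf_set()
 * has no effect once initialization has completed.  Uses the je_-prefixed
 * names as declared in this file.
 */
#include <stdio.h>

static void
example_quiet_message(void *opaque, const char *s) {
	(void)opaque;
	(void)s;	/* drop allocator diagnostics */
}

static void
example_accessors(void) {
	void (*old_message)(void *, const char *) = je_malloc_message_get();
	je_malloc_message_set(example_quiet_message);

	const char *conf = je_malloc_conf_get();
	printf("malloc_conf: %s\n", conf != NULL ? conf : "(unset)");

	je_malloc_message_set(old_message);	/* restore the previous sink */
}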
3382 
3383 /******************************************************************************/
3384