1 #include <sys/cdefs.h>
2 
3 #ifdef __NetBSD__
4 #include "extern.h"
5 #endif
6 
7 #define JEMALLOC_C_
8 #include "jemalloc/internal/jemalloc_preamble.h"
9 #include "jemalloc/internal/jemalloc_internal_includes.h"
10 
11 #include "jemalloc/internal/assert.h"
12 #include "jemalloc/internal/atomic.h"
13 #include "jemalloc/internal/ctl.h"
14 #include "jemalloc/internal/extent_dss.h"
15 #include "jemalloc/internal/extent_mmap.h"
16 #include "jemalloc/internal/jemalloc_internal_types.h"
17 #include "jemalloc/internal/log.h"
18 #include "jemalloc/internal/malloc_io.h"
19 #include "jemalloc/internal/mutex.h"
20 #include "jemalloc/internal/rtree.h"
21 #include "jemalloc/internal/size_classes.h"
22 #include "jemalloc/internal/spin.h"
23 #include "jemalloc/internal/sz.h"
24 #include "jemalloc/internal/ticker.h"
25 #include "jemalloc/internal/util.h"
26 
27 #ifdef JEMALLOC_WEAK_NOSTD
28 __weak_alias(mallocx, __je_mallocx)
29 __weak_alias(rallocx, __je_rallocx)
30 __weak_alias(xallocx, __je_xallocx)
31 __weak_alias(sallocx, __je_sallocx)
32 __weak_alias(dallocx, __je_dallocx)
33 __weak_alias(sdallocx, __je_sdallocx)
34 __weak_alias(nallocx, __je_nallocx)
35 
36 __weak_alias(mallctl, __je_mallctl)
37 __weak_alias(mallctlnametomib, __je_mallctlnametomib)
38 __weak_alias(mallctlbymib, __je_mallctlbymib)
39 
40 __weak_alias(malloc_stats_print, __je_malloc_stats_print)
41 __weak_alias(malloc_usable_size, __je_malloc_usable_size)
42 
43 __weak_alias(malloc_message, __je_malloc_message)
44 __weak_alias(malloc_conf, __je_malloc_conf)
45 
46 __weak_alias(malloc_message_get, __je_malloc_message_get)
47 __weak_alias(malloc_conf_get, __je_malloc_conf_get)
48 
49 __weak_alias(malloc_message_set, __je_malloc_message_set)
50 __weak_alias(malloc_conf_set, __je_malloc_conf_set)
51 #endif
52 
53 /******************************************************************************/
54 /* Data. */
55 
56 /* Runtime configuration options. */
57 const char	*je_malloc_conf
58 #ifndef _WIN32
59     JEMALLOC_ATTR(weak)
60 #endif
61     ;
62 bool	opt_abort =
63 #ifdef JEMALLOC_DEBUG
64     true
65 #else
66     false
67 #endif
68     ;
69 bool	opt_abort_conf =
70 #ifdef JEMALLOC_DEBUG
71     true
72 #else
73     false
74 #endif
75     ;
76 const char	*opt_junk =
77 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
78     "true"
79 #else
80     "false"
81 #endif
82     ;
83 bool	opt_junk_alloc =
84 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
85     true
86 #else
87     false
88 #endif
89     ;
90 bool	opt_junk_free =
91 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
92     true
93 #else
94     false
95 #endif
96     ;
97 
98 bool	opt_utrace = false;
99 bool	opt_xmalloc = false;
100 bool	opt_zero = false;
101 unsigned	opt_narenas = 0;
102 
103 unsigned	ncpus;
104 
105 /* Protects arenas initialization. */
106 malloc_mutex_t arenas_lock;
107 /*
108  * Arenas that are used to service external requests.  Not all elements of the
109  * arenas array are necessarily used; arenas are created lazily as needed.
110  *
111  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
112  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
113  * takes some action to create them and allocate from them.
114  *
115  * Points to an arena_t.
116  */
117 JEMALLOC_ALIGNED(CACHELINE)
118 atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
119 static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
120 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
121 unsigned		narenas_auto; /* Read-only after initialization. */
122 
123 typedef enum {
124 	malloc_init_uninitialized	= 3,
125 	malloc_init_a0_initialized	= 2,
126 	malloc_init_recursible		= 1,
127 	malloc_init_initialized		= 0 /* Common case --> jnz. */
128 } malloc_init_t;
129 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
130 
131 /* False should be the common case.  Set to true to trigger initialization. */
132 bool			malloc_slow = true;
133 
134 /* When malloc_slow is true, set the corresponding bits for sanity check. */
135 enum {
136 	flag_opt_junk_alloc	= (1U),
137 	flag_opt_junk_free	= (1U << 1),
138 	flag_opt_zero		= (1U << 2),
139 	flag_opt_utrace		= (1U << 3),
140 	flag_opt_xmalloc	= (1U << 4)
141 };
142 static uint8_t	malloc_slow_flags;
143 
144 #ifdef JEMALLOC_THREADED_INIT
145 /* Used to let the initializing thread recursively allocate. */
146 #  define NO_INITIALIZER	((unsigned long)0)
147 #  define INITIALIZER		pthread_self()
148 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
149 static pthread_t		malloc_initializer = NO_INITIALIZER;
150 #else
151 #  define NO_INITIALIZER	false
152 #  define INITIALIZER		true
153 #  define IS_INITIALIZER	malloc_initializer
154 static bool			malloc_initializer = NO_INITIALIZER;
155 #endif
156 
157 /* Used to avoid initialization races. */
158 #ifdef _WIN32
159 #if _WIN32_WINNT >= 0x0600
160 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
161 #else
162 static malloc_mutex_t	init_lock;
163 static bool init_lock_initialized = false;
164 
165 JEMALLOC_ATTR(constructor)
166 static void WINAPI
167 _init_init_lock(void) {
168 	/*
169 	 * If another constructor in the same binary is using mallctl to e.g.
170 	 * set up extent hooks, it may end up running before this one, and
171 	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
172 	 * we force an initialization of the lock in malloc_init_hard as well.
173 	 * We don't worry about atomicity of the accesses to the
174 	 * init_lock_initialized boolean, since it really only matters early in
175 	 * the process creation, before any separate thread normally starts
176 	 * doing anything.
177 	 */
178 	if (!init_lock_initialized) {
179 		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
180 		    malloc_mutex_rank_exclusive);
181 	}
182 	init_lock_initialized = true;
183 }
184 
185 #ifdef _MSC_VER
186 #  pragma section(".CRT$XCU", read)
187 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
188 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
189 #endif
190 #endif
191 #else
192 #ifndef __lint__
193 // Broken lint
194 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
195 #else
196 static malloc_mutex_t	init_lock;
197 #endif
198 #endif
199 
200 typedef struct {
201 	void	*p;	/* Input pointer (as in realloc(p, s)). */
202 	size_t	s;	/* Request size. */
203 	void	*r;	/* Result pointer. */
204 } malloc_utrace_t;
205 
206 #ifdef JEMALLOC_UTRACE
207 #  define UTRACE(a, b, c) do {						\
208 	if (unlikely(opt_utrace)) {					\
209 		int utrace_serrno = errno;				\
210 		malloc_utrace_t ut;					\
211 		ut.p = (a);						\
212 		ut.s = (b);						\
213 		ut.r = (c);						\
214 		utrace(&ut, sizeof(ut));				\
215 		errno = utrace_serrno;					\
216 	}								\
217 } while (0)
218 #else
219 #  define UTRACE(a, b, c)
220 #endif
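
/*
 * Illustrative sketch (comment only, not part of the build): assuming the
 * conventions used at the UTRACE() call sites elsewhere in this file, the
 * records handed to utrace(2) look like
 *
 *	malloc(s) == r		->  { .p = NULL, .s = s, .r = r }
 *	realloc(p, s) == r	->  { .p = p,    .s = s, .r = r }
 *	free(p)			->  { .p = p,    .s = 0, .r = NULL }
 *
 * and are emitted only when opt_utrace is enabled, with errno saved and
 * restored around the utrace(2) call.
 */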
221 
222 /* Whether any invalid config options were encountered. */
223 static bool had_conf_error = false;
224 
225 /******************************************************************************/
226 /*
227  * Function prototypes for static functions that are referenced prior to
228  * definition.
229  */
230 
231 static bool	malloc_init_hard_a0(void);
232 static bool	malloc_init_hard(void);
233 
234 /******************************************************************************/
235 /*
236  * Begin miscellaneous support functions.
237  */
238 
239 bool
240 malloc_initialized(void) {
241 	return (malloc_init_state == malloc_init_initialized);
242 }
243 
244 JEMALLOC_ALWAYS_INLINE bool
245 malloc_init_a0(void) {
246 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
247 		return malloc_init_hard_a0();
248 	}
249 	return false;
250 }
251 
252 JEMALLOC_ALWAYS_INLINE bool
253 malloc_init(void) {
254 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
255 		return true;
256 	}
257 	return false;
258 }
259 
260 /*
261  * The a0*() functions are used instead of i{d,}alloc() in situations that
262  * cannot tolerate TLS variable access.
263  */
264 
265 static void *
266 a0ialloc(size_t size, bool zero, bool is_internal) {
267 	if (unlikely(malloc_init_a0())) {
268 		return NULL;
269 	}
270 
271 	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
272 	    is_internal, arena_get(TSDN_NULL, 0, true), true);
273 }
274 
275 static void
276 a0idalloc(void *ptr, bool is_internal) {
277 	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
278 }
279 
280 void *
281 a0malloc(size_t size) {
282 	return a0ialloc(size, false, true);
283 }
284 
285 void
286 a0dalloc(void *ptr) {
287 	a0idalloc(ptr, true);
288 }
289 
290 /*
291  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
292  * situations that cannot tolerate TLS variable access (TLS allocation and very
293  * early internal data structure initialization).
294  */
295 
296 void *
297 bootstrap_malloc(size_t size) {
298 	if (unlikely(size == 0)) {
299 		size = 1;
300 	}
301 
302 	return a0ialloc(size, false, false);
303 }
304 
305 void *
306 bootstrap_calloc(size_t num, size_t size) {
307 	size_t num_size;
308 
309 	num_size = num * size;
310 	if (unlikely(num_size == 0)) {
311 		assert(num == 0 || size == 0);
312 		num_size = 1;
313 	}
314 
315 	return a0ialloc(num_size, true, false);
316 }
317 
318 void
319 bootstrap_free(void *ptr) {
320 	if (unlikely(ptr == NULL)) {
321 		return;
322 	}
323 
324 	a0idalloc(ptr, false);
325 }
326 
327 void
328 arena_set(unsigned ind, arena_t *arena) {
329 	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
330 }
331 
332 static void
333 narenas_total_set(unsigned narenas) {
334 	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
335 }
336 
337 static void
338 narenas_total_inc(void) {
339 	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
340 }
341 
342 unsigned
343 narenas_total_get(void) {
344 	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
345 }
346 
347 /* Create a new arena and insert it into the arenas array at index ind. */
348 static arena_t *
349 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
350 	arena_t *arena;
351 
352 	assert(ind <= narenas_total_get());
353 	if (ind >= MALLOCX_ARENA_LIMIT) {
354 		return NULL;
355 	}
356 	if (ind == narenas_total_get()) {
357 		narenas_total_inc();
358 	}
359 
360 	/*
361 	 * Another thread may have already initialized arenas[ind] if it's an
362 	 * auto arena.
363 	 */
364 	arena = arena_get(tsdn, ind, false);
365 	if (arena != NULL) {
366 		assert(ind < narenas_auto);
367 		return arena;
368 	}
369 
370 	/* Actually initialize the arena. */
371 	arena = arena_new(tsdn, ind, extent_hooks);
372 
373 	return arena;
374 }
375 
376 static void
377 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
378 	if (ind == 0) {
379 		return;
380 	}
381 	if (have_background_thread) {
382 		bool err;
383 		malloc_mutex_lock(tsdn, &background_thread_lock);
384 		err = background_thread_create(tsdn_tsd(tsdn), ind);
385 		malloc_mutex_unlock(tsdn, &background_thread_lock);
386 		if (err) {
387 			malloc_printf("<jemalloc>: error in background thread "
388 				      "creation for arena %u. Abort.\n", ind);
389 			abort();
390 		}
391 	}
392 }
393 
394 arena_t *
395 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
396 	arena_t *arena;
397 
398 	malloc_mutex_lock(tsdn, &arenas_lock);
399 	arena = arena_init_locked(tsdn, ind, extent_hooks);
400 	malloc_mutex_unlock(tsdn, &arenas_lock);
401 
402 	arena_new_create_background_thread(tsdn, ind);
403 
404 	return arena;
405 }
406 
407 static void
408 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
409 	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
410 	arena_nthreads_inc(arena, internal);
411 
412 	if (internal) {
413 		tsd_iarena_set(tsd, arena);
414 	} else {
415 		tsd_arena_set(tsd, arena);
416 	}
417 }
418 
419 void
420 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
421 	arena_t *oldarena, *newarena;
422 
423 	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
424 	newarena = arena_get(tsd_tsdn(tsd), newind, false);
425 	arena_nthreads_dec(oldarena, false);
426 	arena_nthreads_inc(newarena, false);
427 	tsd_arena_set(tsd, newarena);
428 }
429 
430 static void
431 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
432 	arena_t *arena;
433 
434 	arena = arena_get(tsd_tsdn(tsd), ind, false);
435 	arena_nthreads_dec(arena, internal);
436 
437 	if (internal) {
438 		tsd_iarena_set(tsd, NULL);
439 	} else {
440 		tsd_arena_set(tsd, NULL);
441 	}
442 }
443 
444 arena_tdata_t *
445 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
446 	arena_tdata_t *tdata, *arenas_tdata_old;
447 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
448 	unsigned narenas_tdata_old, i;
449 	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
450 	unsigned narenas_actual = narenas_total_get();
451 
452 	/*
453 	 * Dissociate old tdata array (and set up for deallocation upon return)
454 	 * if it's too small.
455 	 */
456 	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
457 		arenas_tdata_old = arenas_tdata;
458 		narenas_tdata_old = narenas_tdata;
459 		arenas_tdata = NULL;
460 		narenas_tdata = 0;
461 		tsd_arenas_tdata_set(tsd, arenas_tdata);
462 		tsd_narenas_tdata_set(tsd, narenas_tdata);
463 	} else {
464 		arenas_tdata_old = NULL;
465 		narenas_tdata_old = 0;
466 	}
467 
468 	/* Allocate tdata array if it's missing. */
469 	if (arenas_tdata == NULL) {
470 		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
471 		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
472 
473 		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
474 			*arenas_tdata_bypassp = true;
475 			arenas_tdata = (arena_tdata_t *)a0malloc(
476 			    sizeof(arena_tdata_t) * narenas_tdata);
477 			*arenas_tdata_bypassp = false;
478 		}
479 		if (arenas_tdata == NULL) {
480 			tdata = NULL;
481 			goto label_return;
482 		}
483 		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
484 		tsd_arenas_tdata_set(tsd, arenas_tdata);
485 		tsd_narenas_tdata_set(tsd, narenas_tdata);
486 	}
487 
488 	/*
489 	 * Copy to tdata array.  It's possible that the actual number of arenas
490 	 * has increased since narenas_total_get() was called above, but that
491 	 * causes no correctness issues unless two threads concurrently execute
492 	 * the arenas.create mallctl, which we trust mallctl synchronization to
493 	 * prevent.
494 	 */
495 
496 	/* Copy/initialize tickers. */
497 	for (i = 0; i < narenas_actual; i++) {
498 		if (i < narenas_tdata_old) {
499 			ticker_copy(&arenas_tdata[i].decay_ticker,
500 			    &arenas_tdata_old[i].decay_ticker);
501 		} else {
502 			ticker_init(&arenas_tdata[i].decay_ticker,
503 			    DECAY_NTICKS_PER_UPDATE);
504 		}
505 	}
506 	if (narenas_tdata > narenas_actual) {
507 		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
508 		    * (narenas_tdata - narenas_actual));
509 	}
510 
511 	/* Read the refreshed tdata array. */
512 	tdata = &arenas_tdata[ind];
513 label_return:
514 	if (arenas_tdata_old != NULL) {
515 		a0dalloc(arenas_tdata_old);
516 	}
517 	return tdata;
518 }
519 
520 /* Slow path, called only by arena_choose(). */
521 arena_t *
522 arena_choose_hard(tsd_t *tsd, bool internal) {
523 	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
524 
525 	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
526 		unsigned choose = percpu_arena_choose();
527 		ret = arena_get(tsd_tsdn(tsd), choose, true);
528 		assert(ret != NULL);
529 		arena_bind(tsd, arena_ind_get(ret), false);
530 		arena_bind(tsd, arena_ind_get(ret), true);
531 
532 		return ret;
533 	}
534 
535 	if (narenas_auto > 1) {
536 		unsigned i, j, choose[2], first_null;
537 		bool is_new_arena[2];
538 
539 		/*
540 		 * Determine binding for both non-internal and internal
541 		 * allocation.
542 		 *
543 		 *   choose[0]: For application allocation.
544 		 *   choose[1]: For internal metadata allocation.
545 		 */
546 
547 		for (j = 0; j < 2; j++) {
548 			choose[j] = 0;
549 			is_new_arena[j] = false;
550 		}
551 
552 		first_null = narenas_auto;
553 		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
554 		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
555 		for (i = 1; i < narenas_auto; i++) {
556 			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
557 				/*
558 				 * Choose the first arena that has the lowest
559 				 * number of threads assigned to it.
560 				 */
561 				for (j = 0; j < 2; j++) {
562 					if (arena_nthreads_get(arena_get(
563 					    tsd_tsdn(tsd), i, false), !!j) <
564 					    arena_nthreads_get(arena_get(
565 					    tsd_tsdn(tsd), choose[j], false),
566 					    !!j)) {
567 						choose[j] = i;
568 					}
569 				}
570 			} else if (first_null == narenas_auto) {
571 				/*
572 				 * Record the index of the first uninitialized
573 				 * arena, in case all extant arenas are in use.
574 				 *
575 				 * NB: It is possible for there to be
576 				 * discontinuities in terms of initialized
577 				 * versus uninitialized arenas, due to the
578 				 * "thread.arena" mallctl.
579 				 */
580 				first_null = i;
581 			}
582 		}
583 
584 		for (j = 0; j < 2; j++) {
585 			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
586 			    choose[j], false), !!j) == 0 || first_null ==
587 			    narenas_auto) {
588 				/*
589 				 * Use an unloaded arena, or the least loaded
590 				 * arena if all arenas are already initialized.
591 				 */
592 				if (!!j == internal) {
593 					ret = arena_get(tsd_tsdn(tsd),
594 					    choose[j], false);
595 				}
596 			} else {
597 				arena_t *arena;
598 
599 				/* Initialize a new arena. */
600 				choose[j] = first_null;
601 				arena = arena_init_locked(tsd_tsdn(tsd),
602 				    choose[j], (extent_hooks_t *)
603 				    __UNCONST(&extent_hooks_default));
604 				if (arena == NULL) {
605 					malloc_mutex_unlock(tsd_tsdn(tsd),
606 					    &arenas_lock);
607 					return NULL;
608 				}
609 				is_new_arena[j] = true;
610 				if (!!j == internal) {
611 					ret = arena;
612 				}
613 			}
614 			arena_bind(tsd, choose[j], !!j);
615 		}
616 		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
617 
618 		for (j = 0; j < 2; j++) {
619 			if (is_new_arena[j]) {
620 				assert(choose[j] > 0);
621 				arena_new_create_background_thread(
622 				    tsd_tsdn(tsd), choose[j]);
623 			}
624 		}
625 
626 	} else {
627 		ret = arena_get(tsd_tsdn(tsd), 0, false);
628 		arena_bind(tsd, 0, false);
629 		arena_bind(tsd, 0, true);
630 	}
631 
632 	return ret;
633 }
634 
635 void
636 iarena_cleanup(tsd_t *tsd) {
637 	arena_t *iarena;
638 
639 	iarena = tsd_iarena_get(tsd);
640 	if (iarena != NULL) {
641 		arena_unbind(tsd, arena_ind_get(iarena), true);
642 	}
643 }
644 
645 void
646 arena_cleanup(tsd_t *tsd) {
647 	arena_t *arena;
648 
649 	arena = tsd_arena_get(tsd);
650 	if (arena != NULL) {
651 		arena_unbind(tsd, arena_ind_get(arena), false);
652 	}
653 }
654 
655 void
656 arenas_tdata_cleanup(tsd_t *tsd) {
657 	arena_tdata_t *arenas_tdata;
658 
659 	/* Prevent tsd->arenas_tdata from being (re)created. */
660 	*tsd_arenas_tdata_bypassp_get(tsd) = true;
661 
662 	arenas_tdata = tsd_arenas_tdata_get(tsd);
663 	if (arenas_tdata != NULL) {
664 		tsd_arenas_tdata_set(tsd, NULL);
665 		a0dalloc(arenas_tdata);
666 	}
667 }
668 
669 static void
670 stats_print_atexit(void) {
671 	if (config_stats) {
672 		tsdn_t *tsdn;
673 		unsigned narenas, i;
674 
675 		tsdn = tsdn_fetch();
676 
677 		/*
678 		 * Merge stats from extant threads.  This is racy, since
679 		 * individual threads do not lock when recording tcache stats
680 		 * events.  As a consequence, the final stats may be slightly
681 		 * out of date by the time they are reported, if other threads
682 		 * continue to allocate.
683 		 */
684 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
685 			arena_t *arena = arena_get(tsdn, i, false);
686 			if (arena != NULL) {
687 				tcache_t *tcache;
688 
689 				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
690 				ql_foreach(tcache, &arena->tcache_ql, link) {
691 					tcache_stats_merge(tsdn, tcache, arena);
692 				}
693 				malloc_mutex_unlock(tsdn,
694 				    &arena->tcache_ql_mtx);
695 			}
696 		}
697 	}
698 	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
699 }
700 
701 /*
702  * Ensure that we don't hold any locks upon entry to or exit from allocator
703  * code (in a "broad" sense that doesn't count a reentrant allocation as an
704  * entrance or exit).
705  */
706 JEMALLOC_ALWAYS_INLINE void
707 check_entry_exit_locking(tsdn_t *tsdn) {
708 	if (!config_debug) {
709 		return;
710 	}
711 	if (tsdn_null(tsdn)) {
712 		return;
713 	}
714 	tsd_t *tsd = tsdn_tsd(tsdn);
715 	/*
716 	 * It's possible we hold locks at entry/exit if we're in a nested
717 	 * allocation.
718 	 */
719 	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
720 	if (reentrancy_level != 0) {
721 		return;
722 	}
723 	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
724 }
725 
726 /*
727  * End miscellaneous support functions.
728  */
729 /******************************************************************************/
730 /*
731  * Begin initialization functions.
732  */
733 
734 static char *
735 jemalloc_secure_getenv(const char *name) {
736 #ifdef JEMALLOC_HAVE_SECURE_GETENV
737 	return secure_getenv(name);
738 #else
739 #  ifdef JEMALLOC_HAVE_ISSETUGID
740 	if (issetugid() != 0) {
741 		return NULL;
742 	}
743 #  endif
744 	return getenv(name);
745 #endif
746 }
747 
748 static unsigned
749 malloc_ncpus(void) {
750 	long result;
751 
752 #ifdef _WIN32
753 	SYSTEM_INFO si;
754 	GetSystemInfo(&si);
755 	result = si.dwNumberOfProcessors;
756 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
757 	/*
758 	 * glibc >= 2.6 has the CPU_COUNT macro.
759 	 *
760 	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
761 	 * *before* setting up the isspace tables.  Therefore we need a
762 	 * different method to get the number of CPUs.
763 	 */
764 	{
765 		cpu_set_t set;
766 
767 		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
768 		result = CPU_COUNT(&set);
769 	}
770 #else
771 	result = sysconf(_SC_NPROCESSORS_ONLN);
772 #endif
773 	return ((result == -1) ? 1 : (unsigned)result);
774 }
775 
776 static void
777 init_opt_stats_print_opts(const char *v, size_t vlen) {
778 	size_t opts_len = strlen(opt_stats_print_opts);
779 	assert(opts_len <= stats_print_tot_num_options);
780 
781 	for (size_t i = 0; i < vlen; i++) {
782 		switch (v[i]) {
783 #define OPTION(o, v, d, s) case o: break;
784 			STATS_PRINT_OPTIONS
785 #undef OPTION
786 		default: continue;
787 		}
788 
789 		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
790 			/* Ignore repeated. */
791 			continue;
792 		}
793 
794 		opt_stats_print_opts[opts_len++] = v[i];
795 		opt_stats_print_opts[opts_len] = '\0';
796 		assert(opts_len <= stats_print_tot_num_options);
797 	}
798 	assert(opts_len == strlen(opt_stats_print_opts));
799 }
800 
801 static bool
802 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
803     char const **v_p, size_t *vlen_p) {
804 	bool accept;
805 	const char *opts = *opts_p;
806 
807 	*k_p = opts;
808 
809 	for (accept = false; !accept;) {
810 		switch (*opts) {
811 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
812 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
813 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
814 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
815 		case 'Y': case 'Z':
816 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
817 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
818 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
819 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
820 		case 'y': case 'z':
821 		case '0': case '1': case '2': case '3': case '4': case '5':
822 		case '6': case '7': case '8': case '9':
823 		case '_':
824 			opts++;
825 			break;
826 		case ':':
827 			opts++;
828 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
829 			*v_p = opts;
830 			accept = true;
831 			break;
832 		case '\0':
833 			if (opts != *opts_p) {
834 				malloc_write("<jemalloc>: Conf string ends "
835 				    "with key\n");
836 			}
837 			return true;
838 		default:
839 			malloc_write("<jemalloc>: Malformed conf string\n");
840 			return true;
841 		}
842 	}
843 
844 	for (accept = false; !accept;) {
845 		switch (*opts) {
846 		case ',':
847 			opts++;
848 			/*
849 			 * Look ahead one character here, because the next time
850 			 * this function is called, it will assume that end of
851 			 * input has been cleanly reached if no input remains,
852 			 * but we have optimistically already consumed the
853 			 * comma if one exists.
854 			 */
855 			if (*opts == '\0') {
856 				malloc_write("<jemalloc>: Conf string ends "
857 				    "with comma\n");
858 			}
859 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
860 			accept = true;
861 			break;
862 		case '\0':
863 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
864 			accept = true;
865 			break;
866 		default:
867 			opts++;
868 			break;
869 		}
870 	}
871 
872 	*opts_p = opts;
873 	return false;
874 }
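
/*
 * Example of the tokenization above (a sketch, not exercised by the build):
 * for opts = "abort:true,narenas:4", successive calls produce
 *
 *	k = "abort",   klen = 5,  v = "true",  vlen = 4
 *	k = "narenas", klen = 7,  v = "4",     vlen = 1
 *
 * after which *opts_p points at the terminating '\0', so the caller's loop
 * in malloc_conf_init() stops.
 */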
875 
876 static JEMALLOC_NORETURN void
877 malloc_abort_invalid_conf(void) {
878 	assert(opt_abort_conf);
879 	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
880 	    "value (see above).\n");
881 	abort();
882 }
883 
884 static void
885 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
886     size_t vlen) {
887 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
888 	    (int)vlen, v);
889 	/* If abort_conf is set, error out after processing all options. */
890 	had_conf_error = true;
891 }
892 
893 static void
894 malloc_slow_flag_init(void) {
895 	/*
896 	 * Combine the runtime options into malloc_slow for fast path.  Called
897 	 * after processing all the options.
898 	 */
899 	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
900 	    | (opt_junk_free ? flag_opt_junk_free : 0)
901 	    | (opt_zero ? flag_opt_zero : 0)
902 	    | (opt_utrace ? flag_opt_utrace : 0)
903 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
904 
905 	malloc_slow = (malloc_slow_flags != 0);
906 }
907 
908 static void
909 malloc_conf_init(void) {
910 	unsigned i;
911 	char buf[PATH_MAX + 1];
912 	const char *opts, *k, *v;
913 	size_t klen, vlen;
914 
915 	for (i = 0; i < 4; i++) {
916 		/* Get runtime configuration. */
917 		switch (i) {
918 		case 0:
919 			opts = config_malloc_conf;
920 			break;
921 		case 1:
922 			if (je_malloc_conf != NULL) {
923 				/*
924 				 * Use options that were compiled into the
925 				 * program.
926 				 */
927 				opts = je_malloc_conf;
928 			} else {
929 				/* No configuration specified. */
930 				buf[0] = '\0';
931 				opts = buf;
932 			}
933 			break;
934 		case 2: {
935 			ssize_t linklen = 0;
936 #ifndef _WIN32
937 			int saved_errno = errno;
938 			const char *linkname =
939 #  ifdef JEMALLOC_PREFIX
940 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
941 #  else
942 			    "/etc/malloc.conf"
943 #  endif
944 			    ;
945 
946 			/*
947 			 * Try to use the contents of the "/etc/malloc.conf"
948 			 * symbolic link's name.
949 			 */
950 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
951 			if (linklen == -1) {
952 				/* No configuration specified. */
953 				linklen = 0;
954 				/* Restore errno. */
955 				set_errno(saved_errno);
956 			}
957 #endif
958 			buf[linklen] = '\0';
959 			opts = buf;
960 			break;
961 		} case 3: {
962 			const char *envname =
963 #ifdef JEMALLOC_PREFIX
964 			    JEMALLOC_CPREFIX"MALLOC_CONF"
965 #else
966 			    "MALLOC_CONF"
967 #endif
968 			    ;
969 
970 			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
971 				/*
972 				 * Do nothing; opts is already initialized to
973 				 * the value of the MALLOC_CONF environment
974 				 * variable.
975 				 */
976 			} else {
977 				/* No configuration specified. */
978 				buf[0] = '\0';
979 				opts = buf;
980 			}
981 			break;
982 		} default:
983 			not_reached();
984 			buf[0] = '\0';
985 			opts = buf;
986 		}
987 
988 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
989 		    &vlen)) {
990 #define CONF_MATCH(n)							\
991 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
992 #define CONF_MATCH_VALUE(n)						\
993 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
994 #define CONF_HANDLE_BOOL(o, n)						\
995 			if (CONF_MATCH(n)) {				\
996 				if (CONF_MATCH_VALUE("true")) {		\
997 					o = true;			\
998 				} else if (CONF_MATCH_VALUE("false")) {	\
999 					o = false;			\
1000 				} else {				\
1001 					malloc_conf_error(		\
1002 					    "Invalid conf value",	\
1003 					    k, klen, v, vlen);		\
1004 				}					\
1005 				continue;				\
1006 			}
1007 #define CONF_MIN_no(um, min)	false
1008 #define CONF_MIN_yes(um, min)	((um) < (min))
1009 #define CONF_MAX_no(um, max)	false
1010 #define CONF_MAX_yes(um, max)	((um) > (max))
1011 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
1012 			if (CONF_MATCH(n)) {				\
1013 				uintmax_t um;				\
1014 				const char *end;			\
1015 									\
1016 				set_errno(0);				\
1017 				um = malloc_strtoumax(v, &end, 0);	\
1018 				if (get_errno() != 0 || (uintptr_t)end -\
1019 				    (uintptr_t)v != vlen) {		\
1020 					malloc_conf_error(		\
1021 					    "Invalid conf value",	\
1022 					    k, klen, v, vlen);		\
1023 				} else if (clip) {			\
1024 					if (CONF_MIN_##check_min(um,	\
1025 					    (t)(min))) {		\
1026 						o = (t)(min);		\
1027 					} else if (			\
1028 					    CONF_MAX_##check_max(um,	\
1029 					    (t)(max))) {		\
1030 						o = (t)(max);		\
1031 					} else {			\
1032 						o = (t)um;		\
1033 					}				\
1034 				} else {				\
1035 					if (CONF_MIN_##check_min(um,	\
1036 					    (t)(min)) ||		\
1037 					    CONF_MAX_##check_max(um,	\
1038 					    (t)(max))) {		\
1039 						malloc_conf_error(	\
1040 						    "Out-of-range "	\
1041 						    "conf value",	\
1042 						    k, klen, v, vlen);	\
1043 					} else {			\
1044 						o = (t)um;		\
1045 					}				\
1046 				}					\
1047 				continue;				\
1048 			}
1049 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
1050     clip)								\
1051 			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
1052 			    check_min, check_max, clip)
1053 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
1054 			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
1055 			    check_min, check_max, clip)
1056 #define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1057 			if (CONF_MATCH(n)) {				\
1058 				long l;					\
1059 				char *end;				\
1060 									\
1061 				set_errno(0);				\
1062 				l = strtol(v, &end, 0);			\
1063 				if (get_errno() != 0 || (uintptr_t)end -\
1064 				    (uintptr_t)v != vlen) {		\
1065 					malloc_conf_error(		\
1066 					    "Invalid conf value",	\
1067 					    k, klen, v, vlen);		\
1068 				} else if (l < (ssize_t)(min) || l >	\
1069 				    (ssize_t)(max)) {			\
1070 					malloc_conf_error(		\
1071 					    "Out-of-range conf value",	\
1072 					    k, klen, v, vlen);		\
1073 				} else {				\
1074 					o = l;				\
1075 				}					\
1076 				continue;				\
1077 			}
1078 #define CONF_HANDLE_CHAR_P(o, n, d)					\
1079 			if (CONF_MATCH(n)) {				\
1080 				size_t cpylen = (vlen <=		\
1081 				    sizeof(o)-1) ? vlen :		\
1082 				    sizeof(o)-1;			\
1083 				strncpy(o, v, cpylen);			\
1084 				o[cpylen] = '\0';			\
1085 				continue;				\
1086 			}
1087 
1088 			CONF_HANDLE_BOOL(opt_abort, "abort")
1089 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1090 			if (strncmp("metadata_thp", k, klen) == 0) {
1091 				int ii;
1092 				bool match = false;
1093 				for (ii = 0; ii < metadata_thp_mode_limit; ii++) {
1094 					if (strncmp(metadata_thp_mode_names[ii],
1095 					    v, vlen) == 0) {
1096 						opt_metadata_thp = ii;
1097 						match = true;
1098 						break;
1099 					}
1100 				}
1101 				if (!match) {
1102 					malloc_conf_error("Invalid conf value",
1103 					    k, klen, v, vlen);
1104 				}
1105 				continue;
1106 			}
1107 			CONF_HANDLE_BOOL(opt_retain, "retain")
1108 			if (strncmp("dss", k, klen) == 0) {
1109 				int ii;
1110 				bool match = false;
1111 				for (ii = 0; ii < dss_prec_limit; ii++) {
1112 					if (strncmp(dss_prec_names[ii], v, vlen)
1113 					    == 0) {
1114 						if (extent_dss_prec_set(ii)) {
1115 							malloc_conf_error(
1116 							    "Error setting dss",
1117 							    k, klen, v, vlen);
1118 						} else {
1119 							opt_dss =
1120 							    dss_prec_names[ii];
1121 							match = true;
1122 							break;
1123 						}
1124 					}
1125 				}
1126 				if (!match) {
1127 					malloc_conf_error("Invalid conf value",
1128 					    k, klen, v, vlen);
1129 				}
1130 				continue;
1131 			}
1132 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1133 			    UINT_MAX, yes, no, false)
1134 			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1135 			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1136 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1137 			    SSIZE_MAX);
1138 			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1139 			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1140 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1141 			    SSIZE_MAX);
1142 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1143 			if (CONF_MATCH("stats_print_opts")) {
1144 				init_opt_stats_print_opts(v, vlen);
1145 				continue;
1146 			}
1147 			if (config_fill) {
1148 				if (CONF_MATCH("junk")) {
1149 					if (CONF_MATCH_VALUE("true")) {
1150 						opt_junk = "true";
1151 						opt_junk_alloc = opt_junk_free =
1152 						    true;
1153 					} else if (CONF_MATCH_VALUE("false")) {
1154 						opt_junk = "false";
1155 						opt_junk_alloc = opt_junk_free =
1156 						    false;
1157 					} else if (CONF_MATCH_VALUE("alloc")) {
1158 						opt_junk = "alloc";
1159 						opt_junk_alloc = true;
1160 						opt_junk_free = false;
1161 					} else if (CONF_MATCH_VALUE("free")) {
1162 						opt_junk = "free";
1163 						opt_junk_alloc = false;
1164 						opt_junk_free = true;
1165 					} else {
1166 						malloc_conf_error(
1167 						    "Invalid conf value", k,
1168 						    klen, v, vlen);
1169 					}
1170 					continue;
1171 				}
1172 				CONF_HANDLE_BOOL(opt_zero, "zero")
1173 			}
1174 			if (config_utrace) {
1175 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
1176 			}
1177 			if (config_xmalloc) {
1178 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1179 			}
1180 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
1181 			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1182 			    "lg_extent_max_active_fit", 0,
1183 			    (sizeof(size_t) << 3), no, yes, false)
1184 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1185 			    -1, (sizeof(size_t) << 3) - 1)
1186 			if (strncmp("percpu_arena", k, klen) == 0) {
1187 				bool match = false;
1188 				for (int ii = percpu_arena_mode_names_base; ii <
1189 				    percpu_arena_mode_names_limit; ii++) {
1190 					if (strncmp(percpu_arena_mode_names[ii],
1191 					    v, vlen) == 0) {
1192 						if (!have_percpu_arena) {
1193 							malloc_conf_error(
1194 							    "No getcpu support",
1195 							    k, klen, v, vlen);
1196 						}
1197 						opt_percpu_arena = ii;
1198 						match = true;
1199 						break;
1200 					}
1201 				}
1202 				if (!match) {
1203 					malloc_conf_error("Invalid conf value",
1204 					    k, klen, v, vlen);
1205 				}
1206 				continue;
1207 			}
1208 			CONF_HANDLE_BOOL(opt_background_thread,
1209 			    "background_thread");
1210 			CONF_HANDLE_SIZE_T(opt_max_background_threads,
1211 					   "max_background_threads", 1,
1212 					   opt_max_background_threads, yes, yes,
1213 					   true);
1214 			if (config_prof) {
1215 				CONF_HANDLE_BOOL(opt_prof, "prof")
1216 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1217 				    "prof_prefix", "jeprof")
1218 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1219 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1220 				    "prof_thread_active_init")
1221 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1222 				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1223 				    - 1, no, yes, true)
1224 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1225 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1226 				    "lg_prof_interval", -1,
1227 				    (sizeof(uint64_t) << 3) - 1)
1228 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1229 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1230 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1231 			}
1232 			if (config_log) {
1233 				if (CONF_MATCH("log")) {
1234 					size_t cpylen = (
1235 					    vlen <= sizeof(log_var_names) ?
1236 					    vlen : sizeof(log_var_names) - 1);
1237 					strncpy(log_var_names, v, cpylen);
1238 					log_var_names[cpylen] = '\0';
1239 					continue;
1240 				}
1241 			}
1242 			if (CONF_MATCH("thp")) {
1243 				bool match = false;
1244 				for (int ii = 0; ii < thp_mode_names_limit; ii++) {
1245 					if (strncmp(thp_mode_names[ii], v, vlen)
1246 					    == 0) {
1247 						if (!have_madvise_huge) {
1248 							malloc_conf_error(
1249 							    "No THP support",
1250 							    k, klen, v, vlen);
1251 						}
1252 						opt_thp = ii;
1253 						match = true;
1254 						break;
1255 					}
1256 				}
1257 				if (!match) {
1258 					malloc_conf_error("Invalid conf value",
1259 					    k, klen, v, vlen);
1260 				}
1261 				continue;
1262 			}
1263 			malloc_conf_error("Invalid conf pair", k, klen, v,
1264 			    vlen);
1265 #undef CONF_MATCH
1266 #undef CONF_MATCH_VALUE
1267 #undef CONF_HANDLE_BOOL
1268 #undef CONF_MIN_no
1269 #undef CONF_MIN_yes
1270 #undef CONF_MAX_no
1271 #undef CONF_MAX_yes
1272 #undef CONF_HANDLE_T_U
1273 #undef CONF_HANDLE_UNSIGNED
1274 #undef CONF_HANDLE_SIZE_T
1275 #undef CONF_HANDLE_SSIZE_T
1276 #undef CONF_HANDLE_CHAR_P
1277 		}
1278 		if (opt_abort_conf && had_conf_error) {
1279 			malloc_abort_invalid_conf();
1280 		}
1281 	}
1282 	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1283 }
1284 
1285 static bool
1286 malloc_init_hard_needed(void) {
1287 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1288 	    malloc_init_recursible)) {
1289 		/*
1290 		 * Another thread initialized the allocator before this one
1291 		 * acquired init_lock, or this thread is the initializing
1292 		 * thread, and it is recursively allocating.
1293 		 */
1294 		return false;
1295 	}
1296 #ifdef JEMALLOC_THREADED_INIT
1297 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1298 		/* Busy-wait until the initializing thread completes. */
1299 		spin_t spinner = SPIN_INITIALIZER;
1300 		do {
1301 			malloc_mutex_unlock(TSDN_NULL, &init_lock);
1302 			spin_adaptive(&spinner);
1303 			malloc_mutex_lock(TSDN_NULL, &init_lock);
1304 		} while (!malloc_initialized());
1305 		return false;
1306 	}
1307 #endif
1308 	return true;
1309 }
1310 
1311 static bool
1312 malloc_init_hard_a0_locked(void) {
1313 	malloc_initializer = INITIALIZER;
1314 
1315 	if (config_prof) {
1316 		prof_boot0();
1317 	}
1318 	malloc_conf_init();
1319 	if (opt_stats_print) {
1320 		/* Print statistics at exit. */
1321 		if (atexit(stats_print_atexit) != 0) {
1322 			malloc_write("<jemalloc>: Error in atexit()\n");
1323 			if (opt_abort) {
1324 				abort();
1325 			}
1326 		}
1327 	}
1328 	if (pages_boot()) {
1329 		return true;
1330 	}
1331 	if (base_boot(TSDN_NULL)) {
1332 		return true;
1333 	}
1334 	if (extent_boot()) {
1335 		return true;
1336 	}
1337 	if (ctl_boot()) {
1338 		return true;
1339 	}
1340 	if (config_prof) {
1341 		prof_boot1();
1342 	}
1343 	arena_boot();
1344 	if (tcache_boot(TSDN_NULL)) {
1345 		return true;
1346 	}
1347 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1348 	    malloc_mutex_rank_exclusive)) {
1349 		return true;
1350 	}
1351 	/*
1352 	 * Create enough scaffolding to allow recursive allocation in
1353 	 * malloc_ncpus().
1354 	 */
1355 	narenas_auto = 1;
1356 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1357 	/*
1358 	 * Initialize one arena here.  The rest are lazily created in
1359 	 * arena_choose_hard().
1360 	 */
1361 	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)__UNCONST(&extent_hooks_default))
1362 	    == NULL) {
1363 		return true;
1364 	}
1365 	a0 = arena_get(TSDN_NULL, 0, false);
1366 	malloc_init_state = malloc_init_a0_initialized;
1367 
1368 	return false;
1369 }
1370 
1371 static bool
1372 malloc_init_hard_a0(void) {
1373 	bool ret;
1374 
1375 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1376 	ret = malloc_init_hard_a0_locked();
1377 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1378 	return ret;
1379 }
1380 
1381 /* Initialize data structures which may trigger recursive allocation. */
1382 static bool
1383 malloc_init_hard_recursible(void) {
1384 	malloc_init_state = malloc_init_recursible;
1385 
1386 	ncpus = malloc_ncpus();
1387 
1388 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1389     && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1390     !defined(__native_client__) && !defined(__NetBSD__))
1391 	/* LinuxThreads' pthread_atfork() allocates. */
1392 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1393 	    jemalloc_postfork_child) != 0) {
1394 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1395 		if (opt_abort) {
1396 			abort();
1397 		}
1398 		return true;
1399 	}
1400 #endif
1401 
1402 	if (background_thread_boot0()) {
1403 		return true;
1404 	}
1405 
1406 	return false;
1407 }
1408 
1409 static unsigned
1410 malloc_narenas_default(void) {
1411 	assert(ncpus > 0);
1412 	/*
1413 	 * For SMP systems, create more than one arena per CPU by
1414 	 * default.
1415 	 */
1416 	if (ncpus > 1) {
1417 		return ncpus << 2;
1418 	} else {
1419 		return 1;
1420 	}
1421 }
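
/*
 * Illustration of the default above: an 8-CPU system gets 8 << 2 == 32
 * automatic arenas unless opt_narenas overrides it.
 */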
1422 
1423 static percpu_arena_mode_t
1424 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1425 	assert(!malloc_initialized());
1426 	assert(mode <= percpu_arena_disabled);
1427 
1428 	if (mode != percpu_arena_disabled) {
1429 		mode += percpu_arena_mode_enabled_base;
1430 	}
1431 
1432 	return mode;
1433 }
1434 
1435 static bool
1436 malloc_init_narenas(void) {
1437 	assert(ncpus > 0);
1438 
1439 	if (opt_percpu_arena != percpu_arena_disabled) {
1440 		if (!have_percpu_arena || malloc_getcpu() < 0) {
1441 			opt_percpu_arena = percpu_arena_disabled;
1442 			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1443 			    "available. Setting narenas to %u.\n", opt_narenas ?
1444 			    opt_narenas : malloc_narenas_default());
1445 			if (opt_abort) {
1446 				abort();
1447 			}
1448 		} else {
1449 			if (ncpus >= MALLOCX_ARENA_LIMIT) {
1450 				malloc_printf("<jemalloc>: narenas w/ percpu "
1451 				    "arena beyond limit (%d)\n", ncpus);
1452 				if (opt_abort) {
1453 					abort();
1454 				}
1455 				return true;
1456 			}
1457 			/* NB: opt_percpu_arena isn't fully initialized yet. */
1458 			if (percpu_arena_as_initialized(opt_percpu_arena) ==
1459 			    per_phycpu_arena && ncpus % 2 != 0) {
1460 				malloc_printf("<jemalloc>: invalid "
1461 				    "configuration -- per physical CPU arena "
1462 				    "with odd number (%u) of CPUs (no hyper "
1463 				    "threading?).\n", ncpus);
1464 				if (opt_abort)
1465 					abort();
1466 			}
1467 			unsigned n = percpu_arena_ind_limit(
1468 			    percpu_arena_as_initialized(opt_percpu_arena));
1469 			if (opt_narenas < n) {
1470 				/*
1471 				 * If narenas is specified with percpu_arena
1472 				 * enabled, actual narenas is set as the greater
1473 				 * of the two. percpu_arena_choose will be free
1474 				 * to use any of the arenas based on CPU
1475 				 * id. This is conservative (at a small cost)
1476 				 * but ensures correctness.
1477 				 *
1478 				 * If for some reason the ncpus determined at
1479 				 * boot is not the actual number (e.g. because
1480 				 * of affinity setting from numactl), reserving
1481 				 * narenas this way provides a workaround for
1482 				 * percpu_arena.
1483 				 */
1484 				opt_narenas = n;
1485 			}
1486 		}
1487 	}
1488 	if (opt_narenas == 0) {
1489 		opt_narenas = malloc_narenas_default();
1490 	}
1491 	assert(opt_narenas > 0);
1492 
1493 	narenas_auto = opt_narenas;
1494 	/*
1495 	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1496 	 */
1497 	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1498 		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1499 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1500 		    narenas_auto);
1501 	}
1502 	narenas_total_set(narenas_auto);
1503 
1504 	return false;
1505 }
1506 
1507 static void
1508 malloc_init_percpu(void) {
1509 	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1510 }
1511 
1512 static bool
1513 malloc_init_hard_finish(void) {
1514 	if (malloc_mutex_boot()) {
1515 		return true;
1516 	}
1517 
1518 	malloc_init_state = malloc_init_initialized;
1519 	malloc_slow_flag_init();
1520 
1521 	return false;
1522 }
1523 
1524 static void
1525 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1526 	malloc_mutex_assert_owner(tsdn, &init_lock);
1527 	malloc_mutex_unlock(tsdn, &init_lock);
1528 	if (reentrancy_set) {
1529 		assert(!tsdn_null(tsdn));
1530 		tsd_t *tsd = tsdn_tsd(tsdn);
1531 		assert(tsd_reentrancy_level_get(tsd) > 0);
1532 		post_reentrancy(tsd);
1533 	}
1534 }
1535 
1536 static bool
1537 malloc_init_hard(void) {
1538 	tsd_t *tsd;
1539 
1540 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1541 	_init_init_lock();
1542 #endif
1543 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1544 
1545 #define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
1546 	malloc_init_hard_cleanup(tsdn, reentrancy);	\
1547 	return ret;
1548 
1549 	if (!malloc_init_hard_needed()) {
1550 		UNLOCK_RETURN(TSDN_NULL, false, false)
1551 	}
1552 
1553 	if (malloc_init_state != malloc_init_a0_initialized &&
1554 	    malloc_init_hard_a0_locked()) {
1555 		UNLOCK_RETURN(TSDN_NULL, true, false)
1556 	}
1557 
1558 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1559 	/* Recursive allocation relies on functional tsd. */
1560 	tsd = malloc_tsd_boot0();
1561 	if (tsd == NULL) {
1562 		return true;
1563 	}
1564 	if (malloc_init_hard_recursible()) {
1565 		return true;
1566 	}
1567 
1568 	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1569 	/* Set reentrancy level to 1 during init. */
1570 	pre_reentrancy(tsd, NULL);
1571 	/* Initialize narenas before prof_boot2 (for allocation). */
1572 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1573 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1574 	}
1575 	if (config_prof && prof_boot2(tsd)) {
1576 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1577 	}
1578 
1579 	malloc_init_percpu();
1580 
1581 	if (malloc_init_hard_finish()) {
1582 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1583 	}
1584 	post_reentrancy(tsd);
1585 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1586 
1587 	witness_assert_lockless(witness_tsd_tsdn(
1588 	    tsd_witness_tsdp_get_unsafe(tsd)));
1589 	malloc_tsd_boot1();
1590 	/* Update TSD after tsd_boot1. */
1591 	tsd = tsd_fetch();
1592 	if (opt_background_thread) {
1593 		assert(have_background_thread);
1594 		/*
1595 		 * Need to finish init & unlock first before creating background
1596 		 * threads (pthread_create depends on malloc).  ctl_init (which
1597 		 * sets isthreaded) needs to be called without holding any lock.
1598 		 */
1599 		background_thread_ctl_init(tsd_tsdn(tsd));
1600 
1601 		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1602 		bool err = background_thread_create(tsd, 0);
1603 		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1604 		if (err) {
1605 			return true;
1606 		}
1607 	}
1608 #undef UNLOCK_RETURN
1609 	return false;
1610 }
1611 
1612 /*
1613  * End initialization functions.
1614  */
1615 /******************************************************************************/
1616 /*
1617  * Begin allocation-path internal functions and data structures.
1618  */
1619 
1620 /*
1621  * Settings determined by the documented behavior of the allocation functions.
1622  */
1623 typedef struct static_opts_s static_opts_t;
1624 struct static_opts_s {
1625 	/* Whether or not allocation size may overflow. */
1626 	bool may_overflow;
1627 	/* Whether or not allocations of size 0 should be treated as size 1. */
1628 	bool bump_empty_alloc;
1629 	/*
1630 	 * Whether to assert that allocations are not of size 0 (after any
1631 	 * bumping).
1632 	 */
1633 	bool assert_nonempty_alloc;
1634 
1635 	/*
1636 	 * Whether or not to modify the 'result' argument to malloc in case of
1637 	 * error.
1638 	 */
1639 	bool null_out_result_on_error;
1640 	/* Whether to set errno when we encounter an error condition. */
1641 	bool set_errno_on_error;
1642 
1643 	/*
1644 	 * The minimum valid alignment for functions requesting aligned storage.
1645 	 */
1646 	size_t min_alignment;
1647 
1648 	/* The error string to use if we oom. */
1649 	const char *oom_string;
1650 	/* The error string to use if the passed-in alignment is invalid. */
1651 	const char *invalid_alignment_string;
1652 
1653 	/*
1654 	 * False if we're configured to skip some time-consuming operations.
1655 	 *
1656 	 * This isn't really a malloc "behavior", but it acts as a useful
1657 	 * summary of several other static (or at least, static after program
1658 	 * initialization) options.
1659 	 */
1660 	bool slow;
1661 };
1662 
1663 JEMALLOC_ALWAYS_INLINE void
1664 static_opts_init(static_opts_t *static_opts) {
1665 	static_opts->may_overflow = false;
1666 	static_opts->bump_empty_alloc = false;
1667 	static_opts->assert_nonempty_alloc = false;
1668 	static_opts->null_out_result_on_error = false;
1669 	static_opts->set_errno_on_error = false;
1670 	static_opts->min_alignment = 0;
1671 	static_opts->oom_string = "";
1672 	static_opts->invalid_alignment_string = "";
1673 	static_opts->slow = false;
1674 }
1675 
1676 /*
1677  * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
1678  * should have one constant here per magic value there.  Note however that the
1679  * representations need not be related.
1680  */
1681 #define TCACHE_IND_NONE ((unsigned)-1)
1682 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1683 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1684 
1685 typedef struct dynamic_opts_s dynamic_opts_t;
1686 struct dynamic_opts_s {
1687 	void **result;
1688 	size_t num_items;
1689 	size_t item_size;
1690 	size_t alignment;
1691 	bool zero;
1692 	unsigned tcache_ind;
1693 	unsigned arena_ind;
1694 };
1695 
1696 JEMALLOC_ALWAYS_INLINE void
1697 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1698 	dynamic_opts->result = NULL;
1699 	dynamic_opts->num_items = 0;
1700 	dynamic_opts->item_size = 0;
1701 	dynamic_opts->alignment = 0;
1702 	dynamic_opts->zero = false;
1703 	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1704 	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1705 }
1706 
1707 /* ind is ignored if dopts->alignment > 0. */
1708 JEMALLOC_ALWAYS_INLINE void *
1709 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1710     size_t size, size_t usize, szind_t ind) {
1711 	tcache_t *tcache;
1712 	arena_t *arena;
1713 
1714 	/* Fill in the tcache. */
1715 	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1716 		if (likely(!sopts->slow)) {
1717 			/* Getting tcache ptr unconditionally. */
1718 			tcache = tsd_tcachep_get(tsd);
1719 			assert(tcache == tcache_get(tsd));
1720 		} else {
1721 			tcache = tcache_get(tsd);
1722 		}
1723 	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1724 		tcache = NULL;
1725 	} else {
1726 		tcache = tcaches_get(tsd, dopts->tcache_ind);
1727 	}
1728 
1729 	/* Fill in the arena. */
1730 	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1731 		/*
1732 		 * In case of automatic arena management, we defer arena
1733 		 * computation until as late as we can, hoping to fill the
1734 		 * allocation out of the tcache.
1735 		 */
1736 		arena = NULL;
1737 	} else {
1738 		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1739 	}
1740 
1741 	if (unlikely(dopts->alignment != 0)) {
1742 		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1743 		    dopts->zero, tcache, arena);
1744 	}
1745 
1746 	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1747 	    arena, sopts->slow);
1748 }
1749 
1750 JEMALLOC_ALWAYS_INLINE void *
1751 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1752     size_t usize, szind_t ind) {
1753 	void *ret;
1754 
1755 	/*
1756 	 * For small allocations, sampling bumps the usize.  If so, we allocate
1757 	 * from the ind_large bucket.
1758 	 */
1759 	szind_t ind_large;
1760 	size_t bumped_usize = usize;
1761 
1762 	if (usize <= SMALL_MAXCLASS) {
1763 		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1764 		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1765 		    == LARGE_MINCLASS);
1766 		ind_large = sz_size2index(LARGE_MINCLASS);
1767 		bumped_usize = sz_s2u(LARGE_MINCLASS);
1768 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1769 		    bumped_usize, ind_large);
1770 		if (unlikely(ret == NULL)) {
1771 			return NULL;
1772 		}
1773 		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1774 	} else {
1775 		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1776 	}
1777 
1778 	return ret;
1779 }
1780 
1781 /*
1782  * Returns true if the allocation will overflow, and false otherwise.  Sets
1783  * *size to the product either way.
1784  */
1785 JEMALLOC_ALWAYS_INLINE bool
1786 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1787     size_t *size) {
1788 	/*
1789 	 * This function is just num_items * item_size, except that we may have
1790 	 * to check for overflow.
1791 	 */
1792 
1793 	if (!may_overflow) {
1794 		assert(dopts->num_items == 1);
1795 		*size = dopts->item_size;
1796 		return false;
1797 	}
1798 
1799 	/* A size_t with its high-half bits all set to 1. */
1800 	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1801 
1802 	*size = dopts->item_size * dopts->num_items;
1803 
1804 	if (unlikely(*size == 0)) {
1805 		return (dopts->num_items != 0 && dopts->item_size != 0);
1806 	}
1807 
1808 	/*
1809 	 * We got a non-zero size, but we don't know if we overflowed to get
1810 	 * there.  To avoid having to do a divide, we'll be clever and note that
1811 	 * if both A and B can be represented in N/2 bits, then their product
1812 	 * can be represented in N bits (without the possibility of overflow).
1813 	 */
1814 	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1815 		return false;
1816 	}
1817 	if (likely(*size / dopts->item_size == dopts->num_items)) {
1818 		return false;
1819 	}
1820 	return true;
1821 }
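
/*
 * Worked example of the half-width check above (illustrative only; 64-bit
 * size_t assumed): high_bits is 0xffffffff00000000.  For num_items = 3 and
 * item_size = 8, both operands fit in the low 32 bits, so the mask test
 * passes and the product 24 is accepted without a division.  For
 * num_items = item_size = 0x100000001, the product wraps to 0x200000001,
 * the mask test sees bit 32 set, and the division check
 * (0x200000001 / 0x100000001 == 1 != num_items) reports the overflow.
 */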
1822 
1823 JEMALLOC_ALWAYS_INLINE int
1824 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1825 	/* Where the actual allocated memory will live. */
1826 	void *allocation = NULL;
1827 	/* Filled in by compute_size_with_overflow below. */
1828 	size_t size = 0;
1829 	/*
1830 	 * For unaligned allocations, we need only ind.  For aligned
1831 	 * allocations, or in case of stats or profiling we need usize.
1832 	 *
1833 	 * These are actually dead stores, in that their values are reset before
1834 	 * any branch on their value is taken.  Sometimes though, it's
1835 	 * convenient to pass them as arguments before this point.  To avoid
1836 	 * undefined behavior then, we initialize them with dummy stores.
1837 	 */
1838 	szind_t ind = 0;
1839 	size_t usize = 0;
1840 
1841 	/* Reentrancy is only checked on slow path. */
1842 	int8_t reentrancy_level;
1843 
1844 	/* Compute the amount of memory the user wants. */
1845 	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1846 	    &size))) {
1847 		goto label_oom;
1848 	}
1849 
1850 	/* Validate the user input. */
1851 	if (sopts->bump_empty_alloc) {
1852 		if (unlikely(size == 0)) {
1853 			size = 1;
1854 		}
1855 	}
1856 
1857 	if (sopts->assert_nonempty_alloc) {
1858 		assert(size != 0);
1859 	}
1860 
1861 	if (unlikely(dopts->alignment < sopts->min_alignment
1862 	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1863 		goto label_invalid_alignment;
1864 	}
1865 
1866 	/* This is the beginning of the "core" algorithm. */
1867 
1868 	if (dopts->alignment == 0) {
1869 		ind = sz_size2index(size);
1870 		if (unlikely(ind >= NSIZES)) {
1871 			goto label_oom;
1872 		}
1873 		if (config_stats || (config_prof && opt_prof)) {
1874 			usize = sz_index2size(ind);
1875 			assert(usize > 0 && usize <= LARGE_MAXCLASS);
1876 		}
1877 	} else {
1878 		usize = sz_sa2u(size, dopts->alignment);
1879 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1880 			goto label_oom;
1881 		}
1882 	}
1883 
1884 	check_entry_exit_locking(tsd_tsdn(tsd));
1885 
1886 	/*
1887 	 * If we need to handle reentrancy, we can do it out of a
1888 	 * known-initialized arena (i.e. arena 0).
1889 	 */
1890 	reentrancy_level = tsd_reentrancy_level_get(tsd);
1891 	if (sopts->slow && unlikely(reentrancy_level > 0)) {
1892 		/*
1893 		 * We should never specify particular arenas or tcaches from
1894 		 * within our internal allocations.
1895 		 */
1896 		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1897 		    dopts->tcache_ind == TCACHE_IND_NONE);
1898 		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1899 		dopts->tcache_ind = TCACHE_IND_NONE;
1900 		/* We know that arena 0 has already been initialized. */
1901 		dopts->arena_ind = 0;
1902 	}
1903 
1904 	/* If profiling is on, get our profiling context. */
1905 	if (config_prof && opt_prof) {
1906 		/*
1907 		 * Note that if we're going down this path, usize must have been
1908 		 * initialized in the previous if statement.
1909 		 */
1910 		prof_tctx_t *tctx = prof_alloc_prep(
1911 		    tsd, usize, prof_active_get_unlocked(), true);
1912 
1913 		alloc_ctx_t alloc_ctx;
1914 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1915 			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1916 			allocation = imalloc_no_sample(
1917 			    sopts, dopts, tsd, usize, usize, ind);
1918 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1919 			/*
1920 			 * Note that ind might still be 0 here.  This is fine;
1921 			 * imalloc_sample ignores ind if dopts->alignment > 0.
1922 			 */
1923 			allocation = imalloc_sample(
1924 			    sopts, dopts, tsd, usize, ind);
1925 			alloc_ctx.slab = false;
1926 		} else {
1927 			allocation = NULL;
1928 		}
1929 
1930 		if (unlikely(allocation == NULL)) {
1931 			prof_alloc_rollback(tsd, tctx, true);
1932 			goto label_oom;
1933 		}
1934 		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1935 	} else {
1936 		/*
1937 		 * If dopts->alignment > 0, then ind is still 0, but usize was
1938 		 * computed in the previous if statement.  Down the positive
1939 		 * alignment path, imalloc_no_sample ignores ind and size
1940 		 * (relying only on usize).
1941 		 */
1942 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1943 		    ind);
1944 		if (unlikely(allocation == NULL)) {
1945 			goto label_oom;
1946 		}
1947 	}
1948 
1949 	/*
1950 	 * Allocation has been done at this point.  We still have some
1951 	 * post-allocation work to do though.
1952 	 */
1953 	assert(dopts->alignment == 0
1954 	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1955 
1956 	if (config_stats) {
1957 		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1958 		*tsd_thread_allocatedp_get(tsd) += usize;
1959 	}
1960 
1961 	if (sopts->slow) {
1962 		UTRACE(0, size, allocation);
1963 	}
1964 
1965 	/* Success! */
1966 	check_entry_exit_locking(tsd_tsdn(tsd));
1967 	*dopts->result = allocation;
1968 	return 0;
1969 
1970 label_oom:
1971 	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1972 		malloc_write(sopts->oom_string);
1973 		abort();
1974 	}
1975 
1976 	if (sopts->slow) {
1977 		UTRACE(NULL, size, NULL);
1978 	}
1979 
1980 	check_entry_exit_locking(tsd_tsdn(tsd));
1981 
1982 	if (sopts->set_errno_on_error) {
1983 		set_errno(ENOMEM);
1984 	}
1985 
1986 	if (sopts->null_out_result_on_error) {
1987 		*dopts->result = NULL;
1988 	}
1989 
1990 	return ENOMEM;
1991 
1992 	/*
1993 	 * This label is only jumped to by one goto; we move it out of line
1994 	 * anyway to avoid obscuring the non-error paths, and for symmetry with
1995 	 * the oom case.
1996 	 */
1997 label_invalid_alignment:
1998 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1999 		malloc_write(sopts->invalid_alignment_string);
2000 		abort();
2001 	}
2002 
2003 	if (sopts->set_errno_on_error) {
2004 		set_errno(EINVAL);
2005 	}
2006 
2007 	if (sopts->slow) {
2008 		UTRACE(NULL, size, NULL);
2009 	}
2010 
2011 	check_entry_exit_locking(tsd_tsdn(tsd));
2012 
2013 	if (sopts->null_out_result_on_error) {
2014 		*dopts->result = NULL;
2015 	}
2016 
2017 	return EINVAL;
2018 }
2019 
2020 /* Returns the errno-style error code of the allocation. */
2021 JEMALLOC_ALWAYS_INLINE int
2022 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
2023 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2024 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2025 			malloc_write(sopts->oom_string);
2026 			abort();
2027 		}
2028 		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2029 		set_errno(ENOMEM);
2030 		*dopts->result = NULL;
2031 
2032 		return ENOMEM;
2033 	}
2034 
2035 	/* We always need the tsd.  Let's grab it right away. */
2036 	tsd_t *tsd = tsd_fetch();
2037 	assert(tsd);
2038 	if (likely(tsd_fast(tsd))) {
2039 		/* Fast and common path. */
2040 		tsd_assert_fast(tsd);
2041 		sopts->slow = false;
2042 		return imalloc_body(sopts, dopts, tsd);
2043 	} else {
2044 		sopts->slow = true;
2045 		return imalloc_body(sopts, dopts, tsd);
2046 	}
2047 }
2048 /******************************************************************************/
2049 /*
2050  * Begin malloc(3)-compatible functions.
2051  */
2052 
2053 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2054 void JEMALLOC_NOTHROW *
2055 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2056 je_malloc(size_t size) {
2057 	void *ret;
2058 	static_opts_t sopts;
2059 	dynamic_opts_t dopts;
2060 
2061 	LOG("core.malloc.entry", "size: %zu", size);
2062 
2063 	static_opts_init(&sopts);
2064 	dynamic_opts_init(&dopts);
2065 
2066 	sopts.bump_empty_alloc = true;
2067 	sopts.null_out_result_on_error = true;
2068 	sopts.set_errno_on_error = true;
2069 	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2070 
2071 	dopts.result = &ret;
2072 	dopts.num_items = 1;
2073 	dopts.item_size = size;
2074 
2075 	imalloc(&sopts, &dopts);
2076 
2077 	LOG("core.malloc.exit", "result: %p", ret);
2078 
2079 	return ret;
2080 }
2081 
2082 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2083 JEMALLOC_ATTR(nonnull(1))
2084 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2085 	int ret;
2086 	static_opts_t sopts;
2087 	dynamic_opts_t dopts;
2088 
2089 	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2090 	    "size: %zu", memptr, alignment, size);
2091 
2092 	static_opts_init(&sopts);
2093 	dynamic_opts_init(&dopts);
2094 
2095 	sopts.bump_empty_alloc = true;
2096 	sopts.min_alignment = sizeof(void *);
2097 	sopts.oom_string =
2098 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2099 	sopts.invalid_alignment_string =
2100 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2101 
2102 	dopts.result = memptr;
2103 	dopts.num_items = 1;
2104 	dopts.item_size = size;
2105 	dopts.alignment = alignment;
2106 
2107 	ret = imalloc(&sopts, &dopts);
2108 
2109 	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2110 	    *memptr);
2111 
2112 	return ret;
2113 }
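/*
 * Note that, per POSIX, posix_memalign() reports failure solely through its
 * return value (0, EINVAL, or ENOMEM), which is why set_errno_on_error is not
 * enabled above.  A minimal illustrative call (not taken from this file):
 *
 *	void *p;
 *	int err = posix_memalign(&p, 64, 1024);
 *
 * requests 1 KiB aligned to a 64-byte boundary; the alignment must be a power
 * of two and at least sizeof(void *), matching min_alignment above.
 */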
2114 
2115 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2116 void JEMALLOC_NOTHROW *
2117 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2118 je_aligned_alloc(size_t alignment, size_t size) {
2119 	void *ret;
2120 
2121 	static_opts_t sopts;
2122 	dynamic_opts_t dopts;
2123 
2124 	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2125 	    alignment, size);
2126 
2127 	static_opts_init(&sopts);
2128 	dynamic_opts_init(&dopts);
2129 
2130 	sopts.bump_empty_alloc = true;
2131 	sopts.null_out_result_on_error = true;
2132 	sopts.set_errno_on_error = true;
2133 	sopts.min_alignment = 1;
2134 	sopts.oom_string =
2135 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2136 	sopts.invalid_alignment_string =
2137 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2138 
2139 	dopts.result = &ret;
2140 	dopts.num_items = 1;
2141 	dopts.item_size = size;
2142 	dopts.alignment = alignment;
2143 
2144 	imalloc(&sopts, &dopts);
2145 
2146 	LOG("core.aligned_alloc.exit", "result: %p", ret);
2147 
2148 	return ret;
2149 }
2150 
2151 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2152 void JEMALLOC_NOTHROW *
2153 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2154 je_calloc(size_t num, size_t size) {
2155 	void *ret;
2156 	static_opts_t sopts;
2157 	dynamic_opts_t dopts;
2158 
2159 	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2160 
2161 	static_opts_init(&sopts);
2162 	dynamic_opts_init(&dopts);
2163 
2164 	sopts.may_overflow = true;
2165 	sopts.bump_empty_alloc = true;
2166 	sopts.null_out_result_on_error = true;
2167 	sopts.set_errno_on_error = true;
2168 	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2169 
2170 	dopts.result = &ret;
2171 	dopts.num_items = num;
2172 	dopts.item_size = size;
2173 	dopts.zero = true;
2174 
2175 	imalloc(&sopts, &dopts);
2176 
2177 	LOG("core.calloc.exit", "result: %p", ret);
2178 
2179 	return ret;
2180 }
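/*
 * calloc() sets sopts.may_overflow, so compute_size_with_overflow() performs
 * the full num * size wraparound check; e.g. calloc(SIZE_MAX / 2, 4) takes the
 * OOM path and fails with ENOMEM rather than silently allocating a
 * wrapped-around size.  dopts.zero then guarantees the returned memory is
 * zero-filled.
 */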
2181 
2182 static void *
2183 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2184     prof_tctx_t *tctx) {
2185 	void *p;
2186 
2187 	if (tctx == NULL) {
2188 		return NULL;
2189 	}
2190 	if (usize <= SMALL_MAXCLASS) {
2191 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2192 		if (p == NULL) {
2193 			return NULL;
2194 		}
2195 		arena_prof_promote(tsd_tsdn(tsd), p, usize);
2196 	} else {
2197 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2198 	}
2199 
2200 	return p;
2201 }
2202 
2203 JEMALLOC_ALWAYS_INLINE void *
2204 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2205    alloc_ctx_t *alloc_ctx) {
2206 	void *p;
2207 	bool prof_activex;
2208 	prof_tctx_t *old_tctx, *tctx;
2209 
2210 	prof_activex = prof_active_get_unlocked();
2211 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2212 	tctx = prof_alloc_prep(tsd, usize, prof_activex, true);
2213 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2214 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2215 	} else {
2216 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2217 	}
2218 	if (unlikely(p == NULL)) {
2219 		prof_alloc_rollback(tsd, tctx, true);
2220 		return NULL;
2221 	}
2222 	prof_realloc(tsd, p, usize, tctx, prof_activex, true, old_ptr,
2223 	    old_usize, old_tctx);
2224 
2225 	return p;
2226 }
2227 
2228 JEMALLOC_ALWAYS_INLINE void
2229 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2230 	if (!slow_path) {
2231 		tsd_assert_fast(tsd);
2232 	}
2233 	check_entry_exit_locking(tsd_tsdn(tsd));
2234 	if (tsd_reentrancy_level_get(tsd) != 0) {
2235 		assert(slow_path);
2236 	}
2237 
2238 	assert(ptr != NULL);
2239 	assert(malloc_initialized() || IS_INITIALIZER);
2240 
2241 	alloc_ctx_t alloc_ctx;
2242 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2243 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2244 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2245 	assert(alloc_ctx.szind != NSIZES);
2246 
2247 	size_t usize;
2248 	if (config_prof && opt_prof) {
2249 		usize = sz_index2size(alloc_ctx.szind);
2250 		prof_free(tsd, ptr, usize, &alloc_ctx);
2251 	} else if (config_stats) {
2252 		usize = sz_index2size(alloc_ctx.szind);
2253 	}
2254 	if (config_stats) {
2255 		*tsd_thread_deallocatedp_get(tsd) += usize;
2256 	}
2257 
2258 	if (likely(!slow_path)) {
2259 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2260 		    false);
2261 	} else {
2262 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2263 		    true);
2264 	}
2265 }
2266 
2267 JEMALLOC_ALWAYS_INLINE void
2268 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2269 	if (!slow_path) {
2270 		tsd_assert_fast(tsd);
2271 	}
2272 	check_entry_exit_locking(tsd_tsdn(tsd));
2273 	if (tsd_reentrancy_level_get(tsd) != 0) {
2274 		assert(slow_path);
2275 	}
2276 
2277 	assert(ptr != NULL);
2278 	assert(malloc_initialized() || IS_INITIALIZER);
2279 
2280 	alloc_ctx_t alloc_ctx, *ctx;
2281 	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2282 		/*
2283 		 * When cache_oblivious is disabled and ptr is not page aligned,
2284 		 * the allocation was not sampled -- usize can be used to
2285 		 * determine szind directly.
2286 		 */
2287 		alloc_ctx.szind = sz_size2index(usize);
2288 		alloc_ctx.slab = true;
2289 		ctx = &alloc_ctx;
2290 		if (config_debug) {
2291 			alloc_ctx_t dbg_ctx;
2292 			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2293 			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2294 			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2295 			    &dbg_ctx.slab);
2296 			assert(dbg_ctx.szind == alloc_ctx.szind);
2297 			assert(dbg_ctx.slab == alloc_ctx.slab);
2298 		}
2299 	} else if (config_prof && opt_prof) {
2300 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2301 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2302 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2303 		assert(alloc_ctx.szind == sz_size2index(usize));
2304 		ctx = &alloc_ctx;
2305 	} else {
2306 		ctx = NULL;
2307 	}
2308 
2309 	if (config_prof && opt_prof) {
2310 		prof_free(tsd, ptr, usize, ctx);
2311 	}
2312 	if (config_stats) {
2313 		*tsd_thread_deallocatedp_get(tsd) += usize;
2314 	}
2315 
2316 	if (likely(!slow_path)) {
2317 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2318 	} else {
2319 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2320 	}
2321 }
2322 
2323 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2324 void JEMALLOC_NOTHROW *
2325 JEMALLOC_ALLOC_SIZE(2)
2326 je_realloc(void *ptr, size_t size) {
2327 	void *ret;
2328 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2329 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2330 	size_t old_usize = 0;
2331 
2332 	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2333 
2334 	if (unlikely(size == 0)) {
2335 #if 0
2336 	// http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_400
2337 		if (ptr != NULL) {
2338 			/* realloc(ptr, 0) is equivalent to free(ptr). */
2339 			UTRACE(ptr, 0, 0);
2340 			tcache_t *tcache;
2341 			tsd_t *tsd = tsd_fetch();
2342 			if (tsd_reentrancy_level_get(tsd) == 0) {
2343 				tcache = tcache_get(tsd);
2344 			} else {
2345 				tcache = NULL;
2346 			}
2347 			ifree(tsd, ptr, tcache, true);
2348 
2349 			LOG("core.realloc.exit", "result: %p", NULL);
2350 			return NULL;
2351 		}
2352 #endif
2353 		size = 1;
2354 	}
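	/*
	 * With the block above compiled out, realloc(ptr, 0) falls through
	 * with size bumped to 1, i.e. it behaves like realloc(ptr, 1) rather
	 * than like free(ptr); see the DR 400 reference above for the
	 * standards background.
	 */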
2355 
2356 	if (likely(ptr != NULL)) {
2357 		assert(malloc_initialized() || IS_INITIALIZER);
2358 		tsd_t *tsd = tsd_fetch();
2359 
2360 		check_entry_exit_locking(tsd_tsdn(tsd));
2361 
2362 		alloc_ctx_t alloc_ctx;
2363 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2364 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2365 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2366 		assert(alloc_ctx.szind != NSIZES);
2367 		old_usize = sz_index2size(alloc_ctx.szind);
2368 		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2369 		if (config_prof && opt_prof) {
2370 			usize = sz_s2u(size);
2371 			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2372 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2373 			    &alloc_ctx);
2374 		} else {
2375 			if (config_stats) {
2376 				usize = sz_s2u(size);
2377 			}
2378 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2379 		}
2380 		tsdn = tsd_tsdn(tsd);
2381 	} else {
2382 		/* realloc(NULL, size) is equivalent to malloc(size). */
2383 		void *ret1 = je_malloc(size);
2384 		LOG("core.realloc.exit", "result: %p", ret1);
2385 		return ret1;
2386 	}
2387 
2388 	if (unlikely(ret == NULL)) {
2389 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2390 			malloc_write("<jemalloc>: Error in realloc(): "
2391 			    "out of memory\n");
2392 			abort();
2393 		}
2394 		set_errno(ENOMEM);
2395 	}
2396 	if (config_stats && likely(ret != NULL)) {
2397 		tsd_t *tsd;
2398 
2399 		assert(usize == isalloc(tsdn, ret));
2400 		tsd = tsdn_tsd(tsdn);
2401 		*tsd_thread_allocatedp_get(tsd) += usize;
2402 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2403 	}
2404 	UTRACE(ptr, size, ret);
2405 	check_entry_exit_locking(tsdn);
2406 
2407 	LOG("core.realloc.exit", "result: %p", ret);
2408 	return ret;
2409 }
2410 
2411 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2412 je_free(void *ptr) {
2413 	LOG("core.free.entry", "ptr: %p", ptr);
2414 
2415 	UTRACE(ptr, 0, 0);
2416 	if (likely(ptr != NULL)) {
2417 		/*
2418 		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2419 		 * based only on free() calls -- other activities trigger the
2420 		 * minimal to full transition.  This is because free() may
2421 		 * happen during thread shutdown after tls deallocation: if a
2422 		 * thread never had any malloc activities until then, a
2423 		 * fully-setup tsd won't be destructed properly.
2424 		 */
2425 		tsd_t *tsd = tsd_fetch_min();
2426 		check_entry_exit_locking(tsd_tsdn(tsd));
2427 
2428 		tcache_t *tcache;
2429 		if (likely(tsd_fast(tsd))) {
2430 			tsd_assert_fast(tsd);
2431 			/* Unconditionally get tcache ptr on fast path. */
2432 			tcache = tsd_tcachep_get(tsd);
2433 			ifree(tsd, ptr, tcache, false);
2434 		} else {
2435 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2436 				tcache = tcache_get(tsd);
2437 			} else {
2438 				tcache = NULL;
2439 			}
2440 			ifree(tsd, ptr, tcache, true);
2441 		}
2442 		check_entry_exit_locking(tsd_tsdn(tsd));
2443 	}
2444 	LOG("core.free.exit", "");
2445 }
2446 
2447 /*
2448  * End malloc(3)-compatible functions.
2449  */
2450 /******************************************************************************/
2451 /*
2452  * Begin non-standard override functions.
2453  */
2454 
2455 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2456 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2457 void JEMALLOC_NOTHROW *
2458 JEMALLOC_ATTR(malloc)
2459 je_memalign(size_t alignment, size_t size) {
2460 	void *ret;
2461 	static_opts_t sopts;
2462 	dynamic_opts_t dopts;
2463 
2464 	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2465 	    size);
2466 
2467 	static_opts_init(&sopts);
2468 	dynamic_opts_init(&dopts);
2469 
2470 	sopts.bump_empty_alloc = true;
2471 	sopts.min_alignment = 1;
2472 	sopts.oom_string =
2473 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2474 	sopts.invalid_alignment_string =
2475 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2476 	sopts.null_out_result_on_error = true;
2477 
2478 	dopts.result = &ret;
2479 	dopts.num_items = 1;
2480 	dopts.item_size = size;
2481 	dopts.alignment = alignment;
2482 
2483 	imalloc(&sopts, &dopts);
2484 
2485 	LOG("core.memalign.exit", "result: %p", ret);
2486 	return ret;
2487 }
2488 #endif
2489 
2490 #ifdef JEMALLOC_OVERRIDE_VALLOC
2491 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2492 void JEMALLOC_NOTHROW *
2493 JEMALLOC_ATTR(malloc)
2494 je_valloc(size_t size) {
2495 	void *ret;
2496 
2497 	static_opts_t sopts;
2498 	dynamic_opts_t dopts;
2499 
2500 	LOG("core.valloc.entry", "size: %zu\n", size);
2501 
2502 	static_opts_init(&sopts);
2503 	dynamic_opts_init(&dopts);
2504 
2505 	sopts.bump_empty_alloc = true;
2506 	sopts.null_out_result_on_error = true;
2507 	sopts.min_alignment = PAGE;
2508 	sopts.oom_string =
2509 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2510 	sopts.invalid_alignment_string =
2511 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2512 
2513 	dopts.result = &ret;
2514 	dopts.num_items = 1;
2515 	dopts.item_size = size;
2516 	dopts.alignment = PAGE;
2517 
2518 	imalloc(&sopts, &dopts);
2519 
2520 	LOG("core.valloc.exit", "result: %p\n", ret);
2521 	return ret;
2522 }
2523 #endif
2524 
2525 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2526 /*
2527  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2528  * to inconsistently reference libc's malloc(3)-compatible functions
2529  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2530  *
2531  * These definitions interpose hooks in glibc.  The functions are actually
2532  * passed an extra argument for the caller return address, which will be
2533  * ignored.
2534  */
2535 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2536 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2537 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2538 #  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2539 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2540     je_memalign;
2541 #  endif
2542 
2543 #  ifdef CPU_COUNT
2544 /*
2545  * To enable static linking with glibc, the libc-specific malloc interface must
2546  * also be implemented, so that none of glibc's malloc.o functions are added to
2547  * the link.
2548  */
2549 #    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2550 /* To force macro expansion of je_ prefix before stringification. */
2551 #    define PREALIAS(je_fn)	ALIAS(je_fn)
2552 #    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2553 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2554 #    endif
2555 #    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2556 void __libc_free(void* ptr) PREALIAS(je_free);
2557 #    endif
2558 #    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2559 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2560 #    endif
2561 #    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2562 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2563 #    endif
2564 #    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2565 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2566 #    endif
2567 #    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2568 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2569 #    endif
2570 #    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2571 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2572 #    endif
2573 #    undef PREALIAS
2574 #    undef ALIAS
2575 #  endif
2576 #endif
2577 
2578 /*
2579  * End non-standard override functions.
2580  */
2581 /******************************************************************************/
2582 /*
2583  * Begin non-standard functions.
2584  */
2585 
2586 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2587 void JEMALLOC_NOTHROW *
2588 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2589 je_mallocx(size_t size, int flags) {
2590 	void *ret;
2591 	static_opts_t sopts;
2592 	dynamic_opts_t dopts;
2593 
2594 	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2595 
2596 	static_opts_init(&sopts);
2597 	dynamic_opts_init(&dopts);
2598 
2599 	sopts.assert_nonempty_alloc = true;
2600 	sopts.null_out_result_on_error = true;
2601 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2602 
2603 	dopts.result = &ret;
2604 	dopts.num_items = 1;
2605 	dopts.item_size = size;
2606 	if (unlikely(flags != 0)) {
2607 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2608 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2609 		}
2610 
2611 		dopts.zero = MALLOCX_ZERO_GET(flags);
2612 
2613 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2614 			if ((flags & MALLOCX_TCACHE_MASK)
2615 			    == MALLOCX_TCACHE_NONE) {
2616 				dopts.tcache_ind = TCACHE_IND_NONE;
2617 			} else {
2618 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2619 			}
2620 		} else {
2621 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2622 		}
2623 
2624 		if ((flags & MALLOCX_ARENA_MASK) != 0)
2625 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2626 	}
2627 
2628 	imalloc(&sopts, &dopts);
2629 
2630 	LOG("core.mallocx.exit", "result: %p", ret);
2631 	return ret;
2632 }
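/*
 * Illustrative mallocx() flag composition (using the public jemalloc.h macros;
 * not exercised here):
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * requests a zeroed, 64-byte-aligned allocation; MALLOCX_TCACHE(ind),
 * MALLOCX_TCACHE_NONE, and MALLOCX_ARENA(ind) similarly feed dopts.tcache_ind
 * and dopts.arena_ind above.
 */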
2633 
2634 static void *
2635 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2636     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2637     prof_tctx_t *tctx) {
2638 	void *p;
2639 
2640 	if (tctx == NULL) {
2641 		return NULL;
2642 	}
2643 	if (usize <= SMALL_MAXCLASS) {
2644 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2645 		    alignment, zero, tcache, arena);
2646 		if (p == NULL) {
2647 			return NULL;
2648 		}
2649 		arena_prof_promote(tsdn, p, usize);
2650 	} else {
2651 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2652 		    tcache, arena);
2653 	}
2654 
2655 	return p;
2656 }
2657 
2658 JEMALLOC_ALWAYS_INLINE void *
2659 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2660     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2661     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2662 	void *p;
2663 	bool prof_activex;
2664 	prof_tctx_t *old_tctx, *tctx;
2665 
2666 	prof_activex = prof_active_get_unlocked();
2667 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2668 	tctx = prof_alloc_prep(tsd, *usize, prof_activex, false);
2669 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2670 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2671 		    *usize, alignment, zero, tcache, arena, tctx);
2672 	} else {
2673 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2674 		    zero, tcache, arena);
2675 	}
2676 	if (unlikely(p == NULL)) {
2677 		prof_alloc_rollback(tsd, tctx, false);
2678 		return NULL;
2679 	}
2680 
2681 	if (p == old_ptr && alignment != 0) {
2682 		/*
2683 		 * The allocation did not move, so it is possible that the size
2684 		 * class is smaller than would guarantee the requested
2685 		 * alignment, and that the alignment constraint was
2686 		 * serendipitously satisfied.  Additionally, old_usize may not
2687 		 * be the same as the current usize because of in-place large
2688 		 * reallocation.  Therefore, query the actual value of usize.
2689 		 */
2690 		*usize = isalloc(tsd_tsdn(tsd), p);
2691 	}
2692 	prof_realloc(tsd, p, *usize, tctx, prof_activex, false, old_ptr,
2693 	    old_usize, old_tctx);
2694 
2695 	return p;
2696 }
2697 
2698 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2699 void JEMALLOC_NOTHROW *
2700 JEMALLOC_ALLOC_SIZE(2)
2701 je_rallocx(void *ptr, size_t size, int flags) {
2702 	void *p;
2703 	tsd_t *tsd;
2704 	size_t usize;
2705 	size_t old_usize;
2706 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2707 	bool zero = flags & MALLOCX_ZERO;
2708 	arena_t *arena;
2709 	tcache_t *tcache;
2710 
2711 	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2712 	    size, flags);
2713 
2715 	assert(ptr != NULL);
2716 	assert(size != 0);
2717 	assert(malloc_initialized() || IS_INITIALIZER);
2718 	tsd = tsd_fetch();
2719 	check_entry_exit_locking(tsd_tsdn(tsd));
2720 
2721 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2722 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2723 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2724 		if (unlikely(arena == NULL)) {
2725 			goto label_oom;
2726 		}
2727 	} else {
2728 		arena = NULL;
2729 	}
2730 
2731 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2732 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2733 			tcache = NULL;
2734 		} else {
2735 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2736 		}
2737 	} else {
2738 		tcache = tcache_get(tsd);
2739 	}
2740 
2741 	alloc_ctx_t alloc_ctx;
2742 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2743 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2744 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2745 	assert(alloc_ctx.szind != NSIZES);
2746 	old_usize = sz_index2size(alloc_ctx.szind);
2747 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2748 	if (config_prof && opt_prof) {
2749 		usize = (alignment == 0) ?
2750 		    sz_s2u(size) : sz_sa2u(size, alignment);
2751 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2752 			goto label_oom;
2753 		}
2754 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2755 		    zero, tcache, arena, &alloc_ctx);
2756 		if (unlikely(p == NULL)) {
2757 			goto label_oom;
2758 		}
2759 	} else {
2760 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2761 		    zero, tcache, arena);
2762 		if (unlikely(p == NULL)) {
2763 			goto label_oom;
2764 		}
2765 		if (config_stats) {
2766 			usize = isalloc(tsd_tsdn(tsd), p);
2767 		}
2768 	}
2769 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2770 
2771 	if (config_stats) {
2772 		*tsd_thread_allocatedp_get(tsd) += usize;
2773 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2774 	}
2775 	UTRACE(ptr, size, p);
2776 	check_entry_exit_locking(tsd_tsdn(tsd));
2777 
2778 	LOG("core.rallocx.exit", "result: %p", p);
2779 	return p;
2780 label_oom:
2781 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2782 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2783 		abort();
2784 	}
2785 	UTRACE(ptr, size, 0);
2786 	check_entry_exit_locking(tsd_tsdn(tsd));
2787 
2788 	LOG("core.rallocx.exit", "result: %p", NULL);
2789 	return NULL;
2790 }
2791 
2792 JEMALLOC_ALWAYS_INLINE size_t
2793 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2794     size_t extra, size_t alignment, bool zero) {
2795 	size_t usize;
2796 
2797 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2798 		return old_usize;
2799 	}
2800 	usize = isalloc(tsdn, ptr);
2801 
2802 	return usize;
2803 }
2804 
2805 static size_t
2806 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2807     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2808 	size_t usize;
2809 
2810 	if (tctx == NULL) {
2811 		return old_usize;
2812 	}
2813 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2814 	    zero);
2815 
2816 	return usize;
2817 }
2818 
2819 JEMALLOC_ALWAYS_INLINE size_t
2820 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2821     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2822 	size_t usize_max, usize;
2823 	bool prof_activex;
2824 	prof_tctx_t *old_tctx, *tctx;
2825 
2826 	prof_activex = prof_active_get_unlocked();
2827 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2828 	/*
2829 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2830 	 * Therefore, compute its maximum possible value and use that in
2831 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2832 	 * prof_realloc() will use the actual usize to decide whether to sample.
2833 	 */
2834 	if (alignment == 0) {
2835 		usize_max = sz_s2u(size+extra);
2836 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2837 	} else {
2838 		usize_max = sz_sa2u(size+extra, alignment);
2839 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2840 			/*
2841 			 * usize_max is out of range, and chances are that
2842 			 * allocation will fail, but use the maximum possible
2843 			 * value and carry on with prof_alloc_prep(), just in
2844 			 * case allocation succeeds.
2845 			 */
2846 			usize_max = LARGE_MAXCLASS;
2847 		}
2848 	}
2849 	tctx = prof_alloc_prep(tsd, usize_max, prof_activex, false);
2850 
2851 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2852 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2853 		    size, extra, alignment, zero, tctx);
2854 	} else {
2855 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2856 		    extra, alignment, zero);
2857 	}
2858 	if (usize == old_usize) {
2859 		prof_alloc_rollback(tsd, tctx, false);
2860 		return usize;
2861 	}
2862 	prof_realloc(tsd, ptr, usize, tctx, prof_activex, false, ptr, old_usize,
2863 	    old_tctx);
2864 
2865 	return usize;
2866 }
2867 
2868 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2869 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2870 	tsd_t *tsd;
2871 	size_t usize, old_usize;
2872 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2873 	bool zero = flags & MALLOCX_ZERO;
2874 
2875 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2876 	    "flags: %d", ptr, size, extra, flags);
2877 
2878 	assert(ptr != NULL);
2879 	assert(size != 0);
2880 	assert(SIZE_T_MAX - size >= extra);
2881 	assert(malloc_initialized() || IS_INITIALIZER);
2882 	tsd = tsd_fetch();
2883 	check_entry_exit_locking(tsd_tsdn(tsd));
2884 
2885 	alloc_ctx_t alloc_ctx;
2886 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2887 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2888 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2889 	assert(alloc_ctx.szind != NSIZES);
2890 	old_usize = sz_index2size(alloc_ctx.szind);
2891 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2892 	/*
2893 	 * The API explicitly absolves itself of protecting against (size +
2894 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2895 	 * exceeding LARGE_MAXCLASS.
2896 	 *
2897 	 * Ordinarily, size limit checking is handled deeper down, but here we
2898 	 * have to check as part of (size + extra) clamping, since we need the
2899 	 * clamped value in the above helper functions.
2900 	 */
2901 	if (unlikely(size > LARGE_MAXCLASS)) {
2902 		usize = old_usize;
2903 		goto label_not_resized;
2904 	}
2905 	if (unlikely(LARGE_MAXCLASS - size < extra)) {
2906 		extra = LARGE_MAXCLASS - size;
2907 	}
2908 
2909 	if (config_prof && opt_prof) {
2910 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2911 		    alignment, zero, &alloc_ctx);
2912 	} else {
2913 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2914 		    extra, alignment, zero);
2915 	}
2916 	if (unlikely(usize == old_usize)) {
2917 		goto label_not_resized;
2918 	}
2919 
2920 	if (config_stats) {
2921 		*tsd_thread_allocatedp_get(tsd) += usize;
2922 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2923 	}
2924 label_not_resized:
2925 	UTRACE(ptr, size, ptr);
2926 	check_entry_exit_locking(tsd_tsdn(tsd));
2927 
2928 	LOG("core.xallocx.exit", "result: %zu", usize);
2929 	return usize;
2930 }
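/*
 * xallocx() only ever resizes in place and returns the resulting real size of
 * the allocation, so callers detect a failed expansion by comparing the result
 * against the requested size, e.g. (illustrative):
 *
 *	if (xallocx(p, new_size, 0, 0) < new_size)
 *		p = rallocx(p, new_size, 0);
 *
 * where rallocx() may move the allocation.
 */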
2931 
2932 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2933 JEMALLOC_ATTR(pure)
2934 je_sallocx(const void *ptr, UNUSED int flags) {
2935 	size_t usize;
2936 	tsdn_t *tsdn;
2937 
2938 	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2939 
2940 	assert(malloc_initialized() || IS_INITIALIZER);
2941 	assert(ptr != NULL);
2942 
2943 	tsdn = tsdn_fetch();
2944 	check_entry_exit_locking(tsdn);
2945 
2946 	if (config_debug || force_ivsalloc) {
2947 		usize = ivsalloc(tsdn, ptr);
2948 		assert(force_ivsalloc || usize != 0);
2949 	} else {
2950 		usize = isalloc(tsdn, ptr);
2951 	}
2952 
2953 	check_entry_exit_locking(tsdn);
2954 
2955 	LOG("core.sallocx.exit", "result: %zu", usize);
2956 	return usize;
2957 }
2958 
2959 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2960 je_dallocx(void *ptr, int flags) {
2961 	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2962 
2963 	assert(ptr != NULL);
2964 	assert(malloc_initialized() || IS_INITIALIZER);
2965 
2966 	tsd_t *tsd = tsd_fetch();
2967 	bool fast = tsd_fast(tsd);
2968 	check_entry_exit_locking(tsd_tsdn(tsd));
2969 
2970 	tcache_t *tcache;
2971 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2972 		/* Not allowed to be reentrant and specify a custom tcache. */
2973 		assert(tsd_reentrancy_level_get(tsd) == 0);
2974 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2975 			tcache = NULL;
2976 		} else {
2977 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2978 		}
2979 	} else {
2980 		if (likely(fast)) {
2981 			tcache = tsd_tcachep_get(tsd);
2982 			assert(tcache == tcache_get(tsd));
2983 		} else {
2984 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2985 				tcache = tcache_get(tsd);
2986 			} else {
2987 				tcache = NULL;
2988 			}
2989 		}
2990 	}
2991 
2992 	UTRACE(ptr, 0, 0);
2993 	if (likely(fast)) {
2994 		tsd_assert_fast(tsd);
2995 		ifree(tsd, ptr, tcache, false);
2996 	} else {
2997 		ifree(tsd, ptr, tcache, true);
2998 	}
2999 	check_entry_exit_locking(tsd_tsdn(tsd));
3000 
3001 	LOG("core.dallocx.exit", "");
3002 }
3003 
3004 JEMALLOC_ALWAYS_INLINE size_t
3005 inallocx(tsdn_t *tsdn, size_t size, int flags) {
3006 	check_entry_exit_locking(tsdn);
3007 
3008 	size_t usize;
3009 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
3010 		usize = sz_s2u(size);
3011 	} else {
3012 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
3013 	}
3014 	check_entry_exit_locking(tsdn);
3015 	return usize;
3016 }
3017 
3018 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3019 je_sdallocx(void *ptr, size_t size, int flags) {
3020 	assert(ptr != NULL);
3021 	assert(malloc_initialized() || IS_INITIALIZER);
3022 
3023 	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3024 	    size, flags);
3025 
3026 	tsd_t *tsd = tsd_fetch();
3027 	bool fast = tsd_fast(tsd);
3028 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3029 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
3030 	check_entry_exit_locking(tsd_tsdn(tsd));
3031 
3032 	tcache_t *tcache;
3033 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
3034 		/* Not allowed to be reentrant and specify a custom tcache. */
3035 		assert(tsd_reentrancy_level_get(tsd) == 0);
3036 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3037 			tcache = NULL;
3038 		} else {
3039 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3040 		}
3041 	} else {
3042 		if (likely(fast)) {
3043 			tcache = tsd_tcachep_get(tsd);
3044 			assert(tcache == tcache_get(tsd));
3045 		} else {
3046 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3047 				tcache = tcache_get(tsd);
3048 			} else {
3049 				tcache = NULL;
3050 			}
3051 		}
3052 	}
3053 
3054 	UTRACE(ptr, 0, 0);
3055 	if (likely(fast)) {
3056 		tsd_assert_fast(tsd);
3057 		isfree(tsd, ptr, usize, tcache, false);
3058 	} else {
3059 		isfree(tsd, ptr, usize, tcache, true);
3060 	}
3061 	check_entry_exit_locking(tsd_tsdn(tsd));
3062 
3063 	LOG("core.sdallocx.exit", "");
3064 }
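/*
 * sdallocx() is the sized counterpart of dallocx(): size (combined with any
 * MALLOCX_LG_ALIGN flag) must correspond to the original allocation request,
 * which is what lets inallocx() recompute usize here instead of looking it up;
 * the assertion against isalloc() above checks this in debug builds.
 */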
3065 
3066 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3067 JEMALLOC_ATTR(pure)
3068 je_nallocx(size_t size, int flags) {
3069 	size_t usize;
3070 	tsdn_t *tsdn;
3071 
3072 	assert(size != 0);
3073 
3074 	if (unlikely(malloc_init())) {
3075 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3076 		return 0;
3077 	}
3078 
3079 	tsdn = tsdn_fetch();
3080 	check_entry_exit_locking(tsdn);
3081 
3082 	usize = inallocx(tsdn, size, flags);
3083 	if (unlikely(usize > LARGE_MAXCLASS)) {
3084 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3085 		return 0;
3086 	}
3087 
3088 	check_entry_exit_locking(tsdn);
3089 	LOG("core.nallocx.exit", "result: %zu", usize);
3090 	return usize;
3091 }
3092 
3093 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3094 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3095     size_t newlen) {
3096 	int ret;
3097 	tsd_t *tsd;
3098 
3099 	LOG("core.mallctl.entry", "name: %s", name);
3100 
3101 	if (unlikely(malloc_init())) {
3102 		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3103 		return EAGAIN;
3104 	}
3105 
3106 	tsd = tsd_fetch();
3107 	check_entry_exit_locking(tsd_tsdn(tsd));
3108 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3109 	check_entry_exit_locking(tsd_tsdn(tsd));
3110 
3111 	LOG("core.mallctl.exit", "result: %d", ret);
3112 	return ret;
3113 }
3114 
3115 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3116 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3117 	int ret;
3118 
3119 	LOG("core.mallctlnametomib.entry", "name: %s", name);
3120 
3121 	if (unlikely(malloc_init())) {
3122 		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3123 		return EAGAIN;
3124 	}
3125 
3126 	tsd_t *tsd = tsd_fetch();
3127 	check_entry_exit_locking(tsd_tsdn(tsd));
3128 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
3129 	check_entry_exit_locking(tsd_tsdn(tsd));
3130 
3131 	LOG("core.mallctlnametomib.exit", "result: %d", ret);
3132 	return ret;
3133 }
3134 
3135 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3136 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3137   void *newp, size_t newlen) {
3138 	int ret;
3139 	tsd_t *tsd;
3140 
3141 	LOG("core.mallctlbymib.entry", "");
3142 
3143 	if (unlikely(malloc_init())) {
3144 		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3145 		return EAGAIN;
3146 	}
3147 
3148 	tsd = tsd_fetch();
3149 	check_entry_exit_locking(tsd_tsdn(tsd));
3150 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3151 	check_entry_exit_locking(tsd_tsdn(tsd));
3152 	LOG("core.mallctlbymib.exit", "result: %d", ret);
3153 	return ret;
3154 }
3155 
3156 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3157 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3158     const char *opts) {
3159 	tsdn_t *tsdn;
3160 
3161 	LOG("core.malloc_stats_print.entry", "");
3162 
3163 	tsdn = tsdn_fetch();
3164 	check_entry_exit_locking(tsdn);
3165 	stats_print(write_cb, cbopaque, opts);
3166 	check_entry_exit_locking(tsdn);
3167 	LOG("core.malloc_stats_print.exit", "");
3168 }
3169 
3170 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3171 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3172 	size_t ret;
3173 	tsdn_t *tsdn;
3174 
3175 	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3176 
3177 	assert(malloc_initialized() || IS_INITIALIZER);
3178 
3179 	tsdn = tsdn_fetch();
3180 	check_entry_exit_locking(tsdn);
3181 
3182 	if (unlikely(ptr == NULL)) {
3183 		ret = 0;
3184 	} else {
3185 		if (config_debug || force_ivsalloc) {
3186 			ret = ivsalloc(tsdn, ptr);
3187 			assert(force_ivsalloc || ret != 0);
3188 		} else {
3189 			ret = isalloc(tsdn, ptr);
3190 		}
3191 	}
3192 
3193 	check_entry_exit_locking(tsdn);
3194 	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3195 	return ret;
3196 }
3197 
3198 /*
3199  * End non-standard functions.
3200  */
3201 /******************************************************************************/
3202 /*
3203  * The following functions are used by threading libraries for protection of
3204  * malloc during fork().
3205  */
3206 
3207 /*
3208  * If an application creates a thread before doing any allocation in the main
3209  * thread, then calls fork(2) in the main thread followed by memory allocation
3210  * in the child process, a race can occur that results in deadlock within the
3211  * child: the main thread may have forked while the created thread had
3212  * partially initialized the allocator.  Ordinarily jemalloc prevents
3213  * fork/malloc races via the following functions it registers during
3214  * initialization using pthread_atfork(), but of course that does no good if
3215  * the allocator isn't fully initialized at fork time.  The following library
3216  * constructor is a partial solution to this problem.  It may still be possible
3217  * to trigger the deadlock described above, but doing so would involve forking
3218  * via a library constructor that runs before jemalloc's runs.
3219  */
3220 #ifndef JEMALLOC_JET
3221 JEMALLOC_ATTR(constructor)
3222 static void
3223 jemalloc_constructor(void) {
3224 	malloc_init();
3225 }
3226 #endif
3227 
3228 #if !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(__NetBSD__)
3229 void
3230 jemalloc_prefork(void)
3231 #else
3232 JEMALLOC_EXPORT void
3233 _malloc_prefork(void)
3234 #endif
3235 {
3236 	tsd_t *tsd;
3237 	unsigned i, j, narenas;
3238 	arena_t *arena;
3239 
3240 #ifdef JEMALLOC_MUTEX_INIT_CB
3241 	if (!malloc_initialized()) {
3242 		return;
3243 	}
3244 #endif
3245 	assert(malloc_initialized());
3246 
3247 	tsd = tsd_fetch();
3248 
3249 	narenas = narenas_total_get();
3250 
3251 	witness_prefork(tsd_witness_tsdp_get(tsd));
3252 	/* Acquire all mutexes in a safe order. */
3253 	ctl_prefork(tsd_tsdn(tsd));
3254 	tcache_prefork(tsd_tsdn(tsd));
3255 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3256 	if (have_background_thread) {
3257 		background_thread_prefork0(tsd_tsdn(tsd));
3258 	}
3259 	prof_prefork0(tsd_tsdn(tsd));
3260 	if (have_background_thread) {
3261 		background_thread_prefork1(tsd_tsdn(tsd));
3262 	}
3263 	/* Break arena prefork into stages to preserve lock order. */
3264 	for (i = 0; i < 8; i++) {
3265 		for (j = 0; j < narenas; j++) {
3266 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3267 			    NULL) {
3268 				switch (i) {
3269 				case 0:
3270 					arena_prefork0(tsd_tsdn(tsd), arena);
3271 					break;
3272 				case 1:
3273 					arena_prefork1(tsd_tsdn(tsd), arena);
3274 					break;
3275 				case 2:
3276 					arena_prefork2(tsd_tsdn(tsd), arena);
3277 					break;
3278 				case 3:
3279 					arena_prefork3(tsd_tsdn(tsd), arena);
3280 					break;
3281 				case 4:
3282 					arena_prefork4(tsd_tsdn(tsd), arena);
3283 					break;
3284 				case 5:
3285 					arena_prefork5(tsd_tsdn(tsd), arena);
3286 					break;
3287 				case 6:
3288 					arena_prefork6(tsd_tsdn(tsd), arena);
3289 					break;
3290 				case 7:
3291 					arena_prefork7(tsd_tsdn(tsd), arena);
3292 					break;
3293 				default: not_reached();
3294 				}
3295 			}
3296 		}
3297 	}
3298 	prof_prefork1(tsd_tsdn(tsd));
3299 }
3300 
3301 #if !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(__NetBSD__)
3302 void
3303 jemalloc_postfork_parent(void)
3304 #else
3305 JEMALLOC_EXPORT void
3306 _malloc_postfork(void)
3307 #endif
3308 {
3309 	tsd_t *tsd;
3310 	unsigned i, narenas;
3311 
3312 #ifdef JEMALLOC_MUTEX_INIT_CB
3313 	if (!malloc_initialized()) {
3314 		return;
3315 	}
3316 #endif
3317 	assert(malloc_initialized());
3318 
3319 	tsd = tsd_fetch();
3320 
3321 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3322 	/* Release all mutexes, now that fork() has completed. */
3323 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3324 		arena_t *arena;
3325 
3326 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3327 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3328 		}
3329 	}
3330 	prof_postfork_parent(tsd_tsdn(tsd));
3331 	if (have_background_thread) {
3332 		background_thread_postfork_parent(tsd_tsdn(tsd));
3333 	}
3334 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3335 	tcache_postfork_parent(tsd_tsdn(tsd));
3336 	ctl_postfork_parent(tsd_tsdn(tsd));
3337 }
3338 
3339 #if !defined(__NetBSD__)
3340 void
3341 jemalloc_postfork_child(void)
3342 #else
3343 JEMALLOC_EXPORT void
3344 _malloc_postfork_child(void)
3345 #endif
3346 {
3347 	tsd_t *tsd;
3348 	unsigned i, narenas;
3349 
3350 	assert(malloc_initialized());
3351 
3352 	tsd = tsd_fetch();
3353 
3354 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3355 	/* Release all mutexes, now that fork() has completed. */
3356 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3357 		arena_t *arena;
3358 
3359 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3360 			arena_postfork_child(tsd_tsdn(tsd), arena);
3361 		}
3362 	}
3363 	prof_postfork_child(tsd_tsdn(tsd));
3364 	if (have_background_thread) {
3365 		background_thread_postfork_child(tsd_tsdn(tsd));
3366 	}
3367 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3368 	tcache_postfork_child(tsd_tsdn(tsd));
3369 	ctl_postfork_child(tsd_tsdn(tsd));
3370 }
3371 
3372 void (*
3373 je_malloc_message_get(void))(void *, const char *)
3374 {
3375 	return je_malloc_message;
3376 }
3377 
3378 void
3379 je_malloc_message_set(void (*m)(void *, const char *))
3380 {
3381 	je_malloc_message = m;
3382 }
3383 
3384 const char *
3385 je_malloc_conf_get(void)
3386 {
3387 	return je_malloc_conf;
3388 }
3389 
3390 void
3391 je_malloc_conf_set(const char *m)
3392 {
3393 	je_malloc_conf = m;
3394 }
3395 
3396 /******************************************************************************/
3397